text
stringlengths 12
1.05M
| repo_name
stringlengths 5
86
| path
stringlengths 4
191
| language
stringclasses 1
value | license
stringclasses 15
values | size
int32 12
1.05M
| keyword
listlengths 1
23
| text_hash
stringlengths 64
64
|
|---|---|---|---|---|---|---|---|
"""
HTML reader/writer for RichText
"""
# python imports
import re
from HTMLParser import HTMLParser
from xml.sax.saxutils import escape
# keepnote imports
from rednotebook.gui.keepnote.gui.richtext.textbuffer_tools import \
iter_buffer_contents, \
buffer_contents_iter_to_offset, \
normalize_tags, \
insert_buffer_contents, \
buffer_contents_apply_tags, \
TextBufferDom, \
TextDom, \
AnchorDom, \
TagDom, \
TagNameDom
from rednotebook.gui.keepnote.gui.richtext.richtextbuffer import \
RichTextBuffer, \
RichTextImage, \
RichTextHorizontalRule
from rednotebook.gui.keepnote.gui.richtext.richtext_tags import \
RichTextTag, \
RichTextModTag, \
RichTextFamilyTag, \
RichTextSizeTag, \
RichTextJustifyTag, \
RichTextFGColorTag, \
RichTextBGColorTag, \
RichTextIndentTag, \
RichTextBulletTag, \
RichTextLinkTag
# NOTE: leave this out in order to make my XHTML compatible with HTML browsers
# <?xml version="1.0" encoding="UTF-8"?>

# constants

# header/footer pairs written around a full (non-partial) document
XHTML_HEADER = """\
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
"""
XHTML_FOOTER = "</body></html>"

HTML_HEADER = """<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
"""
HTML_FOOTER = "</body></html>"

# literal bullet character placed at the start of bullet list items
BULLET_STR = u"\u2022 "

# CSS text-align values accepted by parse_css_style()
JUSTIFY_VALUES = set([
    "left",
    "center",
    "right",
    "fill",
    "justify"])
def tagcolor_to_html(c):
    """Convert a 16-bit-per-channel color string '#rrrrggggbbbb' into the
    8-bit web form '#rrggbb' by keeping the high byte of each channel."""
    assert len(c) == 13
    return c[0] + c[1:3] + c[5:7] + c[9:11]
def nest_indent_tags(contents, tag_table):
    """Convert indent tags so that they nest like HTML tags.

    Buffer indent tags are flat (an "indent 2" span can directly follow an
    "indent 1" span), but HTML lists must nest.  This generator rewrites
    the event stream so that exactly one begin/end pair is open per indent
    level.  An indent "end" event is deferred (indent_closing) until the
    next item reveals whether the indent level really decreases.
    """
    indent = 0             # number of indent levels currently open
    indent_closing = False # an indent "end" is pending resolution

    # loop through contents stream
    for item in contents:

        # if we are in the middle of a indent closing event, then the next
        # item determines what we should do
        if indent_closing:
            if item[0] == "anchor" or item[0] == "text":
                # if we see "content" (anchors or text) (instead of
                # immediately opening a new indent) then we must close all
                # indents (i.e. indent=0)
                while indent > 0:
                    yield ("end", None, tag_table.lookup(
                        RichTextIndentTag.tag_name(indent)))
                    indent -= 1
                indent_closing = False

            elif item[0] == "begin":
                # if we see a begining tag then check to see if its an
                # indentation tag
                tag = item[2]

                if isinstance(tag, RichTextIndentTag):
                    # (A) if it is a new indentation that is of lower indent
                    # close all indents until we match
                    next_indent = tag.get_indent()
                    while indent > next_indent:
                        yield ("end", None, tag_table.lookup(
                            RichTextIndentTag.tag_name(indent)))
                        indent -= 1
                indent_closing = False
            else:
                # do nothing
                pass

        # yield items
        if item[0] == "begin" and \
           isinstance(item[2], RichTextIndentTag):
            # if item is a begining indent, open indents until we match
            tag = item[2]
            next_indent = tag.get_indent()

            # should be true since (A) should have executed
            assert next_indent >= indent

            while indent < next_indent:
                # open new indents until we match level
                indent += 1
                assert indent > 0
                yield ("begin", None, tag_table.lookup(
                    RichTextIndentTag.tag_name(indent)))

        elif item[0] == "end" and \
             isinstance(item[2], RichTextIndentTag):
            # defer the close until the next item is seen
            next_indent = item[2].get_indent()
            indent_closing = True
        else:
            yield item

    # close all remaining indents
    while indent > 0:
        yield ("end", None, tag_table.lookup(
            RichTextIndentTag.tag_name(indent)))
        indent -= 1
def unnest_indent_tags(contents):
    """Convert nested indent events back into the unnested buffer form.

    "ol" begin/end events only adjust the current indent level, while
    "li <par_type>" events become "indent <level> <par_type>" begin/end
    pairs.  Sibling items at the same level close and reopen the enclosing
    item, and a literal bullet is emitted at the start of bullet items.
    """
    level = 0        # current indent depth
    open_items = []  # stack of open "indent N type" tag strings

    for event in contents:
        kind, pos, param = event

        if kind == "beginstr" and param == "ol":
            # entering a list: one more indent level
            level += 1

        elif kind == "beginstr" and param.startswith("li "):
            # close the enclosing open item, if any
            if open_items:
                yield ("endstr", None, open_items[-1])

            # open the new item at the current level
            item_type = param[3:]
            indent_tag = "indent %d %s" % (level, item_type)
            yield ("beginstr", None, indent_tag)
            open_items.append(indent_tag)

            # bullet items carry a literal bullet character
            if item_type == "bullet":
                yield ("beginstr", None, "bullet")
                yield ("text", None, BULLET_STR)
                yield ("endstr", None, "bullet")

        elif kind == "endstr" and param == "ol":
            # leaving a list: one less indent level
            level -= 1

        elif kind == "endstr" and param.startswith("li "):
            # close this item and resume the enclosing one, if any
            open_items.pop()
            yield ("endstr", None, "indent %d %s" % (level, param[3:]))
            if open_items:
                yield ("beginstr", None, open_items[-1])

        else:
            # pass all other events through untouched
            yield event
def find_paragraphs(contents):
    """Wrap each paragraph with a pair of tags.

    Wraps every newline-terminated run of text/anchors in a begin/end pair
    of P_TAG (or P_BULLET_TAG while inside a bullet indent).  Non-content
    events are buffered in 'others' and flushed just before the next piece
    of content.
    """
    within_par = False   # a paragraph tag is currently open
    others = []          # pending non-content events
    par_type = "none"    # paragraph kind of the current indent region
    pars = {"none": P_TAG,
            "bullet": P_BULLET_TAG}
    par_stack = []       # stack of opened paragraph tags

    for item in contents:
        if item[0] == "text":
            # flush pending non-content events
            for item2 in others:
                yield item2
            others = []

            if not within_par:
                # starting paragraph
                within_par = True
                yield ("begin", None, pars[par_type])
                par_stack.append(pars[par_type])

            # split the text on newlines; each newline ends a paragraph
            text = item[2]
            i = 0
            for j, c in enumerate(text):
                if not within_par:
                    within_par = True
                    yield ("begin", None, pars[par_type])
                    par_stack.append(pars[par_type])
                if c == "\n":
                    yield ("text", None, text[i:j+1])
                    yield ("end", None, par_stack.pop())
                    within_par = False
                    i = j+1

            # yield remaining text
            # NOTE(review): 'j' survives from the loop above; an empty text
            # event would leave it unbound here — presumably text events are
            # never empty.  TODO confirm.
            if i < j+1:
                if not within_par:
                    within_par = True
                    yield ("begin", None, pars[par_type])
                    par_stack.append(pars[par_type])
                yield ("text", None, text[i:j+1])

        elif item[0] == "anchor":
            # flush pending non-content events
            for item2 in others:
                yield item2
            others = []

            if not within_par:
                # starting paragraph
                within_par = True
                yield ("begin", None, pars[par_type])
                par_stack.append(pars[par_type])

            # yield anchor
            yield item

        else:
            # pass other items through
            if item[0] == "begin" and \
               isinstance(item[2], RichTextIndentTag):
                # entering an indent decides the paragraph kind
                par_type = item[2].get_par_indent()
            others.append(item)

    # close the final paragraph and flush trailing events
    if within_par:
        yield ("end", None, par_stack.pop())

    for item in others:
        yield item
def _expand_hex_color(color):
    """Expand a 3-digit hex color ('#abc') to 6-digit form ('#aabbcc').

    Strings of any other length are returned unchanged.
    """
    if len(color) == 4:
        x, a, b, c = color
        color = x + a + a + b + b + c + c
    return color


def parse_css_style(stylestr):
    """Parse a CSS 'style' attribute value into buffer tag-name strings.

    Yields strings such as 'size 12', 'family serif', 'left',
    'fg_color #aabbcc' and 'bg_color #aabbcc' for the statements that are
    understood; unrecognized statements are silently ignored.

    Raises HtmlError for an unknown text-align value.
    """
    # TODO: this parsing may be too simplistic
    for statement in stylestr.split(";"):
        statement = statement.strip()

        if statement.startswith("font-size"):
            # font size: keep only the digits, e.g. '12pt' => 12
            size = int("".join(filter(lambda x: x.isdigit(),
                                      statement.split(":")[1])))
            yield "size " + str(size)

        elif statement.startswith("font-family"):
            # font family
            yield "family " + statement.split(":")[1].strip()

        elif statement.startswith("text-align"):
            # text justification; the buffer calls "justify" "fill"
            align = statement.split(":")[1].strip()
            if align not in JUSTIFY_VALUES:
                raise HtmlError("unknown justification '%s'" % align)
            if align == "justify":
                yield "fill"
            else:
                yield align

        elif statement.startswith("color"):
            # foreground color ('background-color' does not match this
            # prefix, so the ordering of these branches is safe)
            fg_color = _expand_hex_color(statement.split(":")[1].strip())
            if fg_color.startswith("#") and len(fg_color) == 7:
                yield "fg_color " + fg_color

        elif statement.startswith("background-color"):
            # background color
            bg_color = _expand_hex_color(statement.split(":")[1].strip())
            if bg_color.startswith("#") and len(bg_color) == 7:
                yield "bg_color " + bg_color
class HtmlTagDom (TagDom):
    """DOM node representing an HTML-level element for a buffer tag."""
    def __init__(self, tag):
        TagDom.__init__(self, tag)
class RichTextParTag (RichTextTag):
    """Buffer tag marking a paragraph (<p>); kind is "none" or "bullet"."""
    def __init__(self, kind):
        RichTextTag.__init__(self, "p")
        # paragraph kind: "none" for plain text, "bullet" for list items
        self.kind = kind
class RichTextLiTag (RichTextTag):
    """Buffer tag marking a list item (<li>)."""
    def __init__(self):
        # name is "li " with a trailing space — presumably to match the
        # "li <par_type>" tag-string prefix used by the reader; confirm
        RichTextTag.__init__(self, "li ")
# singleton tags used while wrapping paragraphs/list items for writing
LI_TAG = RichTextLiTag()
P_TAG = RichTextParTag("none")
P_BULLET_TAG = RichTextParTag("bullet")
class LiHtmlTagDom (HtmlTagDom):
    """DOM node for an <li> element; kind is "none" or "bullet"."""
    def __init__(self, kind):
        HtmlTagDom.__init__(self, LI_TAG)
        # list-item kind ("none" or "bullet")
        self.kind = kind
class HtmlError (Exception):
    """Error for HTML parsing.

    Derives from Exception rather than StandardError: StandardError is
    deprecated and removed in Python 3, and since StandardError subclassed
    Exception, every existing handler still catches this class.
    """
    pass
#=============================================================================
# tag input/output

class HtmlTagReader (object):
    """Base class for parsing one kind of HTML tag into DOM nodes.

    io is the owning HtmlBuffer; htmltag is the HTML tag name handled.
    """
    def __init__(self, io, htmltag):
        self._io = io
        self.htmltag = htmltag

    def parse_starttag(self, htmltag, attrs):
        """Handle an opening <htmltag ...>; default is a no-op."""
        pass

    def parse_endtag(self, htmltag):
        """Handle a closing </htmltag>; default is a no-op."""
        pass
class HtmlTagWriter (object):
    """Base class for writing HTML for one RichText tag/anchor class."""
    def __init__(self, io, tagclass):
        self._io = io
        self.tagclass = tagclass

    def write_tag_begin(self, out, dom, xhtml):
        """Write the opening HTML for dom; default is a no-op."""
        pass

    def write_tag_end(self, out, dom, xhtml):
        """Write the closing HTML for dom; default is a no-op."""
        pass

    def write(self, out, dom, xhtml):
        """Write a complete element (used for anchors); default is a no-op."""
        pass
class HtmlTagModReader (HtmlTagReader):
    """simple font modifications (b/i/u)"""

    # map from HTML tag name to buffer tag-name string
    html2buffer_tag = {
        "b": "bold",
        "i": "italic",
        "u": "underline",
        "strike": "strike",
        "tt": "tt",
        "nobr": "nowrap"}

    def parse_starttag(self, htmltag, attrs):
        # KeyError cannot occur in practice: HtmlBuffer only dispatches the
        # tags this reader was registered for
        tagstr = self.html2buffer_tag[htmltag]
        self._io.append_child(TagNameDom(tagstr), True)
class HtmlTagModWriter (HtmlTagWriter):
    """Writes simple font-modification tags (b/i/u/strike/tt/nobr)."""

    # map from buffer tag name to HTML tag name
    buffer_tag2html = {
        "bold": "b",
        "italic": "i",
        "underline": "u",
        "strike": "strike",
        "tt": "tt",
        "nowrap": "nobr"
    }

    def __init__(self, io):
        HtmlTagWriter.__init__(self, io, RichTextModTag)

    def _html_name(self, dom):
        # HTML tag corresponding to this buffer tag
        return self.buffer_tag2html[dom.tag.get_property("name")]

    def write_tag_begin(self, out, dom, xhtml):
        out.write("<%s>" % self._html_name(dom))

    def write_tag_end(self, out, dom, xhtml):
        out.write("</%s>" % self._html_name(dom))
class HtmlTagLinkReader (HtmlTagReader):
    """Reads hyperlinks (<a href=...>)."""

    def __init__(self, io):
        HtmlTagReader.__init__(self, io, "a")

    def parse_starttag(self, htmltag, attrs):
        # use only the first href attribute; anchors without one are ignored
        href = next((value for key, value in attrs if key == "href"), None)
        if href is not None:
            self._io.append_child(TagNameDom("link " + href), True)
class HtmlTagLinkWriter (HtmlTagWriter):
    """Writes hyperlinks (<a href=...>)."""

    def __init__(self, io):
        HtmlTagWriter.__init__(self, io, RichTextLinkTag)

    def write_tag_begin(self, out, dom, xhtml):
        # escape the URL so it is safe inside the attribute
        href = escape(dom.tag.get_href())
        out.write('<a href="%s">' % href)

    def write_tag_end(self, out, dom, xhtml):
        out.write("</a>")
class HtmlTagSpanReader (HtmlTagReader):
    """<span> tags"""

    def __init__(self, io):
        HtmlTagReader.__init__(self, io, "span")

    def parse_starttag(self, htmltag, attrs):
        # translate each recognized CSS statement in the style attribute
        # into its own buffer tag
        for key, value in attrs:
            if key == "style":
                for tagstr in parse_css_style(value):
                    self._io.append_child(TagNameDom(tagstr), True)
class HtmlTagDivReader (HtmlTagReader):
    """<div> tags: handled like <span>, translating CSS style statements."""
    def __init__(self, io):
        HtmlTagReader.__init__(self, io, "div")

    def parse_starttag(self, htmltag, attrs):
        for key, value in attrs:
            if key == "style":
                for tagstr in parse_css_style(value):
                    self._io.append_child(TagNameDom(tagstr), True)
class HtmlTagSizeWriter (HtmlTagWriter):
    """Writes font size as an inline CSS <span>."""

    def __init__(self, io):
        HtmlTagWriter.__init__(self, io, RichTextSizeTag)

    def write_tag_begin(self, out, dom, xhtml):
        points = dom.tag.get_size()
        out.write('<span style="font-size: %dpt">' % points)

    def write_tag_end(self, out, dom, xhtml):
        out.write("</span>")
class HtmlTagFamilyWriter (HtmlTagWriter):
    """Writes font family as an inline CSS <span>."""

    def __init__(self, io):
        HtmlTagWriter.__init__(self, io, RichTextFamilyTag)

    def write_tag_begin(self, out, dom, xhtml):
        family = dom.tag.get_family()
        out.write('<span style="font-family: %s">' % family)

    def write_tag_end(self, out, dom, xhtml):
        out.write("</span>")
class HtmlTagFGColorWriter (HtmlTagWriter):
    """Writes foreground (text) color as an inline CSS <span>."""
    def __init__(self, io):
        HtmlTagWriter.__init__(self, io, RichTextFGColorTag)

    def write_tag_begin(self, out, dom, xhtml):
        tag = dom.tag
        # convert 16-bit-per-channel color to web "#rrggbb"
        out.write('<span style="color: %s">' %
                  tagcolor_to_html(tag.get_color()))

    def write_tag_end(self, out, dom, xhtml):
        out.write("</span>")
class HtmlTagBGColorWriter (HtmlTagWriter):
    """Writes background color as an inline CSS <span>."""
    def __init__(self, io):
        HtmlTagWriter.__init__(self, io, RichTextBGColorTag)

    def write_tag_begin(self, out, dom, xhtml):
        tag = dom.tag
        # convert 16-bit-per-channel color to web "#rrggbb"
        out.write('<span style="background-color: %s">' %
                  tagcolor_to_html(tag.get_color()))

    def write_tag_end(self, out, dom, xhtml):
        out.write("</span>")
class HtmlTagAlignWriter (HtmlTagWriter):
    """Writes paragraph justification as a CSS-styled <div>."""

    def __init__(self, io):
        HtmlTagWriter.__init__(self, io, RichTextJustifyTag)

    def write_tag_begin(self, out, dom, xhtml):
        tagname = dom.tag.get_property("name")
        # the buffer calls full justification "fill"; CSS calls it "justify"
        text = "justify" if tagname == "fill" else tagname
        out.write('<div style="text-align: %s">' % text)

    def write_tag_end(self, out, dom, xhtml):
        out.write("</div>")
class HtmlTagParReader (HtmlTagReader):
    # paragraph
    # NOTE: this tag is currently not used by KeepNote, but if pasting
    # text from another HTML source, KeepNote will interpret it as
    # a newline char
    def __init__(self, io):
        HtmlTagReader.__init__(self, io, "p")

    def parse_starttag(self, htmltag, attrs):
        self._io.append_text("\n")

    def parse_endtag(self, htmltag):
        self._io.append_text("\n")
class HtmlTagListItemReader (HtmlTagReader):
    """Reads list items (<li>), honoring their CSS list-style-type."""
    def __init__(self, io):
        HtmlTagReader.__init__(self, io, "li")

    def parse_starttag(self, htmltag, attrs):
        # default to a bulleted item unless the style says otherwise
        par_type = "bullet"

        for key, value in attrs:
            if key == "style":
                for statement in value.split(";"):
                    # NOTE(review): a style statement without ':' raises
                    # ValueError here — confirm inputs are well-formed
                    key2, value2 = statement.split(":")
                    value2 = value2.strip()
                    if key2.strip() == "list-style-type":
                        if value2 == "disc":
                            par_type = "bullet"
                        elif value2 == "none":
                            par_type = "none"

        tag = TagNameDom("li %s" % par_type)
        self._io.append_child(tag, True)
class HtmlTagListItemWriter (HtmlTagWriter):
    """Writes list items (<li>), styling non-bulleted items markerless."""

    def __init__(self, io):
        HtmlTagWriter.__init__(self, io, RichTextLiTag)

    def write_tag_begin(self, out, dom, xhtml):
        # bullet items rely on the list's default marker
        markup = ('<li>' if dom.kind == "bullet"
                  else '<li style="list-style-type: none">')
        out.write(markup)

    def write_tag_end(self, out, dom, xhtml):
        out.write("</li>\n")
class HtmlTagUnorderedListReader (HtmlTagReader):
    """Reads <ul>; both list kinds map to the generic "ol" DOM tag."""
    def __init__(self, io):
        HtmlTagReader.__init__(self, io, "ul")

    def parse_starttag(self, htmltag, attrs):
        self._io.append_child(TagNameDom("ol"), True)
class HtmlTagOrderedListReader (HtmlTagReader):
    """Reads <ol>; mapped to the same "ol" DOM tag as <ul>."""
    def __init__(self, io):
        HtmlTagReader.__init__(self, io, "ol")

    def parse_starttag(self, htmltag, attrs):
        self._io.append_child(TagNameDom("ol"), True)
class HtmlTagUnorderedListWriter (HtmlTagWriter):
    """Writes indent tags as unordered lists (<ul>)."""
    def __init__(self, io):
        HtmlTagWriter.__init__(self, io, RichTextIndentTag)

    def write_tag_begin(self, out, dom, xhtml):
        out.write("<ul>")
        #out.write("<ol>")

    def write_tag_end(self, out, dom, xhtml):
        out.write("</ul>\n")
        #out.write("</ol>\n")
class HtmlTagBulletWriter (HtmlTagWriter):
    """Suppresses bullet tags on output (inherits the no-op writers);
    bullet nodes are stripped in HtmlBuffer.prepare_dom_write."""
    def __init__(self, io):
        HtmlTagWriter.__init__(self, io, RichTextBulletTag)
class HtmlTagHrReader (HtmlTagReader):
    """Reads horizontal rules (<hr>)."""
    def __init__(self, io):
        HtmlTagReader.__init__(self, io, "hr")

    def parse_starttag(self, htmltag, attrs):
        # horizontal break, placed on a line of its own
        hr = RichTextHorizontalRule()
        self._io.append_text("\n")
        self._io.append_child(AnchorDom(hr), False)
        self._io.append_text("\n")
class HtmlTagHrWriter (HtmlTagWriter):
    """Writes horizontal rules, self-closed in XHTML mode."""

    def __init__(self, io):
        HtmlTagWriter.__init__(self, io, RichTextHorizontalRule)

    def write(self, out, dom, xhtml):
        out.write("<hr/>" if xhtml else "<hr>")
class HtmlTagImgReader (HtmlTagReader):
    """Reads images (<img>), creating a RichTextImage anchor."""
    def __init__(self, io):
        HtmlTagReader.__init__(self, io, "img")

    def parse_starttag(self, htmltag, attrs):
        """Parse image tag"""
        img = RichTextImage()
        width, height = None, None

        for key, value in attrs:
            if key == "src":
                img.set_filename(value)
            elif key == "width":
                try:
                    width = int(value)
                except ValueError:
                    # ignore width if we cannot parse it
                    pass
            elif key == "height":
                try:
                    height = int(value)
                except ValueError:
                    # ignore height if we cannot parse it
                    pass
            else:
                # ignore other attributes
                pass

        img.scale(width, height)
        self._io.append_child(AnchorDom(img), False)
class HtmlTagImgWriter (HtmlTagWriter):
    """Writes images (<img>), including any explicit dimensions."""

    def __init__(self, io):
        HtmlTagWriter.__init__(self, io, RichTextImage)

    def write(self, out, dom, xhtml):
        # build the optional width/height attribute string
        anchor = dom.anchor
        width, height = anchor.get_size()
        size_str = ""
        if width is not None:
            size_str += " width=\"%d\"" % width
        if height is not None:
            size_str += " height=\"%d\"" % height

        closer = " />" if xhtml else " >"
        out.write("<img src=\"%s\"%s%s" %
                  (anchor.get_filename(), size_str, closer))
#=============================================================================
# TODO: may need to include support for ignoring information between
# <scirpt> and <style> tags
class HtmlBuffer (HTMLParser):
"""Read and write HTML for a RichTextBuffer"""
def __init__(self, out=None):
HTMLParser.__init__(self)
self._out = out
self._mod_tags = "biu"
self._newline = False
self._tag_stack = []
self._butter_contents = []
self._text_queue = []
self._within_body = False
self._partial = False
self._indent = 0
self._entity_char_map = [("&", "amp"),
(">", "gt"),
("<", "lt"),
(" ", "nbsp")]
self._entity2char = {}
for ch, name in self._entity_char_map:
self._entity2char[name] = ch
self._charref2char = {"09": "\t"}
self._tag_readers = {}
self._tag_writers = []
# misc tags
self.add_tag_reader(HtmlTagParReader(self))
self.add_tag_reader(HtmlTagHrReader(self))
self.add_tag_writer(HtmlTagHrWriter(self))
self.add_tag_reader(HtmlTagImgReader(self))
self.add_tag_writer(HtmlTagImgWriter(self))
self.add_tag_reader(HtmlTagLinkReader(self))
self.add_tag_writer(HtmlTagLinkWriter(self))
# mod tags
self.add_tag_reader(HtmlTagModReader(self, "b"))
self.add_tag_reader(HtmlTagModReader(self, "i"))
self.add_tag_reader(HtmlTagModReader(self, "u"))
self.add_tag_reader(HtmlTagModReader(self, "strike"))
self.add_tag_reader(HtmlTagModReader(self, "tt"))
self.add_tag_reader(HtmlTagModReader(self, "nobr"))
self.add_tag_writer(HtmlTagModWriter(self))
# span/div readers
self.add_tag_reader(HtmlTagSpanReader(self))
self.add_tag_reader(HtmlTagDivReader(self))
# span/div writers
self.add_tag_writer(HtmlTagAlignWriter(self))
self.add_tag_writer(HtmlTagSizeWriter(self))
self.add_tag_writer(HtmlTagFamilyWriter(self))
self.add_tag_writer(HtmlTagFGColorWriter(self))
self.add_tag_writer(HtmlTagBGColorWriter(self))
# lists
self.add_tag_reader(HtmlTagListItemReader(self))
self.add_tag_writer(HtmlTagListItemWriter(self))
self.add_tag_reader(HtmlTagUnorderedListReader(self))
self.add_tag_reader(HtmlTagOrderedListReader(self))
self.add_tag_writer(HtmlTagUnorderedListWriter(self))
self.add_tag_writer(HtmlTagBulletWriter(self))
def add_tag_reader(self, tag_reader):
self._tag_readers[tag_reader.htmltag] = tag_reader
def add_tag_writer(self, tag_writer):
self._tag_writers.append(tag_writer)
def set_output(self, out):
"""Set the output stream for HTML"""
self._out = out
#===========================================
# Reading HTML
def read(self, infile, partial=False, ignore_errors=False):
"""Read from stream infile to populate textbuffer"""
#self._text_queue = []
self._within_body = False
self._partial = partial
self._dom = TextBufferDom()
self._dom_ptr = self._dom
self._tag_stack = [(None, self._dom)]
try:
for line in infile:
self.feed(line)
self.close()
except Exception, e:
# reraise error if not ignored
if not ignore_errors:
raise
self.process_dom_read(self._dom)
return unnest_indent_tags(self._dom.get_contents())
def process_dom_read(self, dom):
"""Process a DOM after reading"""
def walk(node):
if isinstance(node, TagNameDom) and node.tagname == "ol":
# new lists imply newline if it has a previous sibling
if node.prev_sibling():
node.get_parent().insert_before(node, TextDom("\n"))
if isinstance(node, TagNameDom) and node.tagname.startswith("li "):
# list items end with an implied newline
if not (isinstance(node.last_child(), TagNameDom) and \
node.last_child().tagname == "ol"):
node.append_child(TextDom("\n"))
for child in list(node):
walk(child)
walk(dom)
def append_text(self, text):
if len(text) > 0:
last_child = self._dom_ptr.last_child()
if isinstance(last_child, TextDom):
last_child.text += text
else:
self._dom_ptr.append_child(TextDom(text))
def append_child(self, child, visit):
self._dom_ptr.append_child(child)
if visit:
self._dom_ptr = child
def handle_starttag(self, htmltag, attrs):
"""Callback for parsing a starting HTML tag"""
self._newline = False
# start a new tag on htmltag stack
self._tag_stack.append((htmltag, self._dom_ptr))
if htmltag == "html":
# ignore html tag
pass
elif htmltag == "body":
# note that we are within the body tag
self._within_body = True
elif htmltag == "br":
# insert newline
self.append_text("\n")
self._newline = True
elif htmltag in self._tag_readers:
# use tag parser
self._tag_readers[htmltag].parse_starttag(htmltag, attrs)
else:
# ingore other html tags
pass
def handle_endtag(self, htmltag):
"""Callback for parsing a ending HTML tag"""
if not self._partial:
if not self._within_body:
return
if htmltag in ("html", "body"):
self._within_body = False
return
# keep track of newline status
if htmltag != "br":
self._newline = False
if htmltag == "ul" or htmltag == "ol" or htmltag == "li":
self._newline = True
elif htmltag in self._tag_readers:
# use tag parser
self._tag_readers[htmltag].parse_endtag(htmltag)
# pop dom stack
if len(self._tag_stack) == 0:
return
else:
htmltag2, self._dom_ptr = self._tag_stack.pop()
while len(self._tag_stack) > 0 and htmltag2 != htmltag:
htmltag2, self._dom_ptr = self._tag_stack.pop()
def handle_data(self, data):
"""Callback for character data"""
if not self._partial and not self._within_body:
return
if self._newline:
data = re.sub("^\n[\n ]*", "", data)
data = re.sub("[\n ]+", " ", data)
self._newline = False
else:
data = re.sub("[\n ]+", " ", data)
if len(data) > 0:
self.append_text(data)
def handle_entityref(self, name):
"""Callback for reading entityref"""
if not self._partial and not self._within_body:
return
self.append_text(self._entity2char.get(name, ""))
def handle_charref(self, name):
"""Callback for reading charref"""
if not self._partial and not self._within_body:
return
self.append_text(self._charref2char.get(name, ""))
#================================================
# Writing HTML
def write(self, buffer_content, tag_table, title=None,
partial=False, xhtml=True):
if not partial:
self._write_header(title, xhtml=xhtml)
# normalize contents, prepare them for DOM
contents = normalize_tags(
nest_indent_tags(find_paragraphs(buffer_content), tag_table),
is_stable_tag=lambda tag:
isinstance(tag, (RichTextIndentTag, RichTextParTag)))
dom = TextBufferDom(contents)
self.prepare_dom_write(dom)
self.write_dom(dom, xhtml=xhtml)
if not partial:
self._write_footer(xhtml=xhtml)
def _write_header(self, title, xhtml=True):
if xhtml:
self._out.write(XHTML_HEADER)
else:
self._out.write(HTML_HEADER)
if title:
self._out.write(u"<title>%s</title>\n" % escape(title))
self._out.write("</head><body>")
def _write_footer(self, xhtml=True):
if xhtml:
self._out.write(XHTML_FOOTER)
else:
self._out.write(HTML_FOOTER)
def write_dom(self, dom, xhtml=True):
"""Write DOM"""
for child in dom:
if isinstance(child, TextDom):
self.write_text(child.text, xhtml=xhtml)
elif isinstance(child, TagDom):
self.write_tag_begin(child, xhtml=xhtml)
self.write_dom(child, xhtml=xhtml)
self.write_tag_end(child, xhtml=xhtml)
elif isinstance(child, AnchorDom):
self.write_anchor(child, child.anchor, xhtml=xhtml)
else:
raise Exception("unknown dom '%s'" % str(dom))
def write_text(self, text, xhtml=True):
"""Write text"""
# TODO: could use escape()
# TODO: could try to speed this up
text = text.replace("&", "&")
text = text.replace(">", ">")
text = text.replace("<", "<")
text = text.replace("\t", "	")
text = text.replace(" ", " ")
if xhtml:
text = text.replace("\n", "<br/>\n")
else:
text = text.replace("\n", "<br>\n")
self._out.write(text)
def write_anchor(self, dom, anchor, xhtml=True):
"""Write an anchor object"""
for tag_writer in self._tag_writers:
if isinstance(anchor, tag_writer.tagclass):
tag_writer.write(self._out, dom, xhtml)
return
# warning
#TODO:
print "unknown anchor element", anchor
def write_tag_begin(self, dom, xhtml=True):
"""Write opening tag of DOM"""
tag = dom.tag
for tag_writer in self._tag_writers:
if isinstance(tag, tag_writer.tagclass):
tag_writer.write_tag_begin(self._out, dom, xhtml)
return
def write_tag_end(self, dom, xhtml=True):
"""Write closing tag of DOM"""
tag = dom.tag
for tag_writer in self._tag_writers:
if isinstance(tag, tag_writer.tagclass):
tag_writer.write_tag_end(self._out, dom, xhtml)
return
def prepare_dom_write(self, dom):
"""Prepare a DOM for writing"""
# TODO: break up into separate functions
# (1) change all <p> tags to li, if inside indent
# (2) else remove <p>
# (3) insert <li> above <ol>
def walk(node, within_indent, par_type):
if isinstance(node, TagDom):
if isinstance(node.tag, RichTextParTag):
if within_indent:
# (1) change p to li
item_dom = LiHtmlTagDom(node.tag.kind)
# move all children of p to li
while True:
child = node.first_child()
if not child:
break
child.remove()
item_dom.append_child(child)
parent = node.get_parent()
parent.replace_child(node, item_dom)
return
else:
# (2) remove p
parent = node.get_parent()
# move all children of p to p.parent
while True:
child = node.first_child()
if not child:
break
child.remove()
parent.insert_before(node, child)
node.remove()
# (3) insert li above ol
elif isinstance(node.tag, RichTextIndentTag):
if within_indent:
# todo: change this to bullet
item_dom = LiHtmlTagDom("none")
parent = node.get_parent()
parent.replace_child(node, item_dom)
item_dom.append_child(node)
within_indent = True
for child in list(node):
walk(child, within_indent, par_type)
walk(dom, False, "none")
# General processing
# - <hr/> tags should consume the surronding newlines
# (it will supply them)
# - </li> consumes preceding newline
# - bullet tags and their contents should be removed
#
# TODO: could combine style tags that have only child (another style)
# walk dom in preorder traversal
last_leaf = [None]
def walk(node):
if isinstance(node, TagDom):
# remove bullet tags and their contents
if isinstance(node.tag, RichTextBulletTag):
node.remove()
return
# delete preceding newline of <hr/>
if isinstance(node, AnchorDom) and \
isinstance(node.anchor, RichTextHorizontalRule) and \
isinstance(last_leaf[0], TextDom) and \
last_leaf[0].text.endswith("\n"):
last_leaf[0].text = last_leaf[0].text[:-1]
# delete preceding newline of <ol> <ul>
if isinstance(node, TagDom) and \
isinstance(node.tag, RichTextIndentTag) and \
isinstance(last_leaf[0], TextDom) and \
last_leaf[0].text.endswith("\n"):
last_leaf[0].text = last_leaf[0].text[:-1]
# delete preceding newline of </li>
if isinstance(node, LiHtmlTagDom):
# get right most descendant
child = node.last_child()
while child and not child.is_leaf():
if isinstance(child, TagDom) and \
isinstance(child.tag, RichTextIndentTag):
# let the next li consume newline
child = None
else:
child = child.last_child()
if isinstance(child, TextDom) and \
child.text.endswith("\n"):
child.text = child.text[:-1]
if node.is_leaf():
# process leaves
# delete succeeding newline of <hr/>
if isinstance(last_leaf[0], AnchorDom) and \
isinstance(last_leaf[0].anchor, RichTextHorizontalRule) and \
isinstance(node, TextDom) and \
node.text.startswith("\n"):
node.text = node.text[1:]
# empty tags are skiped as leaves
if not isinstance(node, TagDom):
# record leaf
last_leaf[0] = node
else:
# recurse
for child in list(node):
walk(child)
# remove empty tags
if isinstance(node, TagDom) and node.is_leaf():
node.remove()
walk(dom)
|
tomka/rednotebook
|
rednotebook/gui/keepnote/gui/richtext/richtext_html.py
|
Python
|
gpl-2.0
| 36,691
|
[
"VisIt"
] |
364a47091797423ddf223488d0c554c1e9026643349899bf88e80bc33d6800e6
|
import pytest
from json import load
from GroupIBTIA import fetch_incidents_command, Client
# canned API responses and the incidents expected from them, keyed by
# collection name
with open('test_data/example.json') as example:
    RAW_JSON = load(example)

with open('test_data/results.json') as results:
    RESULTS = load(results)
# Because of errors with markdown tables
# (these expected results embed markdown tables inside rawJSON strings, so
# they are kept inline instead of in test_data/results.json)
RESULTS.update({
    'osi/git_leak': (
        {'last_fetch': {'osi/git_leak': 1611219371626093}},
        [
            {
                'name': 'Git Leak: conf/nginx/sites-available/whatsinmyyogurt',
                'occurred': '2021-01-21T08:56:11Z',
                'rawJSON': '{"dateDetected": "2021-01-21T08:56:11+00:00", "dateUpdated": "1561036415", '
                           '"evaluation": {"admiraltyCode": "A6", "credibility": 100, "reliability": 100, '
                           '"severity": "green", "tlp": "amber", "ttl": 30}, '
                           '"file": "https://bt.group-ib.com/api/v2/osi/git_leak'
                           '/f201c253ac71f7d78db39fa111a2af9d7ee7a3f7/bWFpbi01NDA4'
                           'YWY0MDE2ZTVmZDFjYTZlYWQzNThjYzNiMmI0YjYwNWY1NGY2ODU4Yzc'
                           '4YmVmMGNlYmUyZGVlMDZmMDhm", "id": "f201c253ac71f7d78db39fa111a2af9d7ee7a3f7", '
                           '"matchesType": ["keyword"], "matchesTypeCount": {"card": 0, '
                           '"cisco": 0, "commonKeywords": 0, "domain": 0, "dsn": 0, "email": 0, '
                           '"google": 0, "ip": 0, "keyword": 1, "login": 0, "metasploit": 0, "nmap": 0, '
                           '"pgp": 0, "sha": 0, "slackAPI": 0, "ssh": 0}, '
                           '"name": "Git Leak: conf/nginx/sites-available/whatsinmyyogurt", '
                           '"repository": "openfoodfacts/openfoodfacts-server", '
                           '"revisions": "| File | File Difference | Author Email | Author Name | Date Created |\\n'
                           '| ---- | --------------- | ------------ | ----------- | ------------ |\\n'
                           '| [https://bt.group-ib.com/api/v2/osi/git_leak]'
                           '(https://bt.group-ib.com/api/v2/osi/git_leak'
                           '/f201c253ac71f7d78db39fa111a2af9d7ee7a3f7/cmV2aXNpb24tZmlsZS01NDA4YWY0MDE2ZTVmZDF'
                           'jYTZlYWQzNThjYzNiMmI0YjYwNWY1NGY2ODU4Yzc4YmVmMGNlYmUyZGVlMDZmMDhm) | '
                           '[https://bt.group-ib.com/api/v2/osi/git_leak]'
                           '(https://bt.group-ib.com'
                           '/api/v2/osi/git_leak/f201c253ac71f7d78db39fa111a2af9d7ee7a3f7/cmV2aXNpb24tZml'
                           'sZURpZmYtNTQwOGFmNDAxNmU1ZmQxY2E2ZWFkMzU4Y2MzYjJiNGI2MDVmNTRmNjg1OGM3OGJlZjB'
                           'jZWJlMmRlZTA2ZjA4Zg==) | some@gmail.ru | sadsdsa | 2019-06-20T13:13:35+00:00 |\\n", '
                           '"seqUpdate": 1611219371626093, "source": "github", '
                           '"gibType": "osi/git_leak", "relatedIndicatorsData": [], "systemSeverity": 1}'}]),
    'osi/public_leak': (
        {'last_fetch': {'osi/public_leak': 1601909532153438}},
        [
            {
                'name': 'Public Leak: a9a5b5cb9b971a2a037e3a0a30654185ea148095',
                'occurred': '2020-10-05T17:51:31Z',
                'rawJSON': '{"bind": [], "created": "2020-10-05T17:51:31+03:00", "data": '
                           '"Pasted at: 05/10/2020 15:45", "displayOptions": null, '
                           '"evaluation": {"admiraltyCode": "C3", "credibility": 50, '
                           '"reliability": 50, "severity": "orange", "tlp": "amber", "ttl": '
                           '30}, "hash": "a9a5b5cb9b971a2a037e3a0a30654185ea148095", "id": '
                           '"a9a5b5cb9b971a2a037e3a0a30654185ea148095", "language": "c", '
                           '"linkList": "| Author | Date Detected | Date Published | Hash | Link | Source |\\n'
                           '| ------ | ------------- | -------------- | ---- |----- | ------ |\\n| whaaaaaat | '
                           '2020-10-05T17:51:31+03:00 | 2020-10-05T17:45:46+03:00 | '
                           '3066db9f57b7997607208fedc45d7203029d9cb3 | '
                           '[https://some.ru](https://some.ru) | some.ru '
                           '|\\n", "matches": "| Type | Sub Type | Value |\\n| ---- | -------- | ----- |\\n| email '
                           '| email | some@gmail.ru |\\n", '
                           '"oldId": null, '
                           '"portalLink": "https://bt.group-ib.com/osi/public_leak?'
                           'searchValue=id:a9a5b5cb9b971a2a037e3a0a30654186ea248094", '
                           '"seqUpdate": 1601909532153438, "size": "345 B", "updated": '
                           '"2020-10-05T17:51:31+03:00", "useful": 1, "name": '
                           '"Public Leak: a9a5b5cb9b971a2a037e3a0a30654185ea148095", "gibType": '
                           '"osi/public_leak", "relatedIndicatorsData": [], "systemSeverity": 2}'
            }
        ]
    ),
    'bp/phishing_kit': (
        {'last_fetch': {'bp/phishing_kit': 1614921031175}},
        [
            {'name': 'Phishing Kit: 8d7ea805fe20d6d77f57e2f0cadd17b1',
             'occurred': '2021-01-14T12:10:41Z',
             'rawJSON': '{"dateDetected": "2021-01-14T12:10:41+00:00", "dateFirstSeen": "2021-01-14T13:10:41+00:00", '
                        '"dateLastSeen": "2021-01-14T14:12:17+00:00", "downloadedFrom": "| URL | File Name '
                        '| Domain | Date |\\n| --- | --------- | ------ | ---- |\\n'
                        '| https://some.ru | show.zip | some.ru | 2021-01-21 10:10:41 |\\n'
                        '| https://some.ru | show.zip | ''some.ru '
                        '| 2021-01-21 10:10:41 |\\n| https://some.ru | show.zip '
                        '| some.ru | 2021-01-21 10:10:41 |\\n", '
                        '"emails": [], "evaluation": {"admiraltyCode": "B2", "credibility": 70, '
                        '"reliability": 80, "severity": "orange", "tlp": "amber", "ttl": '
                        '30}, "hash": "8d7ea805fe20d6d77f57e2f0cadd17b1", "id": '
                        '"044f3f2cb599228c1882884eb77eb073f68a25f2", "isFavourite": '
                        'false, "isHidden": false, "oldId": "396793696", "path": '
                        '"https://tap.group-ib.com/api/api/v2/web/attacks/phishing_kit'
                        '/044f3f2cb599228c1882884eb77eb073f68a25f2/file'
                        '/95b61a1df152012abb79c3951ed98680e0bd917bbcf1d440e76b66a120292c76", '
                        '"portalLink": "https://bt.group-ib.com/attacks/phishing_kit?searchValue='
                        'id:044f3f2cb599228c1882884eb77eb073f68a25f2", '
                        '"seqUpdate": 1614921031175, "targetBrand": [], "tsFirstSeen": '
                        'null, "tsLastSeen": null, "variables": null, "name": '
                        '"Phishing Kit: 8d7ea805fe20d6d77f57e2f0cadd17b1", "gibType": '
                        '"bp/phishing_kit", "relatedIndicatorsData": [[]], '
                        '"systemSeverity": 2}'}]),
})
# every collection the integration can fetch; used to parametrize the tests
COLLECTION_NAMES = ['compromised/account', 'compromised/card', 'osi/git_leak', 'osi/public_leak',
                    'bp/phishing', 'bp/phishing_kit', 'malware/targeted_malware', "compromised/breached",
                    'bp/domain']
@pytest.fixture(scope='function', params=COLLECTION_NAMES, ids=COLLECTION_NAMES)
def session_fixture(request):
    """Provide one (collection_name, Client) pair per entry in COLLECTION_NAMES.

    The fixture is parametrized, so every test that uses it is run once per
    collection; a fresh Client is built for each test function.
    """
    return request.param, Client(base_url='https://some.ru')
def test_fetch_incidents_command(mocker, session_fixture):
    """Check fetch_incidents_command against canned data for one collection.

    The client's poll generator is mocked to yield the stored RAW_JSON chunk
    for the collection; the returned (last_run, incidents) pair must equal
    the corresponding RESULTS fixture entry.
    """
    collection_name, client = session_fixture
    mocker.patch.object(client, 'create_poll_generator', return_value=[RAW_JSON[collection_name]])
    result = fetch_incidents_command(client=client, last_run={}, first_fetch_time='3 days',
                                     incident_collections=[collection_name], requests_count=1)
    # RESULTS values are stored as sequences; compare as a tuple.
    assert result == tuple(RESULTS[collection_name])
|
demisto/content
|
Packs/GroupIB_ThreatIntelligenceAttribution/Integrations/GroupIBTIA/GroupIBTIA_test.py
|
Python
|
mit
| 7,954
|
[
"Amber"
] |
2fa08971dfe1d39d2737d66baae258fa80817cb371e7eb2d2b0529b2fddde398
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Day names of the Positivist (Comtean) calendar for common years, indexed by
# 0-based day-of-year.  NOTE(review): the sequence appears to end with the two
# complementary festival days ("the Dead", "Sainted Women"); the indices used
# as keys in leap_day_replacements below refer to positions in this tuple —
# confirm against the upstream convertdate data.
day_names = (
    "Prometheus",
    "Hercules",
    "Orpheus",
    "Ulysses",
    "Lycurgus",
    "Romulus",
    "Numa",
    "Belus",
    "Sesostris",
    "Menu",
    "Cyrus",
    "Zoroaster",
    "The Druids",
    "Buddha",
    "Fuxi",
    "Laozi",
    "Mencius",
    "Theocrats of Tibet",
    "Theocrats of Japan",
    "Manco Cápac",
    "Confucius",
    "Abraham",
    "Samuel",
    "Solomon",
    "Isaiah",
    "St. John the Baptist",
    "Haroun-al-Raschid",
    "Muhammad",
    "Hesiod",
    "Tyrtéus",
    "Anacreon",
    "Pindar",
    "Sophocles",
    "Theocritus",
    "Aeschylus",
    "Scopas",
    "Zeuxis",
    "Ictinus",
    "Praxiteles",
    "Lysippus",
    "Apelles",
    "Phidias",
    "Aesop",
    "Plautus",
    "Terence",
    "Phaedrus",
    "Juvenal",
    "Lucian",
    "Aristophanes",
    "Ennius",
    "Lucretius",
    "Horace",
    "Tibullus",
    "Ovid",
    "Lucan",
    "Virgil",
    "Anaximander",
    "Anaximenes",
    "Heraclitus",
    "Anaxagoras",
    "Democritus",
    "Herodotus",
    "Thales",
    "Solon",
    "Xenophanes",
    "Empodocles",
    "Thucydides",
    "Archytas",
    "Apollonius of Tyana",
    "Pythagoras",
    "Aristippus",
    "Antisthenes",
    "Zeno",
    "Cicero",
    "Epictetus",
    "Tacitus",
    "Socrates",
    "Xenocrates",
    "Philo of Alexandria",
    "St. John the Evangelist",
    "St. Justin",
    "St. Clement of Alexandria",
    "Origen",
    "Plato",
    "Theophrastus",
    "Herophilus",
    "Erasistratus",
    "Celsus",
    "Galen",
    "Avicenna",
    "Hippocrates",
    "Euclid",
    "Aristéus",
    "Theodisius of Bithynia",
    "Hero",
    "Pappus",
    "Diophantus",
    "Apollonius",
    "Eudoxus",
    "Pytheas",
    "Aristarchus",
    "Eratos-thenes",
    "Ptolemy",
    "Albategnius",
    "Hipparchus",
    "Varro",
    "Columella",
    "Vitruvius",
    "Strabo",
    "Frontinus",
    "Plutarch",
    "Pliny the Elder",
    "Miltiades",
    "Leonidas",
    "Aristides",
    "Cimon",
    "Xenophon",
    "Phocion",
    "Themistocles",
    "Pericles",
    "Philip",
    "Demosthenes",
    "PtolemyLagus",
    "Philopoemen",
    "Polybius",
    "Alexander",
    "JuniusBrutus",
    "Camillus",
    "Fabricius",
    "Hannibal",
    "Paulus Aemilius",
    "Marius",
    "Scipio",
    "Augustus",
    "Vespasian",
    "Hadrian",
    "Antonius",
    "Papinian",
    "Alexander Severus",
    "Trajan",
    "St. Luke",
    "St. Cyprian",
    "St. Athanasius",
    "St. Jerome",
    "St. Ambrose",
    "St. Monica",
    "St. Augustine",
    "Constantine",
    "Theodosius",
    "St. Chrysostom",
    "St. Genevieve of Paris",
    "St. Pulcheria",
    "St. Gregory the Great",
    "Hildebrand",
    "St. Benedict",
    "St. Boniface",
    "St. Isidore of Seville",
    "Lanfranc",
    "Heloise",
    "the architects of the Middle Ages",
    "St. Bernard",
    "St. Francis Xavier",
    "St. Charles Borromeo",
    "St. Theresa",
    "St. Vincent de Paul",
    "Bourdaloue",
    "William Penn",
    "Bossuet",
    "Theodoric the Great",
    "Pelayo",
    "Otho the Great",
    "St. Henry",
    "Villers",
    "Don John of Austria",
    "Alfred",
    "Charles Martel",
    "El Cid",
    "Richard I",
    "Joan of Arc",
    "Albuquerque",
    "Bayard",
    "Godfrey",
    "St. Leo the Great",
    "Gerbert",
    "Peter the Hermit",
    "Suger",
    "Alexander III",
    "St. Francis of Assisi",
    "Innocent III",
    "St. Clotilde",
    "St. Bathilda",
    "St. Stephen of Hungary",
    "St. Elizabeth of Hungary",
    "Blanche of Castille",
    "St. Ferdinand III",
    "St. Louis",
    "the Troubadours",
    "Boccaccio",
    "Rabelais",
    "Cervantes",
    "La Fontain",
    "Defoe",
    "Aristo",
    "Leonardo da Vinci",
    "Michael Angelo",
    "Holbein",
    "Poussin",
    "Velásquez",
    "Teniers",
    "Raphael",
    "Froissart",
    "Camoens",
    "The Spanish Romancers",
    "Chateaubriand",
    "Walter Scott",
    "Manzoni",
    "Tasso",
    "Petrarca",
    "Thomas of Kempis",
    "Mademoiselle de Lafayette",
    "Fénélon",
    "Klopstock",
    "Byron",
    "Milton",
    "Marco Polo",
    "Jacques Coeur",
    "Vasco de Gama",
    "Napier",
    "Lacaille",
    "Cook",
    "Columbus",
    "Benvenuto Cellini",
    "Amontons",
    "Harrison",
    "Dollond",
    "Arkwright",
    "Conté",
    "Vaucanson",
    "Stevin",
    "Mariotte",
    "Papin",
    "Black",
    "Jouffroy",
    "Dalton",
    "Watt",
    "Bernard de Palissy",
    "Guglielmini",
    "Duhamel du Monceau",
    "Saussure",
    "Coulomb",
    "Carnot",
    "Montgolfier",
    "Lope de Vega",
    "Moreto",
    "Rojas",
    "Otway",
    "Lessing",
    "Goethe",
    "Calderon",
    "Tirso",
    "Vondel",
    "Racine",
    "Voltaire",
    "Metastasio",
    "Schiller",
    "Corneille",
    "Almarcon",
    "Mademoiselle de Motteville",
    "Mademoiselle de Sévigné",
    "Lesage",
    "Mademoiselle deStaal",
    "Fielding",
    "Moliere",
    "Pergolese",
    "Sacchini",
    "Gluck",
    "Beethoven",
    "Rossini",
    "Bellini",
    "Mozart",
    "Albertus Magnus",
    "Roger Bacon",
    "St. Bonaventura",
    "Ramus",
    "Montaigne",
    "Campanella",
    "St. Thomas Aquinas",
    "Hobbes",
    "Pascal",
    "Locke",
    "Vauvenargues",
    "Diderot",
    "Cabanis",
    "Lordbacon",
    "Grotius",
    "Fontenelle",
    "Vico",
    "Fréret",
    "Montesquieu",
    "Buffon",
    "Leibnitz",
    "Robertson",
    "Adam Smith",
    "Kant",
    "Condercet",
    "Joseph de Maistre",
    "Hegel",
    "Hume",
    "Marie de Molina",
    "Cosimo de Medici the Elder",
    "Philippe de Comines",
    "Isabella of Castille",
    "Charles V",
    "Henry Iv",
    "Louis Xi",
    "L'Hôpital",
    "Barneveldt",
    "Gustavus Adolphus",
    "De Witt",
    "Ruyter",
    "William III",
    "William the Silent",
    "Ximenes",
    "Sully",
    "Mazarin",
    "Colbert",
    "D'Aranda",
    "Turgot",
    "Richelieu",
    "Sidney",
    "Franklin",
    "Washington",
    "Jefferson",
    "Bolivar",
    "Francia",
    "Cromwell",
    "Copernicus",
    "Kepler",
    "Huyghens",
    "James Bernouilli",
    "Bradley",
    "Volta",
    "Galileo",
    "Vieta",
    "Wallis",
    "Clairaut",
    "Euler",
    "D'Alembert",
    "Lagrange",
    "Newton",
    "Bergmann",
    "Priestley",
    "Cavendish",
    "Guyton Morveau",
    "Berthollet",
    "Berzelius",
    "Lavoisier",
    "Harvey",
    "Boerhaave",
    "Linnaeus",
    "Haller",
    "Lamarck",
    "Broussais",
    "Gall",
    "the Dead",
    "Sainted Women"
)
# Alternate honorees used in leap years: maps a 0-based index into day_names
# to the name that replaces the common-year entry for that day.
leap_day_replacements = {
    0: "Cadmus",
    1: "Theseus",
    2: "Tiresias",
    7: "Semiramus",
    12: "Ossian",
    19: "Tamehameha",
    21: "Joseph",
    23: "David",
    26: "Abderrahman",
    29: "Sappho",
    32: "Euripides",
    33: "Longus",
    42: "Pilpay",
    44: "Menander",
    60: "Leucippus",
    67: "Philolaus",
    73: "Pliny the Younger",
    74: "Arrian",
    80: "St. Irenaeus",
    82: "Tertullian",
    89: "Averrhoes",
    94: "Ctesibius",
    98: "Aratus",
    99: "Nearchus",
    100: "Berosus",
    101: "Sosigenes",
    103: "Nasreddin",
    117: "Epaminondas",
    127: "Cincinnatus",
    128: "Regulus",
    131: "the Gracchi",
    133: "Maecenas",
    134: "Titus",
    135: "Nerva",
    136: "Marcus Aurelius",
    137: "Ulpian",
    138: "Aetius",
    140: "St. James",
    149: "St. Basil",
    151: "Marcian",
    154: "St. Anthony",
    155: "St. Austin",
    156: "St. Bruno",
    157: "St. Anselm",
    158: "Beatrice",
    159: "St. Benezet",
    161: "Ignatius Loyola",
    162: "Fredrick Borromeo",
    163: "St. Catharine of Siena",
    164: "Abbé de l'Epée",
    165: "Claude Fleury",
    166: "George Fox",
    170: "Henry the Fowler",
    172: "La Valette",
    173: "John Sobieski",
    176: "Tancred",
    177: "Saladin",
    178: "Marina",
    179: "Sir Walter Raleigh",
    182: "Leo IV",
    183: "Peter Damian",
    185: "St. Eligius",
    186: "Becket",
    187: "St. Dominic",
    190: "St. Mathilda of Tuscany",
    191: "Mathias Corvinus",
    194: "Alfonso X",
    197: "Chaucer",
    198: "Swift",
    200: "Burns",
    201: "Goldsmith",
    203: "Titian",
    204: "Paul Veronese",
    205: "Rembrandt",
    206: "Lesueuer",
    207: "Murillo",
    208: "Rubens",
    210: "Joinville",
    211: "Spenser",
    214: "James Fenimore Cooper",
    218: "Louis of Granada & Bunyan",
    219: "Mademoiselle de Staël",
    220: "St. Francis of Sales",
    221: "Gessner",
    222: "Élisa Mercœur & Shelly",
    224: "Chardin",
    225: "Gresham",
    226: "Magellan",
    227: "Briggs",
    228: "Delambre",
    229: "Tasman",
    232: "Wheatstone",
    233: "Pierre Leroy",
    234: "Graham",
    235: "Jacquard",
    238: "Torricelli",
    239: "Boyle",
    240: "Worcester",
    242: "Fulton",
    243: "Thilorier",
    246: "Riquet",
    247: "Bourgelat",
    248: "Bouguer",
    249: "Borda",
    250: "Vauban",
    252: "Montalvan",
    253: "Guillem de Castro",
    254: "Guevara",
    263: "Alfieri",
    267: "Mademoiselle Roland",
    268: "Lady Montagu",
    269: "Sterne",
    270: "Miss Edgeworth",
    271: "Richardson",
    273: "Palestrina",
    274: "Grétry",
    275: "Lully",
    276: "Handel",
    277: "Weber",
    278: "Donizeti",
    280: "John of Salisbury",
    281: "Raymond Lully",
    282: "Joachim",
    283: "Nicholas of Cusa",
    284: "Erasmus",
    285: "Sir Thomas More",
    287: "Spinoza",
    288: "Giordano Bruno",
    289: "Malebranche",
    290: "Mademoiselle de Lambert",
    291: "Duclos",
    292: "George Leroy",
    294: "Cujas",
    295: "Maupertuis",
    296: "Herder",
    297: "Wincklemann",
    298: "D'Aguesseau",
    299: "Oken",
    301: "Gibbon",
    302: "Dunoyer",
    303: "Fichte",
    304: "Ferguson",
    305: "Bonald",
    306: "Sophie Germain",
    310: "Guicciardini",
    312: "Sixtus V",
    323: "Oxenstiern",
    324: "Walpole",
    325: "Louis XIV",
    326: "Pombal",
    327: "Campomanes",
    329: "Lambert",
    330: "Hampden",
    331: "Kosciusko",
    332: "Madison",
    333: "Toussaint L'Ouverture",
    336: "Tycho Brahe",
    337: "Halley",
    338: "Varignon",
    339: "John Bernouilli",
    340: "Römer",
    341: "Sauveur",
    343: "Harriot",
    344: "Fermat",
    345: "Poinsot",
    346: "Monge",
    347: "Daniel Bernouilli",
    348: "Joseph Fourier",
    350: "Scheele",
    351: "Davy",
    353: "Geoffroy",
    355: "Ritter",
    357: "Charles Bell",
    358: "Stahl & Barthez",
    359: "Bernard de Jussieu",
    360: "Félix Vicq-d'Azyr",
    361: "Blainville",
    362: "Morgagni",
}
# Day names for leap years: substitute the alternate honoree where one is
# defined for that day index, otherwise keep the common-year name.
day_names_leap = [
    leap_day_replacements[index] if index in leap_day_replacements else name
    for index, name in enumerate(day_names)
]
# Positivist festivals, keyed by (month, day) within the 13-month calendar.
# Each value names what the day celebrates.
festivals = {
    (1, 1): "the Great Being",
    (1, 7): "religion",
    (1, 14): "history",
    (1, 21): "nation",
    (1, 28): "community",
    (2, 7): "complete marriage",
    (2, 14): "chaste marriage",
    (2, 21): "unequal marriage",
    (2, 28): "subjective marriage",
    (3, 7): "natural fatherhood",
    (3, 14): "artificial fatherhood",
    (3, 21): "spiritual fatherhood",
    (3, 28): "temporal fatherhood",
    (4, 7): "natural filiation",
    (4, 14): "artificial filiation",
    (4, 21): "spiritual filiation",
    (4, 28): "temporal filiation",
    (5, 7): "natural brotherhood",
    (5, 14): "artificial brotherhood",
    (5, 21): "spiritual brotherhood",
    (5, 28): "temporal brotherhood",
    (6, 7): "complete permanent domesticity",
    (6, 14): "incomplete permanent domesticity",
    (6, 21): "complete passing domesticity",
    (6, 28): "incomplete passing domesticity",
    (7, 7): "animal gods",
    (7, 14): "fire gods",
    (7, 21): "sun gods",
    (7, 28): "war gods",
    (8, 7): "castes",
    (8, 14): "polytheistic arts",
    (8, 21): "polytheistic theory",
    (8, 28): "polytheistic society",
    (9, 7): "monotheistic theology",
    (9, 14): "Catholocism",
    (9, 21): "Islam",
    (9, 28): "metaphysics",
    (10, 7): "the mother",
    (10, 14): "the wife",
    (10, 21): "the daughter",
    (10, 28): "the sister",
    (11, 7): "artistic intellectuals",
    (11, 14): "scientific intellectuals",
    (11, 21): "secondary intellectual providence",
    (11, 28): "the elderly",
    (12, 7): "the bank",
    (12, 14): "commerce",
    (12, 21): "manufacturing",
    (12, 28): "agriculture",
    (13, 7): "inventors",
    (13, 14): "emotional labor",
    (13, 21): "meditation",
    (13, 28): "passive labor",
    # Month 14 holds the complementary festival day(s).
    (14, 1): "the Dead",
    (14, 2): "Sainted Women",
}
|
mpercich/Calendarize
|
ios/dateparser/lib/python2.7/site-packages/convertdate/data/positivist.py
|
Python
|
mit
| 12,459
|
[
"COLUMBUS",
"Dalton"
] |
5c72b059c91fd34ea2c2d6cec038689e6ca5cffe1666a6a1ff787d54a9d5965d
|
""" The Matcher service provides an XMLRPC interface for matching jobs to pilots
It uses a Matcher and a Limiter object that encapsulated the matching logic.
It connects to JobDB, TaskQueueDB, and PilotAgentsDB.
"""
__RCSID__ = "$Id$"
from DIRAC import gLogger, S_OK, S_ERROR
from DIRAC.Core.Utilities.ThreadScheduler import gThreadScheduler
from DIRAC.Core.DISET.RequestHandler import RequestHandler
from DIRAC.FrameworkSystem.Client.MonitoringClient import gMonitor
from DIRAC.WorkloadManagementSystem.DB.JobDB import JobDB
from DIRAC.WorkloadManagementSystem.DB.TaskQueueDB import TaskQueueDB
from DIRAC.WorkloadManagementSystem.DB.JobLoggingDB import JobLoggingDB
from DIRAC.WorkloadManagementSystem.DB.PilotAgentsDB import PilotAgentsDB
from DIRAC.WorkloadManagementSystem.Client.Matcher import Matcher
from DIRAC.WorkloadManagementSystem.Client.Limiter import Limiter
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
# Module-level database handles, created in initializeMatcherHandler().
# False serves as a "not yet initialized" placeholder.
gJobDB = False
gTaskQueueDB = False
def initializeMatcherHandler(serviceInfo):
    """ Matcher Service initialization

        Creates the shared DB objects, registers the monitoring activities,
        and schedules the periodic task-queue share recalculation.
    """
    global gJobDB
    global gTaskQueueDB
    # NOTE(review): jlDB and pilotAgentsDB have no module-level placeholder;
    # they come into existence here as module globals — confirm intended.
    global jlDB
    global pilotAgentsDB

    gJobDB = JobDB()
    gTaskQueueDB = TaskQueueDB()
    jlDB = JobLoggingDB()
    pilotAgentsDB = PilotAgentsDB()

    # Monitoring activities reported by this service.
    gMonitor.registerActivity('matchTime', "Job matching time",
                              'Matching', "secs", gMonitor.OP_MEAN, 300)
    gMonitor.registerActivity('matchesDone', "Job Match Request",
                              'Matching', "matches", gMonitor.OP_RATE, 300)
    gMonitor.registerActivity('matchesOK', "Matched jobs",
                              'Matching', "matches", gMonitor.OP_RATE, 300)
    gMonitor.registerActivity('numTQs', "Number of Task Queues",
                              'Matching', "tqsk queues", gMonitor.OP_MEAN, 300)

    # Recalculate shares now, then periodically; also report TQ counts.
    gTaskQueueDB.recalculateTQSharesForAll()
    gThreadScheduler.addPeriodicTask(120, gTaskQueueDB.recalculateTQSharesForAll)
    gThreadScheduler.addPeriodicTask(60, sendNumTaskQueues)
    sendNumTaskQueues()
    return S_OK()
def sendNumTaskQueues():
    """Push the current number of task queues to the monitoring system."""
    res = gTaskQueueDB.getNumTaskQueues()
    if not res['OK']:
        gLogger.error("Cannot get the number of task queues", res['Message'])
        return
    gMonitor.addMark('numTQs', res['Value'])
class MatcherHandler(RequestHandler):
    """DISET request handler exposing the job-matching service.

    Thin RPC layer over the Matcher/Limiter logic; relies on the module-level
    DB objects created in initializeMatcherHandler().
    """

    def initialize(self):
        # One Limiter per handler instance, bound to the shared JobDB.
        self.limiter = Limiter(jobDB=gJobDB)

    ##############################################################################
    types_requestJob = [[basestring, dict]]

    def export_requestJob(self, resourceDescription):
        """ Serve a job to the request of an agent which is the highest priority
            one matching the agent's site capacity
        """
        resourceDescription['Setup'] = self.serviceInfoDict['clientSetup']
        credDict = self.getRemoteCredentials()

        try:
            opsHelper = Operations(group=credDict['group'])
            matcher = Matcher(pilotAgentsDB=pilotAgentsDB,
                              jobDB=gJobDB,
                              tqDB=gTaskQueueDB,
                              jlDB=jlDB,
                              opsHelper=opsHelper)
            result = matcher.selectJob(resourceDescription, credDict)
        except RuntimeError as rte:
            self.log.error("Error requesting job: ", rte)
            return S_ERROR("Error requesting job")

        # result can be empty, meaning that no job matched
        if result:
            gMonitor.addMark("matchesDone")
            gMonitor.addMark("matchesOK")
            return S_OK(result)
        # FIXME: This is correctly interpreted by the JobAgent, but DErrno should be used instead
        return S_ERROR("No match found")

    ##############################################################################
    types_getActiveTaskQueues = []

    @staticmethod
    def export_getActiveTaskQueues():
        """ Return all task queues
        """
        return gTaskQueueDB.retrieveTaskQueues()

    ##############################################################################
    types_getMatchingTaskQueues = [dict]

    def export_getMatchingTaskQueues(self, resourceDict):
        """ Return the task queues matching the given resource description.
        """
        # Use site-specific negative conditions when a single site is given.
        if 'Site' in resourceDict and isinstance(resourceDict['Site'], basestring):
            negativeCond = self.limiter.getNegativeCondForSite(resourceDict['Site'])
        else:
            negativeCond = self.limiter.getNegativeCond()
        matcher = Matcher(pilotAgentsDB=pilotAgentsDB,
                          jobDB=gJobDB,
                          tqDB=gTaskQueueDB,
                          jlDB=jlDB)
        resourceDescriptionDict = matcher._processResourceDescription(resourceDict)
        return gTaskQueueDB.retrieveTaskQueuesThatMatch(resourceDescriptionDict, negativeCond=negativeCond)

    ##############################################################################
    types_matchAndGetTaskQueue = [dict]

    @staticmethod
    def export_matchAndGetTaskQueue(resourceDict):
        """ Return matching task queues
        """
        return gTaskQueueDB.matchAndGetTaskQueue(resourceDict)
|
arrabito/DIRAC
|
WorkloadManagementSystem/Service/MatcherHandler.py
|
Python
|
gpl-3.0
| 4,882
|
[
"DIRAC"
] |
2bdb53b5c27aee1e46e85f76276b8f9297a7c99fde9db0a180aff3acf4ce6315
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from parsimonious.grammar import Grammar
from parsimonious.nodes import NodeVisitor
import sys
# PEG grammar for a (very rough) HTML matcher, fed to parsimonious.
# NOTE: this must be a RAW string — the active ``tag`` rule uses the regex
# escapes \b and \1, which a normal string literal silently turns into the
# control characters \x08 (backspace) and \x01, corrupting the pattern.
# Most rules are commented out and kept for reference/experimentation.
grammar = r"""
#grammar HTML
document = (tag / anyword)*
# document = (doctype / text / tag)*
# tag = open_tag (text / tag)* close_tag
open_tag = "<" ~"[\w\W]" ">"
# close_tag = "</" ~"[\w\W]+" ">"
# doctype = "<!DOCTYPE " ~"[\w\W]+" ">"
# text = ~"[^<]+"
#tag = ~"<([a-z]+)([^<]+)*(?:>(.*)<\/\1>|\s+\/>)"
tag = ~"<([A-Z][A-Z0-9]*)\b[^>]*>(.*?)</\1>"
anyword = ~"[\w\W]*" ws*
ws = ~"[\s]+"
"""
class EntryVisitor(NodeVisitor):
    """Walk a parsimonious parse tree and collect matched rule texts.

    Results accumulate in ``self.entry``.  Note that the ``text`` and
    ``doctype`` rules are commented out in the active grammar, so the
    corresponding visitors will not fire with it — presumably kept for
    experimentation with the alternative grammars above.
    """

    def __init__(self, grammar, text):
        # rule name -> matched text
        self.entry = {}
        # Compile the grammar and parse the input immediately.
        ast = Grammar(grammar).parse(text)
        self.visit(ast)

    def visit_text(self, n, vc):
        self.entry['text'] = n.text

    def visit_doctype(self, n, vc):
        self.entry['doctype'] = n.text

    # def visit_anyword(self, n, vc):
    #     self.entry['anyword'] = n.text

    # def visit_variable(self, n, vc):
    #     self.entry['variable'] = n.text

    # def visit_literal(self, n, vc):
    #     self.entry['literal'] = n.text

    def generic_visit(self, n, vc):
        # Ignore any node without a dedicated visitor.
        pass
# Compile the PEG grammar (rebinding the name ``grammar`` from the raw
# grammar string to the parsimonious Grammar object).
grammar = Grammar(grammar)

# Parse the file named on the command line and dump the raw parse tree.
filename = sys.argv[1]
with open(filename) as file:
    contents = file.read()
print( grammar.parse(contents) )
#print(contents)
#EntryVisitor(grammar, contents).entry
# NOTE(review): EntryVisitor re-parses the same input a second time.
print( EntryVisitor(grammar, contents).entry )
#
#for line in calc_text.splitlines():
#    print( EntryVisitor(grammar, line).entry )

#for line in text.splitlines():
#    print( EntryParser(grammar, line).entry )
|
codeyash/plugins
|
PyPlugins/PhpParser/py/html.py
|
Python
|
apache-2.0
| 1,709
|
[
"VisIt"
] |
1849b5662531bdec7cc078c02edd77b4267d926a1e7667c829da949aa4aef98d
|
""" Some utilities for FTS3...
"""
import json
import datetime
import random
import threading
from DIRAC.DataManagementSystem.Client.DataManager import DataManager
from DIRAC.FrameworkSystem.Client.Logger import gLogger
from DIRAC.Core.Utilities.Decorators import deprecated
from DIRAC.Core.Utilities.ReturnValues import S_OK, S_ERROR
from DIRAC.ResourceStatusSystem.Client.ResourceStatus import ResourceStatus
def _checkSourceReplicas(ftsFiles):
    """ Check the active replicas

        :params ftsFiles: list of FT3Files

        :returns: Successful/Failed {lfn : { SE1 : PFN1, SE2 : PFN2 } , ... }
    """
    # Deduplicate the LFNs before querying the DataManager.
    uniqueLfns = {ftsFile.lfn for ftsFile in ftsFiles}
    return DataManager().getActiveReplicas(list(uniqueLfns))
def selectUniqueRandomSource(ftsFiles, allowedSources=None):
    """
        For a list of FTS3files object, select a random source, and group the files by source.

        :param allowedSources: list of allowed sources
        :param ftsFiles: list of FTS3File object

        :return: S_OK({ sourceSE: [ FTS3Files] })
    """
    _log = gLogger.getSubLogger("selectUniqueRandomSource")

    allowedSourcesSet = set(allowedSources) if allowedSources else set()

    # groupBySource maps each chosen source SE to the files taken from it
    groupBySource = {}

    # For all files, check which possible sources they have
    res = _checkSourceReplicas(ftsFiles)
    if not res['OK']:
        return res

    filteredReplicas = res['Value']

    for ftsFile in ftsFiles:
        # Files whose replica lookup failed are logged and skipped.
        if ftsFile.lfn in filteredReplicas['Failed']:
            _log.error("Failed to get active replicas", "%s,%s" %
                       (ftsFile.lfn, filteredReplicas['Failed'][ftsFile.lfn]))
            continue

        replicaDict = filteredReplicas['Successful'][ftsFile.lfn]

        # Only consider the allowed sources
        # If we have a restriction, apply it, otherwise take all the replicas
        allowedReplicaSource = (set(replicaDict) & allowedSourcesSet) if allowedSourcesSet else replicaDict

        # pick a random source
        randSource = random.choice(list(allowedReplicaSource))  # one has to convert to list
        groupBySource.setdefault(randSource, []).append(ftsFile)

    return S_OK(groupBySource)
def groupFilesByTarget(ftsFiles):
    """
        For a list of FTS3files object, group the Files by target

        :param ftsFiles: list of FTS3File object
        :return: S_OK({targetSE : [ ftsFiles] })
    """
    # Bucket each file under its target SE.
    targetGroups = {}
    for ftsFile in ftsFiles:
        targetSE = ftsFile.targetSE
        if targetSE not in targetGroups:
            targetGroups[targetSE] = []
        targetGroups[targetSE].append(ftsFile)
    return S_OK(targetGroups)
class FTS3Serializable(object):
    """ This is the base class for all the FTS3 objects that
        needs to be serialized, so FTS3Operation, FTS3File
        and FTS3Job

        The inheriting classes just have to define a class
        attribute called _attrToSerialize, which is a list of
        strings, which correspond to the name of the attribute
        they want to serialize
    """
    # Format used to (de)serialize datetime attributes.
    _datetimeFormat = '%Y-%m-%d %H:%M:%S'
    # MUST BE OVERWRITTEN IN THE CHILD CLASS
    _attrToSerialize = []

    def toJSON(self, forPrint=False):
        """ Returns the JSON formated string

            :param forPrint: if set to True, we don't include
                the 'magic' arguments used for rebuilding the
                object
        """
        jsonStr = json.dumps(self, cls=FTS3JSONEncoder, forPrint=forPrint)
        return jsonStr

    def __str__(self):
        # Pretty-print the (forPrint) JSON form for human consumption.
        import pprint
        js = json.loads(self.toJSON(forPrint=True))
        return pprint.pformat(js)

    def _getJSONData(self, forPrint=False):
        """ Returns the data that have to be serialized by JSON

            :param forPrint: if set to True, we don't include
                the 'magic' arguments used for rebuilding the
                object

            :return: dictionary to be transformed into json
        """
        jsonData = {}
        datetimeAttributes = []
        for attrName in self._attrToSerialize:
            # IDs might not be set since it is managed by SQLAlchemy
            if not hasattr(self, attrName):
                continue
            value = getattr(self, attrName)
            if isinstance(value, datetime.datetime):
                # We convert date time to a string
                jsonData[attrName] = value.strftime(self._datetimeFormat)
                datetimeAttributes.append(attrName)
            else:
                jsonData[attrName] = value
        if not forPrint:
            # 'Magic' keys that let FTS3JSONDecoder rebuild the object.
            jsonData['__type__'] = self.__class__.__name__
            jsonData['__module__'] = self.__module__
            jsonData['__datetime__'] = datetimeAttributes
        return jsonData
class FTS3JSONEncoder(json.JSONEncoder):
    """ JSON encoder for FTS3 objects.

        Any object exposing a _getJSONData method is serialized through it;
        everything else falls back to the standard encoder.
    """

    def __init__(self, *args, **kwargs):
        # 'forPrint' is our own keyword; strip it before handing the rest
        # of the arguments to the base encoder.
        self._forPrint = kwargs.pop('forPrint', False)
        super(FTS3JSONEncoder, self).__init__(*args, **kwargs)

    def default(self, obj):  # pylint: disable=method-hidden
        if hasattr(obj, '_getJSONData'):
            return obj._getJSONData(forPrint=self._forPrint)
        return json.JSONEncoder.default(self, obj)
class FTS3JSONDecoder(json.JSONDecoder):
    """ This class is a decoder for the FTS3 objects

        Dictionaries carrying the '__type__'/'__module__' magic keys (added
        by FTS3JSONEncoder) are rebuilt into instances of the original class;
        plain dictionaries are returned untouched.
    """

    def __init__(self, *args, **kargs):
        json.JSONDecoder.__init__(self, object_hook=self.dict_to_object,
                                  *args, **kargs)

    def dict_to_object(self, dataDict):
        """ Convert the dictionary into an object

            :param dataDict: decoded JSON dictionary
            :return: rebuilt object, or dataDict itself when it does not
                describe an FTS3 object or cannot be rebuilt
        """
        import importlib
        # If it is not an FTS3 object, just return the structure as is
        if not ('__type__' in dataDict and '__module__' in dataDict):
            return dataDict

        # Get the class and module
        className = dataDict.pop('__type__')
        modName = dataDict.pop('__module__')
        datetimeAttributes = dataDict.pop('__datetime__', [])
        datetimeSet = set(datetimeAttributes)
        try:
            # Load the module
            mod = importlib.import_module(modName)
            # import the class
            cl = getattr(mod, className)
            # Instantiate the object
            obj = cl()

            # Set each attribute.  Use items() rather than the Python 2-only
            # iteritems() so the decoder works on both Python 2 and 3.
            for attrName, attrValue in dataDict.items():
                # If the value is None, do not set it
                # This is needed to play along well with SQLalchemy
                if attrValue is None:
                    continue
                if attrName in datetimeSet:
                    attrValue = datetime.datetime.strptime(attrValue, FTS3Serializable._datetimeFormat)
                setattr(obj, attrName, attrValue)

            return obj

        except Exception as e:
            gLogger.error('exception in FTS3JSONDecoder %s for type %s' % (e, className))
            # Put the magic keys back so the caller still gets usable data.
            dataDict['__type__'] = className
            dataDict['__module__'] = modName
            dataDict['__datetime__'] = datetimeAttributes
            return dataDict
# Thread-local storage used by FTS3ServerPolicy._randomServerPolicy to keep
# one shuffled server list per thread.
threadLocal = threading.local()
class FTS3ServerPolicy(object):
    """
        This class manages the policy for choosing a server
    """

    def __init__(self, serverDict, serverPolicy="Random"):
        """
        Call the init of the parent, and initialize the list of FTS3 servers

        :param serverDict: mapping of server name to server value
        :param serverPolicy: "Random", "Sequence" or "Failover" (case-insensitive)
        """
        self.log = gLogger.getSubLogger("FTS3ServerPolicy")

        self._serverDict = serverDict
        # Materialize the keys into a list: the policy methods index into it,
        # and on Python 3 dict.keys() returns a non-indexable view.
        self._serverList = list(serverDict)
        self._maxAttempts = len(self._serverList)
        self._nextServerID = 0
        self._resourceStatus = ResourceStatus()

        # Resolve the policy method by name, falling back to Random.
        methName = "_%sServerPolicy" % serverPolicy.lower()
        if not hasattr(self, methName):
            self.log.error('Unknown server policy %s. Using Random instead' % serverPolicy)
            methName = "_randomServerPolicy"

        self._policyMethod = getattr(self, methName)

    def _failoverServerPolicy(self, _attempt):
        """
        Returns always the server at a given position (normally the first one)

        :param _attempt: position of the server in the list
        """
        if _attempt >= len(self._serverList):
            raise Exception(
                "FTS3ServerPolicy.__failoverServerPolicy: attempt to reach non existing server index")
        return self._serverList[_attempt]

    def _sequenceServerPolicy(self, _attempt):
        """
        Every time this policy is called, return the next server on the list
        """
        fts3server = self._serverList[self._nextServerID]
        self._nextServerID = (self._nextServerID + 1) % len(self._serverList)
        return fts3server

    def _randomServerPolicy(self, _attempt):
        """
        return a server from shuffledServerList
        """
        # The shuffled list is kept per thread to avoid sharing state.
        if getattr(threadLocal, 'shuffledServerList', None) is None:
            threadLocal.shuffledServerList = self._serverList[:]
            random.shuffle(threadLocal.shuffledServerList)

        fts3Server = threadLocal.shuffledServerList[_attempt]

        # Reshuffle once the whole list has been tried.
        if _attempt == self._maxAttempts - 1:
            random.shuffle(threadLocal.shuffledServerList)

        return fts3Server

    def _getFTSServerStatus(self, ftsServer):
        """ Fetch the status of the FTS server from RSS """
        res = self._resourceStatus.getElementStatus(ftsServer, 'FTS')
        if not res['OK']:
            return res

        result = res['Value']
        if ftsServer not in result:
            return S_ERROR("No FTS Server %s known to RSS" % ftsServer)

        if result[ftsServer]['all'] == 'Active':
            return S_OK(True)

        return S_OK(False)

    def chooseFTS3Server(self):
        """
        Choose the appropriate FTS3 server depending on the policy
        """
        fts3Server = None
        attempt = 0

        # Try up to one server per entry, skipping unhealthy/unknown ones.
        while not fts3Server and attempt < self._maxAttempts:
            fts3Server = self._policyMethod(attempt)
            res = self._getFTSServerStatus(fts3Server)

            if not res['OK']:
                self.log.warn("Error getting the RSS status for %s: %s" % (fts3Server, res))
                fts3Server = None
                attempt += 1
                continue

            ftsServerStatus = res['Value']
            if not ftsServerStatus:
                self.log.warn('FTS server %s is not in good shape. Choose another one' % fts3Server)
                fts3Server = None
                attempt += 1

        if fts3Server:
            return S_OK(self._serverDict[fts3Server])

        return S_ERROR("Could not find an FTS3 server (max attempt reached)")
|
fstagni/DIRAC
|
DataManagementSystem/private/FTS3Utilities.py
|
Python
|
gpl-3.0
| 9,882
|
[
"DIRAC"
] |
cae5aa31f6e78d09052fec3cfdad8d38ac7211f8b0a880f411b4295ffbacc9b1
|
from forte.solvers import solver_factory, HF, CallbackHandler
def test_hf_callback():
    """Example of using a callback to localize HF MOs."""
    # Linear H4 chain; symmetry disabled so localization is unconstrained.
    xyz = """
H 0.0 0.0 0.0
H 0.0 0.0 1.0
H 0.0 0.0 2.0
H 0.0 0.0 3.0
symmetry c1
"""

    # Build the solver input and the neutral singlet state.
    input = solver_factory(molecule=xyz, basis='sto-3g')
    state = input.state(charge=0, multiplicity=1)

    cbh = CallbackHandler()

    def localize(cb, state):
        """Localize the orbitals after a HF computation"""
        import psi4
        wfn = state.data.psi_wfn
        basis_ = wfn.basisset()
        C = wfn.Ca_subset("AO", "ALL")
        Local = psi4.core.Localizer.build("PIPEK_MEZEY", basis_, C)
        Local.localize()
        new_C = Local.L
        # Overwrite both spin channels with the localized coefficients.
        wfn.Ca().copy(new_C)
        wfn.Cb().copy(new_C)

    # Register the localization step to run right after the HF solve.
    cbh.add_callback('post hf', localize)

    hf = HF(input, state=state, restricted=False, cbh=cbh)
    hf.run()
# Allow running this test directly as a script.
if __name__ == "__main__":
    test_hf_callback()
|
evangelistalab/forte
|
tests/pytest/hf/test_hf_callback.py
|
Python
|
lgpl-3.0
| 948
|
[
"Psi4"
] |
6c24f28236d82fc9a056d7978ed63e02ee429075b834758ca4c6f0c564ee0c10
|
#!/usr/bin/env python3
""" Computer-based immigration office for Kanadia """
__author__ = 'Susan Sim'
__author__ = 'Zhao'
__email__ = "ses@drsusansim.org"
__copyright__ = "2014 Susan Sim"
__license__ = "MIT License"
__status__ = "Prototype"
# imports one per line
import re
import datetime
import json
def decide(input_file, watchlist_file, countries_file):
    """
    Decides whether a traveller's entry into Kanadia should be accepted

    :param input_file: The name of a JSON formatted file that contains cases to decide
    :param watchlist_file: The name of a JSON formatted file that contains names and passport numbers on a watchlist
    :param countries_file: The name of a JSON formatted file that contains country data, such as whether
        an entry or transit visa is required, and whether there is currently a medical advisory
    :return: List of strings. Possible values of strings are: "Accept", "Reject", "Secondary", and "Quarantine"
    """
    mark = ""
    result = []
    # Read test_file, watchlist, country_list from JSON files
    try:
        with open(input_file) as file_reader:
            test_file = file_reader.read()
        test_file = json.loads(test_file)
        with open(watchlist_file) as file_reader:
            watchlist = file_reader.read()
        watchlist = json.loads(watchlist)
        with open(countries_file) as file_reader:
            country_list = file_reader.read()
        country_list = json.loads(country_list)
    except:
        # NOTE(review): bare except — any failure here (including malformed
        # JSON) is reported as "File not found" and re-raised as
        # FileNotFoundError; confirm this matches the assignment spec.
        print("File not found")
        raise FileNotFoundError
    for entry in test_file:
        # Check if the entry record is complete
        if valid_date_format(entry["birth_date"]) and \
                valid_passport_format(entry["passport"]) and \
                valid_reason_format(entry["entry_reason"]) and \
                valid_location_format(entry["home"], entry["from"]) and \
                valid_name_format(entry["first_name"], entry["last_name"]):
            # Check if entry record matches information in country_list
            for country_key, country_val in country_list.items():
                # Check if need quarantine
                if entry["from"]["country"].upper() == country_key and \
                        country_val["medical_advisory"]:
                    mark = ["Quarantine"]
                # Check if traveller needs a valid transit or visit visa
                elif entry["home"]["country"].upper() == country_key:
                    if (country_val["visitor_visa_required"] == "1" and
                            entry["entry_reason"] == "visit") or (country_val[
                                "transit_visa_required"] == "1" and
                            entry["entry_reason"] == "transit"):
                        # Check if the visa is valid
                        if valid_visa(entry["visa"]):
                            mark = ["Accept"]
                        else:
                            mark = ["Reject"]
                    else:
                        mark = ["Accept"]
            # Quarantine takes precedence over every later decision.
            if mark != ["Quarantine"]:
                # Check to see if traveller is a returning citizen
                if entry["entry_reason"] == "returning" and \
                        entry["home"]["country"].upper() == "KAN":
                    mark = ["Accept"]
                # Check to see if entry record is in watchlist
                for info in watchlist:
                    if entry["passport"].upper() == info["passport"].upper() \
                            or (entry["first_name"].upper() == info[
                                "first_name"].upper() and
                                entry["last_name"].upper() == info[
                                    "last_name"].upper()):
                        mark = ["Secondary"]
            # NOTE(review): if no branch above assigned ``mark`` it is still
            # "", and ``result += ""`` appends nothing for this entry —
            # confirm every complete record is expected to hit a branch.
            result += mark
            mark = ""
        # Return reject if entry record is not complete
        else:
            result += ["Reject"]
    return result
def valid_passport_format(passport_number):
    """
    Checks whether a passport number is five sets of
    five alpha-numeric characters separated by dashes
    :param passport_number: alpha-numeric string
    :return: Boolean; True if the format is valid, False otherwise
    """
    # Raw string so the \w escapes reach the regex engine untouched.
    passport_format = re.compile(r'^\w{5}-\w{5}-\w{5}-\w{5}-\w{5}$')
    # match() returns a Match object or None; coerce to a plain bool.
    return bool(passport_format.match(passport_number))
def valid_visa(visa):
    """
    Checks whether a visa data is two sets of five alpha-numeric characters
    separated by dashes and is less than two years old
    :param visa: dict with "code" and "date" entries
    :return: Boolean; True if the format is valid and not expired,
        False otherwise
    """
    # Raw string so \w is passed through to the regex engine untouched.
    visa_format = re.compile(r'^\w{5}-\w{5}$')
    # Both the code format and the date format must be valid.
    if not (visa_format.match(visa["code"]) and valid_date_format(visa["date"])):
        return False
    # A visa is considered valid for two years (730 days) from issue.
    age = datetime.datetime.today() - datetime.datetime.strptime(
        visa["date"], "%Y-%m-%d")
    return age < datetime.timedelta(730)
def valid_date_format(date_string):
    """
    Checks whether a date has the format YYYY-mm-dd in numbers
    :param date_string: date to be checked
    :return: Boolean True if the format is valid, False otherwise
    """
    # strptime raises ValueError for anything that is not a real
    # YYYY-mm-dd date (including impossible dates like 2014-02-30).
    try:
        datetime.datetime.strptime(date_string, '%Y-%m-%d')
    except ValueError:
        return False
    return True
def valid_name_format(first_name, last_name):
    """
    Checks whether first name and last name both use alphabetical characters
    :param first_name: alphabetical string
    :param last_name: alphabetical string
    :return: Boolean; True if valid, False otherwise
    """
    # isalpha() is False for empty strings, digits and punctuation, so the
    # conjunction is exactly the check we need.
    return first_name.isalpha() and last_name.isalpha()
def valid_location_format(home_location, from_location):
    """
    Checks whether home location and from location are on the list of
    existing countries.

    :param home_location: dict with a "country" 3-letter code entry
    :param from_location: dict with a "country" 3-letter code entry
    :return: Boolean; True if valid, False otherwise
    """
    # Pre-approved 3-letter country codes; comparison is case-insensitive
    # because each code is upper-cased before the membership test
    preapproved_countries = ("ALB", "BRD", "CFR", "DSK", "ELE", "FRY", "GOR",
                             "HJR", "III", "JIK", "KAN", "KRA", "LUG")
    return all(location["country"].upper() in preapproved_countries
               for location in (home_location, from_location))
def valid_reason_format(entry_reason):
    """
    Checks whether the reason for entry is returning, transit, or visit.

    :param entry_reason: expected to be "returning", "transit", or "visit"
    :return: Boolean; True if valid, False otherwise
    """
    # Membership test against the closed set of accepted reasons
    return entry_reason in ("returning", "transit", "visit")
|
ZhaoH/inf1340_2014_asst2_Halie_Zhao
|
papers.py
|
Python
|
mit
| 7,417
|
[
"VisIt"
] |
e04c6bd957be5b89626cf335be575cec34c957cced62d7d64cf13b070959d75f
|
from __future__ import division
from __future__ import print_function
import argparse
from astropy.io import fits
import json
import numpy as np
from numpy.polynomial import Polynomial
import os
from uuid import uuid4
from numina.array.display.polfit_residuals import polfit_residuals
from numina.array.display.polfit_residuals import \
polfit_residuals_with_sigma_rejection
from numina.array.display.pause_debugplot import pause_debugplot
from numina.array.display.ximplotxy import ximplotxy
# from numina.array.wavecalib.arccalibration import fit_list_of_wvfeatures
from numina.array.wavecalib.check_wlcalib import match_wv_arrays
from numina.array.wavecalib.peaks_spectrum import find_peaks_spectrum
from numina.array.wavecalib.peaks_spectrum import refine_peaks_spectrum
# from numina.array.wavecalib.solutionarc import WavecalFeature
from numina.array.display.pause_debugplot import DEBUGPLOT_CODES
def filter_bad_fits(wlcalib_file, times_sigma_reject, debugplot):
    """Extract useful information from master_wlcalib.

    Obtain the variation of each coefficient of the wavelength
    calibration polynomial as a function of the fiber number (assuming
    that the first fiber is fibid=1 and not 0).

    Parameters
    ----------
    wlcalib_file : file handler
        JSON file containing the initial wavelength calibration.
    times_sigma_reject : float
        Times sigma to reject points in fits.
    debugplot : int
        Debugging level for messages and plots. For details see
        'numina.array.display.pause_debugplot.py'.

    Returns
    -------
    poldeg : int
        Polynomial degree (must be the same for all the fibers).
    list_poly: list of polynomial instances
        List containing the polynomial variation of each wavelength
        calibration polynomial coefficient as a function of the fiber
        number.

    Raises
    ------
    ValueError
        If the fibers do not all share the same polynomial degree.
    """
    reject_all = [None]  # avoid PyCharm warning
    # load the full JSON calibration structure; each entry of 'contents'
    # holds one fiber's wavelength solution
    megadict = json.loads(open(wlcalib_file.name).read())
    contents_list = megadict['contents']
    fibid = np.array([contents['fibid'] for contents in contents_list])
    # number of coefficients per fiber; all fibers must agree
    poldeg = [len(contents['solution']['coeff']) for contents in contents_list]
    if len(set(poldeg)) == 1:
        # degree = number of coefficients - 1
        poldeg = poldeg[0] - 1
    else:
        raise ValueError("Non unique polynomial degree!")
    if abs(debugplot) >= 10:
        print('Polynomial degree:', poldeg)
    # determine bad fits from each independent polynomial coefficient:
    # fit coefficient a_i vs. fiber number and accumulate the union of
    # sigma-rejected fibers across all coefficients
    for i in range(poldeg + 1):
        coeff = np.array([contents['solution']['coeff'][i] for
                          contents in contents_list])
        # NOTE(review): deg=5 for the coefficient-vs-fiber fit is
        # hard-coded here and below — confirm it suits all instrument setups
        poly, yres, reject = polfit_residuals_with_sigma_rejection(
            x=fibid,
            y=coeff,
            deg=5,
            times_sigma_reject=times_sigma_reject,
        )
        if abs(debugplot) % 10 != 0:
            polfit_residuals(x=fibid, y=coeff, deg=5, reject=reject,
                             xlabel='fibid',
                             ylabel='coeff a_' + str(i),
                             title='Identifying bad fits',
                             debugplot=debugplot)
        if i == 0:
            # first coefficient initializes the rejection mask
            reject_all = np.copy(reject)
            if abs(debugplot) >= 10:
                print('coeff a_' + str(i) + ': ', sum(reject_all))
        else:
            # add new bad fits (union with previous rejections)
            reject_all = np.logical_or(reject_all, reject)
            if abs(debugplot) >= 10:
                print('coeff a_' + str(i) + ': ', sum(reject_all))
    # determine new fits excluding all fibers with bad fits
    poly_list = []
    for i in range(poldeg + 1):
        coeff = np.array([contents['solution']['coeff'][i] for
                          contents in contents_list])
        poly, yres = polfit_residuals(
            x=fibid,
            y=coeff,
            deg=5,
            reject=reject_all,
            xlabel='fibid',
            ylabel='coeff a_' + str(i),
            title='Computing filtered fits',
            debugplot=debugplot
        )
        poly_list.append(poly)
    return poldeg, poly_list
def refine_wlcalib(arc_rss, linelist, poldeg, list_poly, npix=2,
                   times_sigma_reject=5, debugplot=0):
    """Refine wavelength calibration using expected polynomial in each fiber.

    Parameters
    ----------
    arc_rss : file handler
        FITS file containing the uncalibrated RSS.
    linelist : file handler
        ASCII file with the detailed list of expected arc lines.
    poldeg : int
        Polynomial degree (must be the same for all the fibers).
    list_poly: list of polynomial instances
        List containing the polynomial variation of each wavelength
        calibration polynomial coefficient as a function of the fiber
        number.
    npix : int
        Number of pixels around each peak where the expected wavelength
        must match the tabulated wavelength in the master list.
    times_sigma_reject : float
        Times sigma to reject points in fits.
    debugplot : int
        Debugging level for messages and plots. For details see
        'numina.array.display.pause_debugplot.py'.

    Returns
    -------
    missing_fibers : list
        List of missing fibers.
    contents : list of dictionaries
        Contents including the refined wavelength calibration. This
        list should replace the 'contents' section of the JSON file
        containing the master_wlcalib results.
    """
    # read input FITS file; each image row is one fiber spectrum
    hdulist = fits.open(arc_rss)
    image2d = hdulist[0].data
    hdulist.close()
    naxis2, naxis1 = image2d.shape
    if abs(debugplot) >= 10:
        print('>>> Reading file:', arc_rss.name)
        print('>>> NAXIS1:', naxis1)
        print('>>> NAXIS2:', naxis2)
    # read list of expected arc lines (wavelengths in the first column)
    master_table = np.genfromtxt(linelist)
    wv_master = master_table[:, 0]
    if abs(debugplot) >= 10:
        print('wv_master:', wv_master)
    # abscissae for plots (1-based pixel numbering)
    xp = np.arange(1, naxis1 + 1)
    # initialized output lists
    missing_fibers = []
    contents = []
    # loop in naxis2: process each fiber independently
    for ifib in range(1, naxis2 + 1):
        sp = image2d[ifib - 1, :]
        # find initial line peaks
        nwinwidth_initial = 7
        ixpeaks = find_peaks_spectrum(sp, nwinwidth=nwinwidth_initial)
        # check there are enough lines for fit: need at least poldeg+1
        # points to constrain a degree-poldeg polynomial
        if len(ixpeaks) <= poldeg:
            print('WARNING: fibid, number of peaks:', ifib, len(ixpeaks))
            missing_fibers.append(ifib)
        else:
            # refine location of line peaks to sub-pixel precision
            nwinwidth_refined = 5
            fxpeaks, sxpeaks = refine_peaks_spectrum(
                sp, ixpeaks,
                nwinwidth=nwinwidth_refined,
                method="gaussian"
            )
            if abs(debugplot) >= 10:
                print(">>> Number of lines found:", len(fxpeaks))
            # expected wavelength calibration polynomial for current fiber,
            # built from the smoothed coefficient-vs-fiber polynomials
            coeff = np.zeros(poldeg + 1)
            for k in range(poldeg + 1):
                dumpol = list_poly[k]
                coeff[k] = dumpol(ifib)
            wlpol = Polynomial(coeff)
            if abs(debugplot) >= 10:
                print(">>> Expected calibration polynomial:", wlpol)
            # expected wavelength of all identified peaks
            # (shift peak indices to 1-based pixel coordinates)
            xchannel = fxpeaks + 1.0
            wv_expected_all_peaks = wlpol(xchannel)
            if abs(debugplot) in [21, 22]:
                for dum in zip(xchannel, wv_expected_all_peaks):
                    print('x, w:', dum)
                pause_debugplot(debugplot)
            # assign individual arc lines from master list to spectrum
            # line peaks when the expected wavelength is within the maximum
            # allowed range (+/- npix around the peak)
            crmin1_linear = wlpol(1)
            crmax1_linear = wlpol(naxis1)
            cdelt1_linear = (crmax1_linear - crmin1_linear) / (naxis1 - 1)
            delta_wv_max = npix * cdelt1_linear
            # iteration #1: find overall offset
            wv_verified_all_peaks = match_wv_arrays(
                wv_master,
                wv_expected_all_peaks,
                delta_wv_max=delta_wv_max
            )
            # unmatched peaks are flagged with non-positive values
            lines_ok = np.where(wv_verified_all_peaks > 0)
            wv_offsets_all_peaks = wv_verified_all_peaks-wv_expected_all_peaks
            overall_offset = np.median(wv_offsets_all_peaks[lines_ok])
            # iteration #2: use previous overall offset
            wv_expected_all_peaks += overall_offset
            wv_verified_all_peaks = match_wv_arrays(
                wv_master,
                wv_expected_all_peaks,
                delta_wv_max=delta_wv_max
            )
            # fit with sigma rejection: pixel position vs. wavelength
            lines_ok = np.where(wv_verified_all_peaks > 0)
            xdum = (fxpeaks + 1.0)[lines_ok]
            ydum = wv_verified_all_peaks[lines_ok]
            poly, yres, reject = polfit_residuals_with_sigma_rejection(
                x=xdum,
                y=ydum,
                deg=poldeg,
                times_sigma_reject=times_sigma_reject,
                debugplot=debugplot
            )
            # effective number of points (not rejected by sigma clipping)
            npoints_eff = np.sum(np.logical_not(reject))
            # residual standard deviation of the accepted points
            sum_res2 = np.sum(yres[np.logical_not(reject)]**2)
            residual_std = np.sqrt(sum_res2/(npoints_eff - poldeg - 1))
            if True:  # abs(debugplot) >= 10:
                print("ifib, npoints_eff, residual_std:",
                      ifib, npoints_eff, residual_std)
                print("  poly.coef:", poly.coef)
            # generate dictionary with results associated with current fiber,
            # including the equivalent linear WCS parameters
            crmin1_linear = poly(1)
            crmax1_linear = poly(naxis1)
            cdelt1_linear = (crmax1_linear - crmin1_linear) / (naxis1 - 1)
            dumdict = {
                'fibid': ifib,
                'solution': {
                    'cr_linear': {
                        'crpix': 1.0,
                        'crmin': crmin1_linear,
                        'crmax': crmax1_linear,
                        'crval': crmin1_linear,
                        'cdelt': cdelt1_linear
                    },
                    'coeff': [poly.coef[k] for k in range(poldeg + 1)],
                    'features': [],
                    'npoints_eff': npoints_eff,
                    'residual_std': residual_std
                }
            }
            contents.append(dumdict)
            """
            # generate list of features
            xdum = xdum[np.logical_not(reject)]
            ydum = ydum[np.logical_not(reject)]
            list_of_wvfeatures = []
            for i in range(len(xdum)):
                wvfeature = WavecalFeature(
                    line_ok=True,
                    category="",
                    lineid=-1,
                    funcost=0.0,
                    xpos=xdum[i],
                    ypos=0.0,
                    peak=0.0,
                    fwhm=0.0,
                    reference=ydum[i]
                )
                list_of_wvfeatures.append(wvfeature)
            solution_wv = fit_list_of_wvfeatures(
                list_of_wvfeatures=list_of_wvfeatures,
                naxis1_arc=naxis1,
                crpix1=1.0,
                poly_degree_wfit=poldeg,
                weighted=False,
                debugplot=debugplot,
                plot_title=arc_rss.name + ' [fiber #' + str(ifib) + ']\n' +
                           linelist.name
            )
            if True:  #abs(debugplot) >= 10:
                print("ifib, solution_wv.coeff:\n", ifib, solution_wv.coeff)
            """
            # display spectrum and peaks
            if abs(debugplot) % 10 != 0:
                title = arc_rss.name + ' [fiber #' + str(ifib) + ']'
                ax = ximplotxy(xp, sp,
                               xlabel='pixel (from 1 to NAXIS1)',
                               ylabel='number of counts',
                               title=title,
                               show=False, debugplot=debugplot)
                ymin = sp.min()
                ymax = sp.max()
                dy = ymax - ymin
                ymin -= dy / 20.
                ymax += dy / 20.
                ax.set_ylim([ymin, ymax])
                # mark peak location
                ax.plot(ixpeaks + 1, sp[ixpeaks], 'bo',
                        label="initial location")
                ax.plot(fxpeaks + 1, sp[ixpeaks], 'go',
                        label="refined location")
                ax.plot((fxpeaks + 1)[lines_ok], sp[ixpeaks][lines_ok], 'mo',
                        label="identified lines")
                # annotate each identified peak with its wavelength
                for i in range(len(ixpeaks)):
                    if wv_verified_all_peaks[i] > 0:
                        ax.text(fxpeaks[i] + 1.0, sp[ixpeaks[i]],
                                wv_verified_all_peaks[i], fontsize=8,
                                horizontalalignment='center')
                # legend
                ax.legend(numpoints=1)
                # show plot
                pause_debugplot(debugplot, pltshow=True)
    return missing_fibers, contents
def main(args=None):
    """Command-line entry point: refine a master wavelength calibration.

    Reads an uncalibrated arc RSS image, an initial wavelength
    calibration (JSON) and an arc-line list, refines the calibration
    fiber by fiber and writes a '<wlcalib_file>_refined' JSON file in
    the current directory.
    """
    # command-line interface
    parser = argparse.ArgumentParser(prog='refine_master_wlcalib')
    # positional parameters
    parser.add_argument("uncalibrated_arc_rss",
                        type=argparse.FileType('r'),
                        help="FITS image containing wavelength uncalibrated "
                             "RSS")
    parser.add_argument("wlcalib_file",
                        type=argparse.FileType('r'),
                        help="JSON file with initial wavelength calibration")
    parser.add_argument("linelist",
                        type=argparse.FileType('r'),
                        help="ASCII file with detailed list of expected "
                             "arc lines")
    parser.add_argument("--debugplot",
                        type=int, default=0,
                        choices=DEBUGPLOT_CODES,
                        help="integer indicating plotting/debugging" +
                             " (default=0)")
    args = parser.parse_args(args=args)

    # smooth each calibration coefficient vs. fiber number, rejecting bad fits
    poldeg, list_poly = filter_bad_fits(
        args.wlcalib_file,
        times_sigma_reject=5.0,
        debugplot=args.debugplot
    )

    # refine the wavelength calibration fiber by fiber
    missing_fibers, contents = refine_wlcalib(
        args.uncalibrated_arc_rss,
        args.linelist,
        poldeg,
        list_poly,
        npix=2,
        times_sigma_reject=5.0,
        debugplot=args.debugplot
    )

    # update the original JSON structure with the refined results and
    # a fresh UUID identifying this calibration
    megadict = json.loads(open(args.wlcalib_file.name).read())
    megadict['missing_fibers'] = missing_fibers
    megadict['contents'] = contents
    megadict['uuid'] = str(uuid4())

    outfile = os.path.basename(args.wlcalib_file.name) + '_refined'
    print("Generating: " + outfile)
    with open(outfile, 'w') as fstream:
        json.dump(megadict, fstream, indent=2, sort_keys=True)


if __name__ == "__main__":
    main()
|
nicocardiel/xmegara
|
refine_master_wlcalib.py
|
Python
|
gpl-3.0
| 14,977
|
[
"Gaussian"
] |
841e7e55c109a01654622575ec4aba233a021a3740e315cdac77adb3698edbf3
|
#!/usr/bin/python
# coding: utf-8
# Ubuntu Tweak - PyGTK based desktop configuration tool
#
# Copyright (C) 2007-2008 TualatriX <tualatrix@gmail.com>
#
# Ubuntu Tweak is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Ubuntu Tweak is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ubuntu Tweak; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
import os
import re
import json
import time
import urllib
import thread
import apt_pkg
import logging
import gettext
import subprocess
from urllib2 import urlopen, Request, URLError
from gettext import ngettext
from aptsources.sourceslist import SourcesList
from gi.repository import Gtk, Gdk, GdkPixbuf
from gi.repository import Pango
from gi.repository import GObject
from gi.repository import Notify
from ubuntutweak import system
from ubuntutweak.common import consts
from ubuntutweak.common.debug import log_func
from ubuntutweak.modules import TweakModule
from ubuntutweak.policykit.dbusproxy import proxy
from ubuntutweak.gui.widgets import CheckButton
from ubuntutweak.gui.dialogs import QuestionDialog, ErrorDialog, InfoDialog, WarningDialog
from ubuntutweak.gui.gtk import post_ui, set_busy, unset_busy
from ubuntutweak.utils.parser import Parser
from ubuntutweak.network import utdata
from ubuntutweak.settings.gsettings import GSetting
from ubuntutweak.utils import set_label_for_stock_button
from ubuntutweak.utils import ppa, icon
from ubuntutweak.utils.package import AptWorker
from ubuntutweak.apps import CategoryView
from ubuntutweak.admins.appcenter import AppView, AppParser, StatusProvider
from ubuntutweak.admins.appcenter import CheckUpdateDialog, FetchingDialog, PackageInfo
# Module-level logger for the source center
log = logging.getLogger("SourceCenter")

# Shared parser for the app center metadata (used to recognize app packages)
APP_PARSER = AppParser()
PPA_MIRROR = []
UNCONVERT = False

# GSettings keys controlling warnings and source-data versioning
WARNING_KEY = 'com.ubuntu-tweak.apps.disable-warning'
CONFIG = GSetting(key=WARNING_KEY)
UPDATE_SETTING = GSetting(key='com.ubuntu-tweak.apps.sources-can-update', type=bool)
VERSION_SETTING = GSetting(key='com.ubuntu-tweak.apps.sources-version', type=str)

# Local directory holding downloaded sourcecenter data, and the URL used
# to check its version
SOURCE_ROOT = os.path.join(consts.CONFIG_ROOT, 'sourcecenter')
SOURCE_VERSION_URL = utdata.get_version_url('/sourcecenter_version/')
# url -> distro codename map, filled in by SourceParser.init_items
UPGRADE_DICT = {}
def get_source_data_url():
    """Return the download URL of the sourcecenter data tarball for the
    currently configured data version."""
    version = VERSION_SETTING.get_value()
    tarball_path = '/media/utdata/sourcecenter-%s.tar.gz' % version
    return utdata.get_download_url(tarball_path)
def get_source_logo_from_filename(file_name):
    """Load a 32x32 logo pixbuf for a source, with graceful fallbacks.

    :param file_name: logo file name relative to SOURCE_ROOT; may be ''
    :return: a 32x32 GdkPixbuf.Pixbuf (scaled if necessary), or the icon
        theme's missing-image icon if the file cannot be loaded
    """
    path = os.path.join(SOURCE_ROOT, file_name)
    if not os.path.exists(path) or file_name == '':
        # fall back to the generic PPA logo shipped with the app
        path = os.path.join(consts.DATA_DIR, 'pixmaps/ppa-logo.png')

    try:
        pixbuf = GdkPixbuf.Pixbuf.new_from_file(path)
        if pixbuf.get_width() != 32 or pixbuf.get_height() != 32:
            pixbuf = pixbuf.scale_simple(32, 32, GdkPixbuf.InterpType.BILINEAR)
        return pixbuf
    except Exception:
        # Was a bare 'except:', which also swallowed KeyboardInterrupt/
        # SystemExit; narrowed while keeping the best-effort fallback
        return Gtk.IconTheme.get_default().load_icon(Gtk.STOCK_MISSING_IMAGE, 32, 0)
class CheckSourceDialog(CheckUpdateDialog):
    """Update-check dialog specialized for the source center data."""

    def get_updatable(self):
        # Delegate to utdata with the source-center paths and settings;
        # auto=False because this check is user-initiated
        return utdata.check_update_function(self.url, SOURCE_ROOT, \
                                            UPDATE_SETTING, VERSION_SETTING, \
                                            auto=False)
class DistroParser(Parser):
    """Parser for the distros.json data file, keyed by the 'id' field."""

    def __init__(self):
        data_file = os.path.join(SOURCE_ROOT, 'distros.json')
        super(DistroParser, self).__init__(data_file, 'id')

    def get_codename(self, key):
        """Return the distribution codename stored for the given id."""
        entry = self[key]
        return entry['codename']
class SourceParser(Parser):
    """Parser for the sources.json data file, keyed by the 'id' field.

    Builds the id-indexed source table, keeping only sources whose
    distro matches a supported Ubuntu codename, records each source's
    distro in UPGRADE_DICT and tracks reverse dependencies between
    sources.
    """

    def __init__(self):
        super(SourceParser, self).__init__(os.path.join(SOURCE_ROOT, 'sources.json'), 'id')

    def init_items(self, key):
        """Populate self from the raw JSON data (called by Parser)."""
        self.reverse_depends = {}

        distro_parser = DistroParser()

        for item in self.get_data():
            distro_values = ''

            # 'in' replaces dict.has_key(), which was removed in Python 3
            if 'distros' in item['fields']:
                distros = item['fields']['distros']

                for id in distros:
                    codename = distro_parser.get_codename(id)

                    if codename in system.UBUNTU_CODENAMES:
                        # prefer the running system's codename, but accept
                        # any supported codename as a fallback
                        if system.CODENAME == codename:
                            distro_values = codename
                            break
                    else:
                        distro_values = codename
                        break

            # skip sources with no usable distro
            if distro_values == '':
                continue

            item['fields']['id'] = item['pk']
            item['fields']['distro'] = distro_values
            self[item['fields'][key]] = item['fields']

            UPGRADE_DICT[item['fields']['url']] = distro_values

            id = item['pk']
            fields = item['fields']

            # build the reverse-dependency map: depend_id -> [dependents]
            if 'dependencies' in fields and fields['dependencies']:
                for depend_id in fields['dependencies']:
                    if depend_id in self.reverse_depends:
                        self.reverse_depends[depend_id].append(id)
                    else:
                        self.reverse_depends[depend_id] = [id]

    def has_reverse_depends(self, id):
        """Return True if any other source depends on *id*."""
        return id in self.reverse_depends

    def get_reverse_depends(self, id):
        """Return the list of source ids that depend on *id*."""
        return self.reverse_depends[id]

    def get_slug(self, key):
        return self[key]['slug']

    def get_conflicts(self, key):
        """Return the conflict list for *key*, or None if it has none."""
        if 'conflicts' in self[key]:
            return self[key]['conflicts']
        else:
            return None

    def get_dependencies(self, key):
        """Return the dependency list for *key*, or None if it has none."""
        if 'dependencies' in self[key]:
            return self[key]['dependencies']
        else:
            return None

    def get_summary(self, key):
        return self.get_by_lang(key, 'summary')

    def get_name(self, key):
        return self.get_by_lang(key, 'name')

    def get_category(self, key):
        return self[key]['category']

    def get_url(self, key):
        return self[key]['url']

    def get_key(self, key):
        return self[key]['key']

    def get_key_fingerprint(self, key):
        """Return the GPG key fingerprint for *key*, or '' when absent."""
        if 'key_fingerprint' in self[key]:
            return self[key]['key_fingerprint']
        else:
            return ''

    def get_distro(self, key):
        return self[key]['distro']

    def get_comps(self, key):
        return self[key]['component']

    def get_website(self, key):
        return self[key]['website']

    def set_enable(self, key, enable):
        """Enable or disable the source *key* in the system sources list.

        The logic lives here (rather than in the view) so that other
        modules can toggle sources too. Returns the proxy result as a
        string.
        """
        gpg_key = self.get_key(key)
        url = self.get_url(key)
        distro = self.get_distro(key)
        comps = self.get_comps(key)
        comment = self.get_name(key)

        # PPAs get a '<ppa-name>-<distro>' list file name
        if ppa.is_ppa(url):
            file_name = '%s-%s' % (ppa.get_source_file_name(url), distro)
        else:
            file_name = self.get_slug(key)

        if gpg_key:
            proxy.add_apt_key_from_content(gpg_key)

        # sources.list syntax: a distro with no components needs a
        # trailing slash; no distro at all becomes './'
        if not comps and distro:
            distro = distro + '/'
        elif not comps and not distro:
            distro = './'

        result = proxy.set_separated_entry(url, distro, comps,
                                           comment, enable, file_name)

        return str(result)
# Module-wide parser instance shared by the views below
SOURCE_PARSER = SourceParser()
class SourceStatus(StatusProvider):
    """Tracks the read/unread ("New") status of each third-party source."""

    def load_objects_from_parser(self, parser):
        """Sync the persisted status data with *parser*'s source list.

        On first run every source is marked as read; afterwards sources
        not yet present in the data are added as unread so the UI can
        flag them as new.
        """
        init = self.get_init()

        for key in parser.keys():
            id = key
            # status entries are keyed by slug, not by numeric id
            slug = parser.get_slug(key)
            key = slug
            if init:
                log.debug('SourceStatus first init, set %s as read' % id)
                self.get_data()['apps'][key] = {}
                self.get_data()['apps'][key]['read'] = True
                self.get_data()['apps'][key]['cate'] = parser.get_category(id)
            else:
                if key not in self.get_data()['apps']:
                    self.get_data()['apps'][key] = {}
                    self.get_data()['apps'][key]['read'] = False
                    self.get_data()['apps'][key]['cate'] = parser.get_category(id)

        if init and parser.keys():
            log.debug('Init finish, SourceStatus set init to False')
            self.set_init(False)

        self.save()

    def get_read_status(self, key):
        """Return whether *key* has been seen; unknown entries default to
        True so they are not spuriously flagged as new."""
        try:
            return self.get_data()['apps'][key]['read']
        except Exception:
            # was a bare 'except:'; narrowed so KeyboardInterrupt and
            # SystemExit are no longer swallowed, best-effort kept
            return True

    def set_as_read(self, key):
        """Mark *key* as read, ignoring missing entries (best effort)."""
        try:
            self.get_data()['apps'][key]['read'] = True
        except Exception:
            # same narrowing as get_read_status; failure to mark is benign
            pass
        self.save()
class NoNeedDowngradeException(Exception):
    """Raised when a package's installed version did not come from a PPA,
    so no downgrade is required for it."""
    pass
class DowngradeView(Gtk.TreeView):
    """Tree view listing packages that can be downgraded from a PPA
    version back to the system (archive) version."""

    __gsignals__ = {
        'checked': (GObject.SignalFlags.RUN_FIRST, None,
                    (GObject.TYPE_BOOLEAN,)),
        'cleaned': (GObject.SignalFlags.RUN_FIRST, None, ())
    }

    # model column indices
    (COLUMN_PKG,
     COLUMN_PPA_VERSION,
     COLUMN_SYSTEM_VERSION) = range(3)

    def __init__(self, plugin):
        GObject.GObject.__init__(self)

        # package name, PPA version, system version (all strings)
        model = Gtk.ListStore(GObject.TYPE_STRING,
                              GObject.TYPE_STRING,
                              GObject.TYPE_STRING)
        self.set_model(model)
        model.set_sort_column_id(self.COLUMN_PKG, Gtk.SortType.ASCENDING)

        self.plugin = plugin

        self._add_column()

    def _add_column(self):
        # build the three text columns: package, PPA version, system version
        renderer = Gtk.CellRendererText()
        column = Gtk.TreeViewColumn(_('Package'))
        column.pack_start(renderer, False)
        column.add_attribute(renderer, 'text', self.COLUMN_PKG)
        column.set_sort_column_id(self.COLUMN_PKG)
        self.append_column(column)

        renderer = Gtk.CellRendererText()
        renderer.set_property('ellipsize', Pango.EllipsizeMode.END)
        column = Gtk.TreeViewColumn(_('Previous Version'))
        column.pack_start(renderer, True)
        column.add_attribute(renderer, 'text', self.COLUMN_PPA_VERSION)
        column.set_resizable(True)
        column.set_min_width(180)
        self.append_column(column)

        renderer = Gtk.CellRendererText()
        renderer.set_property('ellipsize', Pango.EllipsizeMode.END)
        column = Gtk.TreeViewColumn(_('System Version'))
        column.pack_start(renderer, True)
        column.add_attribute(renderer, 'text', self.COLUMN_SYSTEM_VERSION)
        column.set_resizable(True)
        self.append_column(column)

    def update_downgrade_model(self, ppas):
        """Fill the model with packages from *ppas* (list of PPA URLs)
        that have a downgradeable system version."""
        model = self.get_model()
        model.clear()
        pkg_dict = {}
        for ppa_url in ppas:
            # read the apt list file associated with the PPA to learn
            # which packages it provides
            path = ppa.get_list_name(ppa_url)
            log.debug('Find the PPA path name: %s', path)
            if path:
                for line in open(path):
                    if line.startswith('Package:'):
                        pkg = line.split()[1].strip()
                        if pkg in pkg_dict:
                            # Join another ppa info to the pkg dict, so that
                            # later we can know if more than two ppa provide
                            # the pkg
                            pkg_dict[pkg].extend([ppa_url])
                        else:
                            pkg_dict[pkg] = [ppa_url]

        pkg_map = self.get_downgradeable_pkgs(pkg_dict)

        if pkg_map:
            log.debug("Start insert pkg_map to model: %s\n" % str(pkg_map))
            for pkg, (p_verion, s_verion) in pkg_map.items():
                model.append((pkg, p_verion, s_verion))

    def get_downgrade_packages(self):
        """Return apt 'pkg=version' strings pinning every listed package
        to its system version."""
        model = self.get_model()
        downgrade_list = []
        for row in model:
            pkg, version = row[self.COLUMN_PKG], row[self.COLUMN_SYSTEM_VERSION]
            downgrade_list.append("%s=%s" % (pkg, version))
        log.debug("The package to downgrade is %s" % str(downgrade_list))
        return downgrade_list

    def get_downgradeable_pkgs(self, ppa_dict):
        """Return {pkg_name: (ppa_version, system_version)} for packages
        whose installed version comes from one of the PPAs in *ppa_dict*
        and which also have a non-PPA (system) version available."""

        def is_system_origin(version, urls):
            # a version counts as "system" when at least one of its
            # origins is NOT among the given PPA origins
            origins = [ppa.get_ppa_origin_name(url) for url in urls]
            system_version = 0
            match = False

            for origin in version.origins:
                if origin.origin:
                    if origin.origin not in origins:
                        log.debug("The origin %s is not in %s, so end the loop" % (origin.origin, str(origins)))
                        match = True
                        break

            if match:
                system_version = version.version
                log.debug("Found match url, the system_version is %s, now iter to system version" % system_version)

            return system_version

        def is_full_match_ppa_origin(pkg, version, urls):
            # the installed version counts as "from the PPA" only when
            # every origin belongs to the given PPA origins
            origins = [ppa.get_ppa_origin_name(url) for url in urls]
            ppa_version = 0
            match = True

            if version == pkg.installed:
                for origin in version.origins:
                    if origin.origin:
                        if origin.origin not in origins:
                            log.debug("The origin %s is not in %s, so end the loop" % (origin.origin, str(origins)))
                            match = False
                            break

                if match:
                    ppa_version = version.version
                    log.debug("Found match url, the ppa_version is %s, now iter to system version" % ppa_version)

            return ppa_version

        log.debug("Check downgrade information")
        downgrade_dict = {}

        for pkg, urls in ppa_dict.items():
            log.debug("The package is: %s, PPA URL is: %s" % (pkg, str(urls)))

            if pkg not in AptWorker.get_cache():
                log.debug("    package isn't available, continue next...\n")
                continue

            pkg = AptWorker.get_cache()[pkg]
            # NOTE(review): isInstalled is the old python-apt attribute
            # name — confirm the installed python-apt version supports it
            if not pkg.isInstalled:
                log.debug("    package isn't installed, continue next...\n")
                continue
            versions = pkg.versions

            ppa_version = 0
            system_version = 0
            FLAG = 'PPA'
            try:
                # walk the candidate versions: first find the PPA version
                # (the installed one), then a system version to revert to
                for version in versions:
                    try:
                        #FIXME option to remove the package
                        log.debug("Version uri is %s" % version.uri)

                        # Switch FLAG
                        if FLAG == 'PPA':
                            ppa_version = is_full_match_ppa_origin(pkg, version, urls)
                            FLAG = 'SYSTEM'
                            if ppa_version == 0:
                                # installed version is not from a PPA; skip
                                raise NoNeedDowngradeException
                        else:
                            system_version = is_system_origin(version, urls)

                        if ppa_version and system_version:
                            downgrade_dict[pkg.name] = (ppa_version, system_version)
                            break
                    except StopIteration:
                        pass
            except NoNeedDowngradeException:
                log.debug("Catch NoNeedDowngradeException, so pass this package: %s" % pkg)
                continue
            log.debug("\n")
        return downgrade_dict
class UpdateView(AppView):
    """AppView subclass that lists new applications and package updates."""

    def __init__(self):
        AppView.__init__(self)

        # header rows with markup replace normal column titles
        self.set_headers_visible(False)

    def update_model(self, apps):
        """Prepend a '%d New Applications Available' header row, then
        delegate row population to AppView.update_model."""
        model = self.get_model()

        length = len(apps)

        iter = model.append()
        model.set(iter,
                  self.COLUMN_INSTALLED, False,
                  self.COLUMN_DISPLAY,
                  '<span size="large" weight="bold">%s</span>' %
                  ngettext('%d New Application Available',
                           '%d New Applications Available', length) % length,
                  )

        super(UpdateView, self).update_model(apps)

    def update_updates(self, pkgs):
        '''apps is a list to iter pkgname,
        cates is a dict to find what the category the pkg is
        '''
        model = self.get_model()
        length = len(pkgs)

        if pkgs:
            # header row with the number of pending updates
            iter = model.append()
            model.set(iter,
                      self.COLUMN_DISPLAY,
                      '<span size="large" weight="bold">%s</span>' %
                      ngettext('%d Package Update Available',
                               '%d Package Updates Available',
                               length) % length)

            # split into known apps (with logo/metadata) and plain updates
            apps = []
            updates = []
            for pkg in pkgs:
                if pkg in APP_PARSER:
                    apps.append(pkg)
                else:
                    updates.append(pkg)

            for pkgname in apps:
                pixbuf = self.get_app_logo(APP_PARSER[pkgname]['logo'])

                package = PackageInfo(pkgname)
                appname = package.get_name()
                desc = APP_PARSER.get_summary(pkgname)

                iter = model.append()
                model.set(iter,
                          self.COLUMN_INSTALLED, False,
                          self.COLUMN_ICON, pixbuf,
                          self.COLUMN_PKG, pkgname,
                          self.COLUMN_NAME, appname,
                          self.COLUMN_DESC, desc,
                          self.COLUMN_DISPLAY, '<b>%s</b>\n%s' % (appname, desc),
                          self.COLUMN_TYPE, 'update')

            for pkgname in updates:
                # BUGFIX: was PACKAGE_WORKER.get_cache(), but no
                # PACKAGE_WORKER is defined in this module (NameError);
                # the apt cache is accessed via AptWorker elsewhere here
                package = AptWorker.get_cache()[pkgname]
                self.append_update(False, package.name, package.summary)
        else:
            # no updates: show a single informational row
            iter = model.append()
            model.set(iter,
                      self.COLUMN_DISPLAY,
                      '<span size="large" weight="bold">%s</span>' %
                      _('No Available Package Updates'))

    def select_all_action(self, active):
        """Set every row's checkbox to *active* and emit 'changed' with
        the number of selected packages."""
        self.to_rm = []
        self.to_add = []
        model = self.get_model()
        model.foreach(self.__select_foreach, active)
        self.emit('changed', len(self.to_add))

    def __select_foreach(self, model, path, iter, check):
        # Gtk.TreeModel.foreach callback: record packages that got checked
        model.set(iter, self.COLUMN_INSTALLED, check)
        pkg = model.get_value(iter, self.COLUMN_PKG)
        if pkg and check:
            self.to_add.append(pkg)
class SourcesView(Gtk.TreeView):
__gsignals__ = {
'sourcechanged': (GObject.SignalFlags.RUN_FIRST, None, ()),
'new_purge': (GObject.SignalFlags.RUN_FIRST, None, (GObject.TYPE_PYOBJECT,))
}
(COLUMN_ENABLED,
COLUMN_ID,
COLUMN_CATE,
COLUMN_URL,
COLUMN_DISTRO,
COLUMN_COMPS,
COLUMN_SLUG,
COLUMN_LOGO,
COLUMN_NAME,
COLUMN_COMMENT,
COLUMN_DISPLAY,
COLUMN_HOME,
COLUMN_KEY,
) = range(13)
def __init__(self):
GObject.GObject.__init__(self)
self.filter = None
self.modelfilter = None
self._status = None
self.view_mode = 'view'
self.to_purge = []
self.model = self.__create_model()
self.model.set_sort_column_id(self.COLUMN_NAME, Gtk.SortType.ASCENDING)
self.set_model(self.model)
self.set_search_column(self.COLUMN_NAME)
self._add_column()
self.selection = self.get_selection()
def get_sourceslist(self):
return SourcesList()
def __create_model(self):
model = Gtk.ListStore(GObject.TYPE_BOOLEAN,
GObject.TYPE_INT,
GObject.TYPE_INT,
GObject.TYPE_STRING,
GObject.TYPE_STRING,
GObject.TYPE_STRING,
GObject.TYPE_STRING,
GdkPixbuf.Pixbuf,
GObject.TYPE_STRING,
GObject.TYPE_STRING,
GObject.TYPE_STRING,
GObject.TYPE_STRING,
GObject.TYPE_STRING)
return model
def on_visible_filter(self, model, iter, data=None):
log.debug("on_visible_filter: %s" % self.model.get_value(iter, self.COLUMN_NAME))
category = self.model.get_value(iter, self.COLUMN_CATE)
if self.filter == None or self.filter == category:
return True
else:
return False
def _add_column(self):
renderer = Gtk.CellRendererToggle()
renderer.connect('toggled', self.on_enable_toggled)
column = Gtk.TreeViewColumn(' ', renderer, active=self.COLUMN_ENABLED)
column.set_sort_column_id(self.COLUMN_ENABLED)
self.append_column(column)
self.source_column = Gtk.TreeViewColumn(_('Third-Party Sources'))
self.source_column.set_sort_column_id(self.COLUMN_NAME)
self.source_column.set_spacing(5)
renderer = Gtk.CellRendererPixbuf()
self.source_column.pack_start(renderer, False)
self.source_column.add_attribute(renderer, 'pixbuf', self.COLUMN_LOGO)
renderer = Gtk.CellRendererText()
renderer.set_property('ellipsize', Pango.EllipsizeMode.END)
self.source_column.pack_start(renderer, True)
self.source_column.add_attribute(renderer, 'markup', self.COLUMN_DISPLAY)
self.append_column(self.source_column)
def set_status_active(self, active):
if active:
self._status = SourceStatus('sourcestatus.json')
def get_status(self):
return self._status
def update_source_model(self, find='all', limit=-1, only_enabled=False):
self.model.clear()
sourceslist = self.get_sourceslist()
enabled_list = []
for source in sourceslist.list:
if source.type == 'deb' and not source.disabled:
enabled_list.append(source.uri)
if self._status:
self._status.load_objects_from_parser(SOURCE_PARSER)
index = 0
for id in SOURCE_PARSER:
enabled = False
index = index + 1
url = SOURCE_PARSER.get_url(id)
enabled = url in enabled_list
if enabled:
enabled_list.remove(url)
if only_enabled:
if not enabled:
continue
elif not ppa.is_ppa(url):
continue
else:
enabled = not enabled
slug = SOURCE_PARSER.get_slug(id)
comps = SOURCE_PARSER.get_comps(id)
distro = SOURCE_PARSER.get_distro(id)
category = SOURCE_PARSER.get_category(id)
if find != 'all' and category != find:
continue
#TODO real top-10
if limit > 0 and index >= limit:
break
name = SOURCE_PARSER.get_name(id)
comment = SOURCE_PARSER.get_summary(id)
pixbuf = get_source_logo_from_filename(SOURCE_PARSER[id]['logo'])
website = SOURCE_PARSER.get_website(id)
key = SOURCE_PARSER.get_key(id)
if self._status and not self._status.get_read_status(slug):
display = '<b>%s <span foreground="#ff0000">(New!!!)</span>\n%s</b>' % (name, comment)
else:
display = '<b>%s</b>\n%s' % (name, comment)
iter = self.model.append()
self.model.set(iter,
self.COLUMN_ENABLED, enabled,
self.COLUMN_ID, id,
self.COLUMN_CATE, category,
self.COLUMN_URL, url,
self.COLUMN_DISTRO, distro,
self.COLUMN_COMPS, comps,
self.COLUMN_COMMENT, comment,
self.COLUMN_SLUG, slug,
self.COLUMN_NAME, name,
self.COLUMN_DISPLAY, display,
self.COLUMN_LOGO, pixbuf,
self.COLUMN_HOME, website,
self.COLUMN_KEY, key,
)
path = os.path.join(consts.DATA_DIR, 'pixmaps/ppa-logo.png')
pixbuf = icon.get_from_file(path, size=32)
if enabled_list and only_enabled:
for url in enabled_list:
if ppa.is_ppa(url):
iter = self.model.append()
self.model.set(iter,
self.COLUMN_ENABLED, False,
self.COLUMN_ID, 9999,
self.COLUMN_CATE, -1,
self.COLUMN_URL, url,
self.COLUMN_DISTRO, distro,
self.COLUMN_COMPS, comps,
self.COLUMN_COMMENT, '',
self.COLUMN_SLUG, url,
self.COLUMN_NAME, ppa.get_basename(url),
self.COLUMN_DISPLAY, ppa.get_long_name(url),
self.COLUMN_LOGO, pixbuf,
self.COLUMN_HOME, ppa.get_homepage(url),
self.COLUMN_KEY, '',
)
def set_as_read(self, iter, model):
if type(model) == Gtk.TreeModelFilter:
iter = model.convert_iter_to_child_iter(iter)
model = model.get_model()
id = model.get_value(iter, self.COLUMN_ID)
slug = model.get_value(iter, self.COLUMN_SLUG)
if self._status and not self._status.get_read_status(slug):
name = model.get_value(iter, self.COLUMN_NAME)
comment = model.get_value(iter, self.COLUMN_COMMENT)
self._status.set_as_read(slug)
model.set_value(iter,
self.COLUMN_DISPLAY,
'<b>%s</b>\n%s' % (name, comment))
def get_sourcelist_status(self, url):
for source in self.get_sourceslist():
if url in source.str() and source.type == 'deb':
return not source.disabled
return False
    @log_func(log)
    def on_enable_toggled(self, cell, path):
        """Toggle handler for the 'enabled' checkbox of a source row.

        In 'view' mode it enforces the conflict / dependency graph declared
        by SOURCE_PARSER before enabling or disabling; in 'purge' mode it
        only queues/unqueues the PPA url for purging and emits 'new_purge'.
        """
        model = self.get_model()
        iter = model.get_iter((int(path),))
        id = model.get_value(iter, self.COLUMN_ID)
        name = model.get_value(iter, self.COLUMN_NAME)
        enabled = model.get_value(iter, self.COLUMN_ENABLED)
        url = model.get_value(iter, self.COLUMN_URL)
        if self.view_mode == 'view':
            conflicts = SOURCE_PARSER.get_conflicts(id)
            dependencies = SOURCE_PARSER.get_dependencies(id)
            #Convert to real model, because will involke the set method
            if type(model) == Gtk.TreeModelFilter:
                iter = model.convert_iter_to_child_iter(iter)
                model = model.get_model()
            # Case 1: enabling, but a conflicting source is already enabled
            # -> refuse and restore the checkbox to its previous state.
            if not enabled and conflicts:
                conflict_list = []
                conflict_name_list = []
                for conflict_id in conflicts:
                    if self.get_source_enabled(conflict_id):
                        conflict_list.append(conflict_id)
                        name_list = [r[self.COLUMN_NAME] for r in model if r[self.COLUMN_ID] == conflict_id]
                        if name_list:
                            conflict_name_list.extend(name_list)
                if conflict_list and conflict_name_list:
                    full_name = ', '.join(conflict_name_list)
                    ErrorDialog(_('You can\'t enable this Source because'
                                  '<b>"%(SOURCE)s"</b> conflicts with it.\nTo '
                                  'continue you need to disable <b>"%(SOURCE)s"</b>' \
                                  'first.') % {'SOURCE': full_name}).launch()
                    model.set(iter, self.COLUMN_ENABLED, enabled)
                    return
            # Case 2: enabling, but required sources are disabled
            # -> offer to enable the whole dependency chain first.
            if enabled is False and dependencies:
                depend_list = []
                depend_name_list = []
                for depend_id in dependencies:
                    if self.get_source_enabled(depend_id) is False:
                        depend_list.append(depend_id)
                        name_list = [r[self.COLUMN_NAME] for r in model if r[self.COLUMN_ID] == depend_id]
                        if name_list:
                            depend_name_list.extend(name_list)
                if depend_list and depend_name_list:
                    full_name = ', '.join(depend_name_list)
                    dialog = QuestionDialog(title=_('Dependency Notice'),
                                            message= _('To enable this Source, You need to enable <b>"%s"</b> at first.\nDo you wish to continue?') \
                                            % full_name)
                    if dialog.run() == Gtk.ResponseType.YES:
                        for depend_id in depend_list:
                            self.set_source_enabled(depend_id)
                        self.set_source_enabled(id)
                    else:
                        # Declined: put the checkbox back.
                        model.set(iter, self.COLUMN_ENABLED, enabled)
                    dialog.destroy()
                    return
            # Case 3: disabling, but enabled sources depend on this one
            # -> refuse and restore the checkbox.
            if enabled and SOURCE_PARSER.has_reverse_depends(id):
                depend_list = []
                depend_name_list = []
                for depend_id in SOURCE_PARSER.get_reverse_depends(id):
                    if self.get_source_enabled(depend_id):
                        depend_list.append(depend_id)
                        name_list = [r[self.COLUMN_NAME] for r in model if r[self.COLUMN_ID] == depend_id]
                        if name_list:
                            depend_name_list.extend(name_list)
                if depend_list and depend_name_list:
                    full_name = ', '.join(depend_name_list)
                    ErrorDialog(_('You can\'t disable this Source because '
                                  '<b>"%(SOURCE)s"</b> depends on it.\nTo continue '
                                  'you need to disable <b>"%(SOURCE)s"</b> first.') \
                                % {'SOURCE': full_name}).launch()
                    model.set(iter, self.COLUMN_ENABLED, enabled)
                    return
            # No constraint blocked the change: flip the source for real.
            self.do_source_enable(iter, not enabled)
        else:
            #TODO purge dependencies
            # Purge mode: checkbox marks the PPA for purging instead of
            # enabling/disabling it.
            status = not enabled
            model.set(iter, self.COLUMN_ENABLED, status)
            if status:
                self.to_purge.append(url)
            else:
                self.to_purge.remove(url)
            self.emit('new_purge', self.to_purge)
def on_source_foreach(self, model, path, iter, id):
m_id = model.get_value(iter, self.COLUMN_ID)
if m_id == id:
if self._foreach_mode == 'get':
self._foreach_take = model.get_value(iter, self.COLUMN_ENABLED)
elif self._foreach_mode == 'set':
self._foreach_take = iter
def on_source_name_foreach(self, model, path, iter, id):
m_id = model.get_value(iter, self.COLUMN_ID)
if m_id == id:
self._foreach_name_take = model.get_value(iter, self.COLUMN_NAME)
    def get_source_enabled(self, id):
        '''
        Search source by id, then get its enabled status from the model.

        Returns the boolean COLUMN_ENABLED value, or None when no row with
        the given id exists (the scan result slot is reset beforehand).
        '''
        self._foreach_mode = 'get'
        self._foreach_take = None
        self.model.foreach(self.on_source_foreach, id)
        return self._foreach_take
def set_source_enabled(self, id):
'''
Search source by id, then call do_source_enable
'''
self._foreach_mode = 'set'
self._foreach_status = None
self.model.foreach(self.on_source_foreach, id)
self.do_source_enable(self._foreach_take, True)
def set_source_disable(self, id):
'''
Search source by id, then call do_source_enable
'''
self._foreach_mode = 'set'
self._foreach_status = None
self.model.foreach(self.on_source_foreach, id)
self.do_source_enable(self._foreach_take, False)
    def do_source_enable(self, iter, enable):
        '''
        Actually enable or disable the source addressed by ``iter``.

        Delegates the sources.list change to SOURCE_PARSER.set_enable and
        mirrors the parser's reported state back into the model (so the UI
        always reflects what really happened).  Emits 'sourcechanged' only
        when the on-disk status actually flipped, and pops a desktop
        notification when a source was enabled.
        '''
        model = self.get_model()
        id = model.get_value(iter, self.COLUMN_ID)
        url = model.get_value(iter, self.COLUMN_URL)
        icon = model.get_value(iter, self.COLUMN_LOGO)
        comment = model.get_value(iter, self.COLUMN_NAME)
        # Status before the change, read from the real sources.list.
        pre_status = self.get_sourcelist_status(url)
        result = SOURCE_PARSER.set_enable(id, enable)
        log.debug("Setting source %s (%d) to %s, result is %s" % (comment, id, str(enable), result))
        # Trust the parser's result string, not the requested state.
        if result == 'enabled':
            model.set(iter, self.COLUMN_ENABLED, True)
        else:
            model.set(iter, self.COLUMN_ENABLED, False)
        if pre_status != enable:
            self.emit('sourcechanged')
        if enable:
            notify = Notify.Notification(summary=_('New source has been enabled'),
                                         body=_('%s is enabled now, Please click the refresh button to update the application cache.') % comment)
            notify.set_icon_from_pixbuf(icon)
            notify.set_hint_string ("x-canonical-append", "")
            notify.show()
class SourceCategoryView(CategoryView):
    """Category sidebar of the Source Center.

    Prepends the synthetic 'Enabled PPAs' entry (id -1) before the real
    categories loaded from cates.json are added by the base class.
    """

    def pre_update_cate_model(self):
        # Former synthetic entries, kept for reference:
        #   (-3, 'latest', _('Latest'))
        #   (-2, 'top-10', _('Top 10'))
        row = (-1, 'enabled-ppa', _('Enabled PPAs'))
        self.model.append(None, row)
class SourceCenter(TweakModule):
    """Tweak module that manages third-party APT sources (PPAs).

    Combines a category sidebar (SourceCategoryView) with a source list
    (SourcesView), keeps the bundled source metadata in sync with the
    remote server, and supports safely purging PPAs by downgrading their
    packages first.
    """
    __title__ = _('Source Center')
    __desc__ = _('A collection of software sources to ensure your applications are always up-to-date.')
    __icon__ = 'software-properties'
    __url__ = 'http://ubuntu-tweak.com/source/'
    __urltitle__ = _('Visit online Source Center')
    __category__ = 'application'
    __keywords__ = 'ppa repository app'
    __utactive__ = True

    def __init__(self):
        TweakModule.__init__(self, 'sourcecenter.ui')
        self.url = SOURCE_VERSION_URL
        set_label_for_stock_button(self.sync_button, _('_Sync'))
        # Left pane: categories.
        self.cateview = SourceCategoryView(os.path.join(SOURCE_ROOT, 'cates.json'))
        self.cateview.update_cate_model()
        self.cateview.get_selection().connect('changed', self.on_category_changed)
        self.left_sw.add(self.cateview)
        # Right pane: sources.
        self.sourceview = SourcesView()
        self.sourceview.set_status_active(True)
        self.sourceview.update_source_model()
        self.sourceview.connect('sourcechanged', self.on_source_changed)
        self.sourceview.connect('new_purge', self.on_purge_changed)
        self.sourceview.get_selection().connect('changed', self.on_source_selection)
        self.sourceview.set_rules_hint(True)
        self.right_sw.add(self.sourceview)
        self.cateview.set_status_from_view(self.sourceview)
        self.update_timestamp()
        # Kick off the background metadata-update check.
        UPDATE_SETTING.set_value(False)
        UPDATE_SETTING.connect_notify(self.on_have_update, data=None)
        log.debug('Start check update')
        thread.start_new_thread(self.check_update, ())
        # Refresh the "last synced" label once a minute.
        GObject.timeout_add(60000, self.update_timestamp)
        if self.check_source_upgradable() and UPGRADE_DICT:
            GObject.idle_add(self.upgrade_sources)
        self.add_start(self.main_vbox)
        self.connect('realize', self.setup_ui_tasks)
        GObject.idle_add(self.show_warning)

    @post_ui
    def show_warning(self):
        """One-time security warning about third-party sources, suppressible
        via the WARNING_KEY gsettings checkbox."""
        if not CONFIG.get_value():
            dialog = WarningDialog(title=_('Warning'),
                                   message=_('It is a possible security risk to '
                                   'use packages from Third-Party Sources.\n'
                                   'Please be careful and use only sources you trust.'),
                                   buttons=Gtk.ButtonsType.OK)
            checkbutton = CheckButton(_('Never show this dialog'),
                                      key=WARNING_KEY,
                                      backend='gsettings')
            dialog.add_option_button(checkbutton)
            dialog.run()
            dialog.destroy()

    def setup_ui_tasks(self, widget):
        # Purge button only appears in the 'Enabled PPAs' view.
        self.purge_ppa_button.hide()
        self.cateview.expand_all()

    def check_source_upgradable(self):
        """True if any known source was auto-disabled by a distribution
        upgrade (detected by Update Manager's localized marker comment)."""
        log.debug("The check source string is: \"%s\"" % self.__get_disable_string())
        for source in SourcesList():
            if self.__get_disable_string() in source.str() and \
                    source.uri in UPGRADE_DICT and \
                    source.disabled:
                return True
        return False

    def __get_disable_string(self):
        """Return the localized fragment update-manager writes next to
        sources it disables on dist-upgrade."""
        APP="update-manager"
        DIR="/usr/share/locale"
        gettext.bindtextdomain(APP, DIR)
        gettext.textdomain(APP)
        #the "%s" is in front, some is the end, so just return the long one
        translated = gettext.gettext("disabled on upgrade to %s")
        a, b = translated.split('%s')
        return a.strip() or b.strip()

    def update_timestamp(self):
        # Returns True so GObject.timeout_add keeps rescheduling it.
        self.time_label.set_text(_('Last synced:') + ' ' + utdata.get_last_synced(SOURCE_ROOT))
        return True

    @post_ui
    def upgrade_sources(self):
        """Offer to re-enable third-party sources disabled by a dist-upgrade."""
        dialog = QuestionDialog(title=_('Upgrade Third Party Sources'),
                                message=_('After a successful distribution upgrade, '
                                'any third-party sources you use will be disabled by default.\n'
                                'Would you like to re-enable any sources disabled by Update Manager?'))
        response = dialog.run()
        dialog.destroy()
        if response == Gtk.ResponseType.YES:
            proxy.upgrade_sources(self.__get_disable_string(), UPGRADE_DICT)
            if not self.check_source_upgradable():
                InfoDialog(_('Upgrade Successful!')).launch()
            else:
                ErrorDialog(_('Upgrade Failed!')).launch()
            self.emit('call', 'ubuntutweak.modules.sourceeditor', 'update_source_combo', {})
            self.update_sourceview()

    @post_ui
    def on_have_update(self, *args):
        """Notify callback fired when a new source-metadata bundle exists."""
        if UPDATE_SETTING.get_value():
            dialog = QuestionDialog(_('New source data available, would you like to update?'))
            response = dialog.run()
            dialog.destroy()
            if response == Gtk.ResponseType.YES:
                dialog = FetchingDialog(get_source_data_url(),
                                        self.get_toplevel())
                dialog.connect('destroy', self.on_source_data_downloaded)
                dialog.run()
                dialog.destroy()

    def check_update(self):
        # Runs on a worker thread started from __init__.
        try:
            return utdata.check_update_function(self.url, SOURCE_ROOT, \
                                                UPDATE_SETTING, VERSION_SETTING, \
                                                auto=True)
        except Exception, error:
            print error

    def on_source_selection(self, widget, data=None):
        """Selection handler: mark the row read and show its details."""
        model, iter = widget.get_selected()
        if iter:
            sourceview = widget.get_tree_view()
            sourceview.set_as_read(iter, model)
            self.cateview.update_selected_item()
            home = model.get_value(iter, self.sourceview.COLUMN_HOME)
            url = model.get_value(iter, self.sourceview.COLUMN_URL)
            description = model.get_value(iter, self.sourceview.COLUMN_COMMENT)
            self.set_details(home, url, description)

    def on_category_changed(self, widget, data=None):
        self.update_sourceview()

    def update_sourceview(self):
        """Refilter the source list for the selected category and toggle
        between normal ('view') and purge mode for the Enabled-PPAs entry."""
        self.cateview.set_status_from_view(self.sourceview)
        model, iter = self.cateview.get_selection().get_selected()
        limit = -1
        only_enabled = False
        if iter:
            # Negative ids are the synthetic categories added in
            # SourceCategoryView.pre_update_cate_model.
            find = model[iter][self.cateview.CATE_ID] or 'all'
            if find == -3:
                find = 'all'
            elif find == -2:
                find = 'all'
                limit = 10
            elif find == -1:
                find = 'all'
                only_enabled = True
        else:
            find = 'all'
        log.debug("Filter for %s" % find)
        self.sourceview.update_source_model(find=find,
                                            limit=limit,
                                            only_enabled=only_enabled)
        if only_enabled:
            self.purge_ppa_button.show()
            self.purge_ppa_button.set_sensitive(False)
            self.sourceview.source_column.set_title(_('All enabled PPAs (Select and click "Purge PPA" can safely downgrade packages)'))
            self.sourceview.view_mode = 'purge'
        else:
            self.purge_ppa_button.hide()
            self.sourceview.source_column.set_title(_('Third-Party Sources'))
            self.sourceview.view_mode = 'view'

    def set_details(self,
                    homepage='http://ubuntu-tweak.com',
                    url='http://ubuntu-tweak.com',
                    description=None):
        """Fill the detail pane; PPA urls are replaced by their Launchpad page."""
        self.homepage_button.set_label(homepage)
        self.homepage_button.set_uri(homepage)
        if ppa.is_ppa(url):
            url = ppa.get_homepage(url)
        self.url_button.set_label(url)
        self.url_button.set_uri(url)
        self.description_label.set_text(description or _('Description is here'))

    def on_source_changed(self, widget):
        # Let the source editor module refresh its combo box.
        self.emit('call', 'ubuntutweak.modules.sourceeditor', 'update_source_combo', {})

    @log_func(log)
    def on_purge_changed(self, widget, purge_list):
        # Purge button is only clickable while something is queued.
        if purge_list:
            self.purge_ppa_button.set_sensitive(True)
        else:
            self.purge_ppa_button.set_sensitive(False)

    def on_update_button_clicked(self, widget):
        """Run `apt update` via AptWorker and refresh dependent modules."""
        @log_func(log)
        def on_update_finished(transaction, status, parent):
            log.debug("on_update_finished")
            unset_busy(parent)

        set_busy(self)
        daemon = AptWorker(widget.get_toplevel(),
                           finish_handler=on_update_finished,
                           data=self)
        daemon.update_cache()
        self.emit('call', 'ubuntutweak.modules.appcenter', 'update_app_data', {})
        self.emit('call', 'ubuntutweak.modules.updatemanager', 'update_list', {})

    def on_source_data_downloaded(self, widget):
        """Install a freshly downloaded metadata tarball and reload the UI."""
        path = widget.get_downloaded_file()
        tarfile = utdata.create_tarfile(path)
        if tarfile.is_valid():
            tarfile.extract(consts.CONFIG_ROOT)
            self.update_source_data()
            utdata.save_synced_timestamp(SOURCE_ROOT)
            self.update_timestamp()
        else:
            ErrorDialog(_('An error occurred whilst downloading the file')).launch()

    def update_source_data(self):
        # Re-parse the (global) source database and rebuild both views.
        global SOURCE_PARSER
        SOURCE_PARSER = SourceParser()
        self.sourceview.model.clear()
        self.sourceview.update_source_model()
        self.cateview.update_cate_model()
        self.cateview.expand_all()

    def on_sync_button_clicked(self, widget):
        """Manual sync: check the server, then offer to fetch new data."""
        dialog = CheckSourceDialog(widget.get_toplevel(), self.url)
        dialog.run()
        dialog.destroy()
        if dialog.status == True:
            dialog = QuestionDialog(_("Update available, Would you like to update?"))
            response = dialog.run()
            dialog.destroy()
            if response == Gtk.ResponseType.YES:
                dialog = FetchingDialog(parent=self.get_toplevel(), url=get_source_data_url())
                dialog.connect('destroy', self.on_source_data_downloaded)
                dialog.run()
                dialog.destroy()
        elif dialog.error == True:
            ErrorDialog(_("Network Error, Please check your network connection or the remote server is down.")).launch()
        else:
            # Already up to date: just refresh the timestamp.
            utdata.save_synced_timestamp(SOURCE_ROOT)
            self.update_timestamp()
            InfoDialog(_("No update available.")).launch()

    @log_func(log)
    def on_purge_ppa_button_clicked(self, widget):
        """Purge the queued PPAs: downgrade their packages first, then
        (in on_package_work_finished) remove the sources themselves."""
        # name_list is to display the name of PPA
        # url_list is to identify the ppa
        set_busy(self)
        name_list = []
        url_list = []
        log.debug("self.sourceview.to_purge: %s" % self.sourceview.to_purge)
        for url in self.sourceview.to_purge:
            name_list.append(ppa.get_short_name(url))
            url_list.append(url)
        log.debug("PPAs to purge: url_list: %s" % url_list)
        package_view = DowngradeView(self)
        package_view.update_downgrade_model(url_list)
        sw = Gtk.ScrolledWindow(shadow_type=Gtk.ShadowType.IN)
        sw.set_policy(Gtk.PolicyType.NEVER, Gtk.PolicyType.AUTOMATIC)
        select_pkgs = package_view.get_downgrade_packages()
        sw.add(package_view)
        #TODO the logic is a little ugly, need to improve the BaseMessageDialog
        if not select_pkgs:
            message = _("It's safe to purge the PPA, no packages need to be downgraded.")
            sw.hide()
        else:
            message = _("To safely purge the PPA, the following packages must be downgraded.")
            sw.show_all()
            sw.set_size_request(500, 100)
        dialog = QuestionDialog(title=_("You're going to purge \"%s\":") % ', '.join(name_list),
                                message=message)
        dialog.set_resizable(True)
        dialog.get_content_area().pack_start(sw, True, True, 0)
        dialog.show_all()
        response = dialog.run()
        dialog.destroy()
        # Workflow
        # 1. Downgrade all the PPA packages to offical packages
        #TODO Maybe not official? Because anther ppa which is enabled may have newer packages then offical
        # 2. If succeed, disable PPA, or keep it
        if response == Gtk.ResponseType.YES:
            log.debug("The select pkgs is: %s", str(select_pkgs))
            worker = AptWorker(widget.get_toplevel(),
                               finish_handler=self.on_package_work_finished,
                               data={'parent': self,
                                     'url_list': url_list})
            worker.downgrade_packages(select_pkgs)
        else:
            unset_busy(self)

    @log_func(log)
    def on_package_work_finished(self, transaction, status, kwargs):
        """Second half of the purge workflow: packages are downgraded, now
        remove each PPA source and notify the user."""
        unset_busy(self)
        parent = kwargs['parent']
        url_list = kwargs['url_list']
        for url in url_list:
            #TODO remove vendor key
            result = proxy.purge_source(url, '')
            log.debug("Set source: %s to %s" % (url, str(result)))
        self.sourceview.to_purge = []
        self.update_sourceview()
        notify = Notify.Notification(summary=_('PPA has been purged'),
                                     body=_('It is highly recommend to do a "Refresh" source operation.'))
        notify.set_icon_from_pixbuf(self.get_pixbuf(size=48))
        notify.set_hint_string ("x-canonical-append", "")
        notify.show()
|
0x7E/ubuntu-tweak
|
ubuntutweak/admins/sourcecenter.py
|
Python
|
gpl-2.0
| 47,382
|
[
"VisIt"
] |
28cc386cf8eab7d9736579979a181a4bc0a90fc44b59c2aa32a9cd41bc5a1825
|
from base64 import urlsafe_b64encode, urlsafe_b64decode
import copy
import datetime
import sha
import hmac
import os
import pickle
import random
import re
import stat
from string import lower, count
import time
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.models import User, AnonymousUser
from django.db import connection, backend
from django.db import models
import django.http
from django.shortcuts import render_to_response
from django.template.defaultfilters import slugify
from django.template.loader import render_to_string
from django.utils import feedgenerator
from django.views.decorators.cache import cache_page
from proj.giv.consts import *
from proj.giv.htmlconv import tagclean, tagunclean, forshow, stripshow
from proj.giv.utils import *
from proj.giv.db import *
from proj.giv.cache import *
from proj.settings import *
import proj.giv.spam as spam
# Create your models here.
class Constants(models.Model):
    """Simple name/value store for site-wide tunable settings."""

    class Admin: pass

    name = models.CharField(unique=True, max_length=40)
    value = models.TextField(default='')

    def __unicode__(self):
        return u'%s : %s' % (self.name, self.value)
def constant(name, default=''):
    """Fetch the named Constants value through the cache (10-minute TTL),
    returning ``default`` when the row is missing."""
    cache_key = 'constants|' + name + '|default:' + default
    return withcache(cache_key,
                     _constant,
                     {'name': name, 'default': default},
                     duration=10 * 60)
def _constant(d):
try: return Constants.objects.get(name=d['name']).value
except: return d['default']
def constants():
    """Return every Constants row as a {name: value} dict, cached for
    ten minutes under the 'allconstants' key."""
    return withcache('allconstants', _constants, {}, duration=10 * 60)
def _constants(a):
    """Cache worker for constants(); the argument dict is unused."""
    return dict((row.name, row.value) for row in Constants.objects.all())
#class Constant():
# def __getitem__(self,a): return constant(a)
class UserProfile(models.Model):
    """
    Per-user profile for donors, students, projects and organizations.

    To create a new user:
    > u = User.objects.create_user(username, email, raw_password)
    > u.save()
    > p = UserProfile(user=u) # also include non-blank fields
    > p.save()

    To access profile information
    > u.get_profile().name

    Many optional attributes (city, country, lat/lng, image upload count,
    ...) live in the generic get_attr/set_attr key-value store rather than
    as model fields.
    """
    class Admin: pass
    # Account Meta
    user = models.ForeignKey(User, unique=True)
    gender = models.CharField(max_length=1, choices=GENDER_CHOICES, blank=True)
    # kind: 's'=student, 'p'=project, 'd'=donor, 'o'=organization
    # (see the dispatching in get_object / summary).
    kind = models.CharField(max_length=1, choices=KIND_CHOICES)
    # Profile
    #pic = models.ImageField(upload_to='') # Todo: Choose upload path
    name = models.CharField(max_length=NAME_MAX)
    created = models.DateField(null=True, blank=True) #for birth or incorporation date

    def url(self):
        # Canonical profile path, e.g. /~alice/
        return '/~' + self.user.username + '/'
    def edit_url(self):
        return self.url() + 'editprofile/'
    def blog_url(self):
        return self.url() + 'blog/'
    def blogurl(self):
        # Duplicate of blog_url(), kept for template compatibility.
        return '/~' + self.user.username + '/blog/'
    def __unicode__(self):
        return self.name
    def isanon(self):
        # 't'/'f' flag stored in the attribute store.
        if get_attr('isanon',self,'f') == 't':
            return True
        return False
    def common_name(self):
        # Strip a trailing parenthesized qualifier, e.g. "Ada (teacher)" -> "Ada".
        if self.name.find('(') > 0:
            return self.name[:self.name.find('(')].strip()
        return self.name
    def get_age(self):
        """Age in whole years since ``created``; None for organizations or
        when no date is stored."""
        if self.created is None or self.kind == 'o':
            return None
        today = datetime.date.today()
        years = today.year - self.created.year
        # Not yet had this year's birthday.
        if (today.month, today.day) < (self.created.month, self.created.day):
            years -= 1
        return years
    def get_object(self):
        """Return the kind-specific model row (Recipient/Donor/Organization)
        attached to this profile."""
        if lower(self.kind) in ['s','student','p','project']:
            return Recipient.objects.get(profile=self)
        elif lower(self.kind) in ['d','donor']:
            return Donor.objects.get(profile=self)
        elif lower(self.kind) in ['o','organization']:
            return Organization.objects.get(profile=self)
        # Deliberate NameError to flag an invalid kind value loudly.
        raise hell #user's kind incorrect...
        return
    def base_image(self):
        """Path of the most recently uploaded raw profile image (versioned
        by the 'imgupcount' attribute); may not exist on disk."""
        count = get_attr('imgupcount',self, default = None)
        oldfile = os.path.join(IMAGE_DIR,'user',unicode(self.user.id))
        if count is not None:
            oldfile += '-v'+count
        return oldfile
    def has_image(self):
        try:
            os.stat(self.base_image())
        except:
            return False
        return True
    def get_image(self,width, height):
        """Return the relative path of a width x height JPEG thumbnail,
        (re)generating it with ImageMagick when missing or stale; falls
        back to the shared default image when the user has none."""
        oldfile = self.base_image()
        try:
            oldstat = os.stat(oldfile)
        except:
            oldfile = os.path.join(IMAGE_DIR,'user','default')
            oldstat = os.stat(oldfile)
        newfile = oldfile+('-%ix%i.jpg'%(width,height))
        try:
            newstat = os.stat(newfile)
            # Regenerate when the thumbnail predates the source image
            # (deliberate NameError used as a cheap 'goto except').
            if newstat[stat.ST_CTIME] < oldstat[stat.ST_CTIME]:
                raise hell
        except:
            self.get_image_convert(height, width, oldfile, newfile)
        return os.path.join('user',newfile.split('/')[-1])
    def get_image_url(self, width, height):
        return '/images/'+self.get_image(width, height)
    def set_image(self, data):
        """Store an uploaded image (iterable of chunks) as a new version
        and return its filesystem path."""
        count = int(get_attr('imgupcount',self, default = '0'))
        count += 1
        set_attr('imgupcount', unicode(count), self)
        fname = os.path.join(IMAGE_DIR, 'user', unicode(self.user.id))
        fname += '-v'+unicode(count)
        destination = open(fname, 'wb+')
        for chunk in data:
            destination.write(chunk)
        destination.close()
        return fname
    def get_image_convert(self,height, width, oldfile, newfile):
        # Shell out to ImageMagick's `convert` to resize/strip the image.
        cmds = ['convert', '-strip', '-quality', '70']
        if (height is not None and
            width is not None):
            cmds += ['-geometry', '%ix%i'%(width,height)]
        cmds+= [oldfile, newfile]
        os.spawnv(os.P_WAIT, '/usr/bin/convert', cmds)
    def locationbrief(self):
        """Comma-joined 'city, province, country' built from the attribute
        store; city is omitted for students/projects (privacy)."""
        s = ''
        try:
            if self.kind not in ['s','p']:
                s += get_attr('city', self)
        except:
            pass
        try:
            s2 = get_attr('province', self)
            if len(s2) > 1:
                if len(s) > 1:
                    s += ', '
                s += s2
        except:
            pass
        try:
            s2 = get_attr('country', self)
            if len(s2) > 1:
                if len(s) > 1:
                    s += ', '
                s += s2
        except:
            pass
        return s
    def latlng(self):
        '''
        for finding the latitude and longitude of someone, based on their
        location info.

        Geocodes by scraping a Google Maps response and caches the result
        in the attribute store, re-fetching only when locationbrief()
        changes.  Returns (lat, lng) floats or None.
        NOTE(review): the scraped response format looks long-obsolete;
        expect the fetch to fail and the cached ''/'' fallback to be used.
        '''
        l1 = self.locationbrief()
        if l1 == '':
            return None
        l0 = get_attr('latlng_locbrief',self,default=None)
        if l0 is None or l0 != l1:
            try:
                import urllib2
                q = re.sub(', ','+',l1)
                q = re.sub(' ','+',q)
                p = urllib2.build_opener().open(
                    "http://maps.google.com/maps?q=%s"%q
                    ).read()
                (lat, lng) = re.search(
                    'id:"addr",lat:(.*?),lng:(.*?),',
                    p).groups()
                set_attr('lat',lat,self)
                set_attr('lng',lng,self)
            except:
                set_attr('lat','',self)
                set_attr('lng','',self)
            set_attr('latlng_locbrief',l1,self)
        lat = get_attr('lat',self,default='')
        if lat == '':
            return None
        lng = get_attr('lng',self)
        return (float(lat),float(lng))
    def lat(self):
        (lat, lng) = self.latlng()
        return lat
    def lng(self):
        (lat, lng) = self.latlng()
        return lng
    def thumbnail(self):
        return self.get_image_url(width=THUMB_WIDTH, height=THUMB_HEIGHT)
    def has_completed_captcha(self, val=None):
        # Acts as getter when val is None, setter when a bool is passed.
        if val is not None and val == True or val == False:
            set_attr('completed_captcha', val, self)
            return val
        return get_attr('completed_captcha', self, default=False)
    def get_about(self):
        return get_attr('about', self, default='')
    def summary(self, viewer=None):
        """Build the template-context dict describing this profile.

        Adds kind-specific extras: org recipient counts, recipient grant
        totals (with donor-specific figures when the viewer is a donor),
        or donor pledge/donation totals.
        """
        dict = {'uname':self.user.username,
                'uid':self.user.id,
                'name':self.name,
                'picurl':self.thumbnail(),
                'picurl57':self.get_image_url(width=57, height=57),
                'location':self.locationbrief(),
                'sudoable':viewer is not None and sudoable(viewer, self.user),
                'approved':True,
                'url':self.url(),
                'edit_url':self.edit_url(),
                'summary' :stripshow(self.get_about(), maxlen=1000),
                'brief_summary': stripshow(self.get_about(), maxlen=125),
                'about' : self.get_about(),
                'kind' : self.kind,
                'isdonor' : self.kind == 'd',
                'isstudent' : self.kind == 's',
                'isproject' : self.kind == 'p',
                'isrec' : self.kind in ['s','p'],
                'isorg' : self.kind == 'o',
                'id' : self.id
                }
        # Resolve the viewer's profile/object, tolerating anonymous viewers.
        vp = None
        try: vp = viewer.get_profile()
        except: pass
        vo = None
        try: vo = vp.get_object()
        except: pass
        obj = self.get_object()
        dict['obj_id'] = obj.id
        if dict['isorg']:
            projcount = obj.recipient_set.filter(
                approved=True).filter(profile__kind='p').count()
            studcount = obj.recipient_set.filter(
                approved=True).filter(profile__kind='s').count()
            if projcount > 0:
                dict['projects'] = projcount
            if studcount > 0:
                dict['students'] = studcount
        elif dict['isrec']:
            dict['approved'] = obj.approved
            grantset = obj.grant_set.all().order_by('-created')
            curgrant = head(grantset)
            if curgrant is not None:
                # Donor viewers get a donor-personalized grant summary.
                if (vp is not None and
                    vo is not None and
                    vp.kind == 'd'):
                    cgs = curgrant.summary(vo)
                else:
                    cgs = curgrant.summary()
                dict['grant'] = cgs
                dict['grant_have'] = cgs['have']
                dict['grant_want'] = cgs['want']
            dict['grant_have_total'] = sum([g.have_informal() for g in grantset])
            dict['grant_want_total'] = sum([g.want for g in grantset])
        elif dict['isdonor']:
            dict['pledged'] = obj.pledged_informal() + obj.gift_cert_balance()
            dict['donated'] = obj.donated_informal()
        return dict
    def get_updates(self, after=None, before=None):
        """Blog posts relevant to this profile, newest first.

        Donors see posts authored by the recipients they donate to;
        everyone else sees all non-comment posts.  Optional after/before
        datetimes bound the created range.
        """
        obj = self.get_object()
        updates = BlogPost.objects.filter(iscomment=False).order_by('-created')
        if isinstance(obj, Donor):
            recusers = [rec.profile.user for rec in obj.get_donatees()]
            updates = updates.filter(author__in=recusers)
        if after is not None:
            updates = updates.filter(created__gt=after)
        if before is not None:
            updates = updates.filter(created__lt=before)
        return updates
    def updatecounts(self):
        """Cached unread-updates / unread-messages counts (see the
        module-level _updatecounts worker)."""
        #can increase cache duration if it's properly invalidated at appropriate times.
        return withcache('updatecounts|'+self.user.username,
                         _updatecounts,
                         {'prof':self}, 10*60)
def _updatecounts(d):
    """Cache worker for UserProfile.updatecounts; d carries the profile
    under the key 'prof'.  Counts updates and approved messages newer than
    the user's last visit to the account page / inbox."""
    prof = d['prof']
    updates = prof.get_updates().filter(
        created__gt=visited(page='account', user=prof.user)).count()
    messages = Message.objects.filter(to=prof.user) \
                              .filter(approved=True) \
                              .filter(created__gt=visited('inbox', prof.user)) \
                              .count()
    return {'updates': updates, 'messages': messages}
def updatecounts(user):
    """Return {'updates': n, 'messages': n} for ``user``.

    Falls back to zero counts for anonymous or profile-less users so page
    rendering never breaks.
    """
    try:
        return user.get_profile().updatecounts()
    except Exception:
        # Was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; the zero fallback is intentional best-effort.
        return {'updates': 0, 'messages': 0}
class Organization(models.Model):
    """An organization account; parent of zero or more Recipient rows."""

    class Admin: pass

    profile = models.ForeignKey(UserProfile, unique=True)

    def __unicode__(self):
        return u'%s %s' % (self.profile.user.username, self.profile.name)

    def get_recipients(self):
        """All recipients registered under this organization."""
        return Recipient.objects.filter(org=self)
class Recipient(models.Model):
    """A student ('s') or project ('p') profile that can receive grants.

    Belongs to an Organization and must be approved before being shown
    publicly or receiving donations.
    """
    class Admin: pass
    profile = models.ForeignKey(UserProfile, unique=True, related_name='recipient_set')
    org = models.ForeignKey(Organization, related_name='recipient_set')
    approved = models.BooleanField()
    postneedapproval = models.BooleanField(default=True)
    def __unicode__(self):
        return self.profile.user.username+u' '+self.profile.name
    def get_donors(self):
        """Donors who have (non-anonymously) paid toward any of this
        recipient's grants."""
        gs = [a for a in Grant.objects.filter(rec=self)]
        pgs = set(Paymenttogrant.objects.filter(grant__in=gs))
        donorids = set([pg.donor.id for pg in pgs
                        if not pg.isanon()])
        donors = Donor.objects.filter(id__in=donorids)
        # Earlier single-query version, kept for reference:
#        donors = Donor.objects.filter(
#            paymenttogrant_set__grant__in=gs).order_by(
#            '-paymenttogrant_set__grant__created')
        return donors
    def curgrant(self):
        # Most recently created grant, or None when there are none.
        return head(self.grant_set.all().order_by('-created'))
    def unapprove(self):
        """Withdraw approval and unwind every donation to this recipient.

        Deletes all payment-to-grant rows, decrements the cached donation
        stats of every donor team that contributed, detaches this
        recipient from those teams, and returns the affected donor Users
        (sorted by username) and teams so callers can notify them.
        """
        user = self.profile.user
        profile = self.profile
        kind = self.profile.kind
        obj = self
        obj.approved = False
        obj.save()
        donors = set([])
        teams = set([])
        #go through grants
        for g in Grant.objects.filter(rec=obj):
            for pg in g.paymenttogrant_set.all():
                donors.add(pg.donor.id)
                for team in pg.donor.donorgroups.all():
                    team.numdonations -= 1
                    team.amtdonated -= pg.amount
                    team.save()
                    teams.add(team.id)
                pg.delete()
            # Refresh the grant's cached have/left/percent figures.
            g.update()
        donors = [Donor.objects.get(id=i).profile.user
                  for i in donors]
        donors.sort(
            lambda d1, d2: cmp(d1.username,d2.username))
        teams = [DonorGroup.objects.get(id=i)
                 for i in teams]
        for t in teams:
            t.recs.remove(obj)
            t.updatestats_deep()
        return {'donors':donors,'teams':teams}
    def receiving_messages(self):
        # Opt-out flag stored as the string 'True'/'False' in the attr store.
        return get_attr('recvmsgs',self.profile,'True')=='True'
class FeaturedProfile(models.Model):
    """Marks a profile as featured; featuretime refreshes on every save."""
    class Admin: pass
    profile = models.ForeignKey(UserProfile)
    featuretime = models.DateTimeField(auto_now=True)
class Donor(models.Model):
    """A donating user: tracks pledges, donations and gift certificates.

    Money figures come from the sumpw/sumpg/sumgc aggregate helpers.
    'informal' totals include unconfirmed payments; 'confirmed' do not.
    """

    class Admin: pass

    profile = models.ForeignKey(UserProfile, unique=True)

    def receiving_messages(self):
        # Donors always accept messages (recipients may opt out).
        return True

    def __unicode__(self):
        return u'%s %s' % (self.profile.user.username, self.profile.name)

    def pledged_informal(self):
        return sumpw(donor=self)

    def pledged_confirmed(self):
        return sumpw(donor=self, confirmed=True)

    def have_informal(self):
        """Unallocated balance: pledges minus donations plus gift certs."""
        pledged = sumpw(donor=self)
        donated = sumpg(donor=self)
        return pledged - donated + self.gift_cert_balance()

    def have_confirmed(self):
        pledged = sumpw(donor=self, confirmed=True)
        donated = sumpg(donor=self, confirmed=True)
        return pledged - donated + self.gift_cert_balance(confirmed=True)

    def donated_informal(self):
        return sumpg(donor=self)

    def donated_confirmed(self):
        return sumpg(donor=self, confirmed=True)

    def get_donatees(self):
        """Recipients this donor has (non-anonymously) given to."""
        pgs = Paymenttogrant.objects.filter(donor=self)
        rec_ids = set([pg.grant.rec.id for pg in pgs if not pg.isanon()])
        return Recipient.objects.filter(id__in=rec_ids)

    def gift_cert_out(self, confirmed=None):
        return sumgc(creator=self, confirmed=confirmed)

    def gift_cert_in(self, confirmed=None):
        return sumgc(receiver=self, confirmed=confirmed)

    def gift_cert_balance(self, confirmed=None):
        return self.gift_cert_in(confirmed) - self.gift_cert_out(confirmed)

    def is_balance_positive(self):
        return self.have_informal() > 0
class DonorGroup(models.Model):
class Admin: pass
slug = models.SlugField(max_length=NAME_MAX, blank=True, null=True)
name = models.CharField(max_length=NAME_MAX)
donors = models.ManyToManyField(Donor, blank=True, null=True, related_name='donorgroups')
admins = models.ManyToManyField(Donor, blank=True, null=True, related_name='donorgroupsadmin')
recs = models.ManyToManyField(Recipient, blank=True, null=True, related_name='donatinggroups')
location = models.TextField(default='')
category = models.TextField(default='')
whydonate = models.TextField(default='')
private = models.BooleanField(default=False)
worldjoin = models.BooleanField(default=True)
memberinvite = models.BooleanField(default=True)
admininvite = models.BooleanField(default=True)
worldemail = models.BooleanField(default=False)
memberemail = models.BooleanField(default=True)
adminemail = models.BooleanField(default=True)
worldblog = models.BooleanField(default=False)
memberblog = models.BooleanField(default=False)
adminblog = models.BooleanField(default=True)
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
#statistics
numdonors = models.IntegerField(default=0)
amtdonated = models.IntegerField(default=0)
numdonations = models.IntegerField(default=0)
numstudents = models.IntegerField(default=0)
numprojects = models.IntegerField(default=0)
def is_cause(self):
return self.get_cause_amt() > 0
def get_cause_amt(self):
return int(get_attr('cause_amount',self,'0'))
def set_cause_amt(self,amt):
set_attr('cause_amount',unicode(amt),self)
def get_cause_cur(self):
if not self.is_cause():
return 0
return sum([gc.amount for gc
in xconcat((
GiftCert.objects.filter(receiver=a)
for a in self.admins.all()))
if (get_attr('for_cause',gc,None)
== self.slug)])
#
# int(runquery(
# mkquery('sum(gc.amount)',
# ['giv_giftcert gc',
# 'giv_attrib a',
# 'giv_attrval av'],
# ["a.name='for_cause'",
# "a.id=av.attr_id",
# "av.oid=gc.id",
# "av.tablename='giv_giftcert'",
# "av.val=?"]
# +['(' + ' or '.join(
# ["gc.receiver_id=%i" % a.id
# for a in self.admins.all()])
# + ')']
# ),
# [self.slug],1,1))
def is_member(self, thing):
donor = self.thing2donor(thing)
if donor is None: return False
return (self.donors.filter(id=donor.id).count() > 0)
def join(self, donor):
#todo: put this in a transaction...
if not self.is_member(donor):
self.donors.add(donor)
self.numdonors += 1
self.amtdonated += donor.donated_informal()
self.numdonations += Paymenttogrant.objects.filter(donor=donor).count()
recids = set([pg.grant.rec.id for pg in Paymenttogrant.objects.filter(donor=donor)])
for id in recids:
self.recs.add(Recipient.objects.get(id=id))
self.save()
self.updatestats()
return
def updatestats_deep(self):
a = 0
n = 0
for d in self.donors.all():
a += d.donated_informal()
n += Paymenttogrant.objects.filter(donor=d).count()
self.amtdonated = a
self.numdonations = n
self.updatestats()
return
def updatestats(self):
self.numdonors = self.donors.count()
self.numstudents = self.recs.filter(
profile__kind='s').count()
self.numprojects = self.recs.filter(
profile__kind='p').count()
self.save()
return
def thing2donor(self, thing):
try:
if isinstance(thing,Donor):
return thing
if isinstance(thing,User):
thing = thing.get_profile()
if isinstance(thing,UserProfile):
assert thing.kind=='d'
return thing.get_object()
except: pass
return None
def is_admin(self, thing):
donor = self.thing2donor(thing)
if donor is None: return False
return (self.admins.filter(id=donor.id).count() > 0)
def add_admin(self, thing):
donor = self.thing2donor(thing)
self.join(donor)
if not self.is_admin(donor):
self.admins.add(donor)
return
def del_admin(self, thing):
donor = self.thing2donor(thing)
if self.is_admin(donor):
self.admins.remove(donor)
return
def can_invite(self, thing):
donor = self.thing2donor(thing)
if donor is None: return False
if self.is_admin(donor):
return self.admininvite
elif self.is_member(donor):
return self.memberinvite
return False
def can_message(self, thing):
donor = self.thing2donor(thing)
if donor is None: return False
if self.is_admin(donor):
return self.adminemail
elif self.is_member(donor):
return self.memberemail
return self.worldemail
def can_blog(self, thing):
donor = self.thing2donor(thing)
if donor is None: return False
if self.is_admin(donor):
return self.adminblog
elif self.is_member(donor):
return self.memberblog
return self.worldblog
    def get_image(self,width, height):
        """Return the images-root-relative path of this team's photo resized
        to width x height, generating and caching the resize on demand.
        """
        # 'imgupcount' versions uploads so stale cached resizes are skipped.
        count = get_attr('imgupcount',self, default = None)
        oldfile = os.path.join(IMAGE_DIR,'team',unicode(self.id))
        if count is not None:
            oldfile += '-v'+count
        try:
            oldstat = os.stat(oldfile)
        except:
            # No upload for this team: fall back to the stock image.
            oldfile = os.path.join(IMAGE_DIR,'team','default')
            oldstat = os.stat(oldfile)
        newfile = oldfile+('-%ix%i.jpg'%(width,height))
        try:
            # Reuse the cached resize only if it is at least as new as the
            # source; a stat failure or a stale file triggers regeneration.
            newstat = os.stat(newfile)
            assert newstat[stat.ST_CTIME] >= oldstat[stat.ST_CTIME]
        except:
            self.get_image_convert(height, width, oldfile, newfile)
        return os.path.join('team',newfile.split('/')[-1])
def get_image_url(self, width, height):
return '/images/'+self.get_image(width, height)
def set_image(self, data):
count = int(get_attr('imgupcount',self, default = '0'))
count += 1
set_attr('imgupcount', unicode(count), self)
fname = os.path.join(IMAGE_DIR, 'team', unicode(self.id))
fname += '-v'+unicode(count)
open(fname,'wb').write(data)
return fname
    def get_image_convert(self,height, width, oldfile, newfile):
        """Shell out to ImageMagick `convert` to write a resized JPEG.

        Blocks until the child exits (os.P_WAIT). When either dimension is
        None the image is only stripped/recompressed, not resized.
        """
        # First element is argv[0] for spawnv, not an option.
        cmds = ['convert', '-strip', '-quality', '70']
        if (height is not None and
            width is not None):
            cmds += ['-geometry', '%ix%i'%(width,height)]
        cmds+= [oldfile, newfile]
        os.spawnv(os.P_WAIT, '/usr/bin/convert', cmds)
def url(self):
return '/teams/'+self.slug+'/'
class Grant(models.Model):
    """A funding request for a Recipient: asks for `want` dollars and tracks
    progress toward it.

    The *_very_informal fields are denormalized caches refreshed from the
    Paymenttogrant rows by update()/updatewithoutsave().
    """
    class Admin: pass
    rec = models.ForeignKey(Recipient, related_name='grant_set')
    want = models.IntegerField()
    created = models.DateTimeField(auto_now_add=True)
    have_very_informal = models.IntegerField(default=0)
    left_very_informal = models.IntegerField(default=100000)
    percent_very_informal = models.IntegerField(default=0)
    confirmed = models.BooleanField(default=False)
    def iscomplete(self):
        """True when the cached progress shows the grant fully funded."""
        return (self.percent_very_informal == 100)
    def updatewithoutsave(self):
        """Refresh the cached have/left/percent fields; returns self unsaved."""
        h = self.have_informal()
        self.have_very_informal = h
        self.left_very_informal = self.want - h
        if h >= self.want:
            self.percent_very_informal = 100
        else:
            # Python 2 integer division, capped at 99 so only a truly full
            # grant shows 100%. NOTE(review): raises ZeroDivisionError when
            # want == 0 -- confirm want is always positive.
            self.percent_very_informal = min(
                (h*100)/(self.want), 99)
        return self
    def update(self):
        """Refresh the cached fields and persist the row."""
        self.updatewithoutsave().save()
        return
    def have_informal(self):
        """Total amount paid toward this grant (confirmed or not)."""
        #return sum([pg.amount for pg in self.paymenttogrant_set])
        return sumpg(grant=self)
    def have_confirmed(self):
        """Total amount of confirmed payments toward this grant."""
        #return sum([pg.amount for pg in self.paymenttogrant_set.filter(confirmed=True)])
        return sumpg(grant=self, confirmed=True)
    def summary(self, don=None):
        """Template-ready dict of grant state; when a donor is given, also
        offer a log-scaled list of suggested donation amounts."""
        have = self.have_informal()
        if don is None:
            donateamts = None
        else:
            # Suggestions are bounded by what the grant still needs and what
            # the donor's wallet holds; a 0 anywhere means nothing sensible
            # can be offered.
            dmax = min(self.want-have, don.have_informal())
            donateamts = logscale(start=5, finish=dmax)+[dmax]
            if 0 in donateamts:
                donateamts = None
        return dictcombine([
            get_attrs(self),
            {'id':self.id,
             'want':self.want,
             'have':have,
             'remaining': self.want - have,
             'percent':self.percent_very_informal,
             'donateamts':donateamts,
             'date':self.created,
             }])
    def __unicode__(self):
        return u'grant, $'+unicode(self.have_very_informal)+u'/$'+unicode(self.want)+u' for '+unicode(self.rec.profile.user.username)+u' created on '+unicode(self.created)
SUBJECT_MAX_LENGTH = 200
class BlogPost(models.Model):
    '''
    a comment is just a blog post that is the child of a blog
    post. this lets us do a slashdot-style comment system if we feel
    like it, and doesnt make the typical blog comment system hard.
    each post has an author identified by username.
    '''
    class Admin: pass
    iscomment = models.BooleanField()
    author = models.ForeignKey(User)
    subject = models.CharField(max_length=SUBJECT_MAX_LENGTH)
    text = models.TextField()
    children = models.ManyToManyField("self", symmetrical=False, blank=True)
    created = models.DateTimeField(auto_now_add=True)
    modified = models.DateTimeField(auto_now=True)
    approved = models.BooleanField()
    def hastag(self,tagname):
        """True when this post carries the given tag."""
        return self.tags.filter(tag=tagname).count() > 0
    def hastags(self,taglist):
        """True when this post carries every tag in *taglist*."""
        tgs = [t.tag for t in self.tags.all()]
        for tagname in taglist:
            if tagname in tgs: continue
            return False
        return True
    def maybesubject(self):
        """Subject line, or a placeholder when it is empty."""
        s = unicode(self.subject)
        if len(s) < 1:
            return 'No Subject'
        return s
    def titleinfo(self,baseurl=None,getstr=None):
        """Small dict (url/title/month) used for archive listings."""
        d = {'url':self.url(baseurl=baseurl,getstr=getstr),'title':self.maybesubject()}
        d['month'] = self.created.strftime('%B %Y')
        return d
    def dicttorender(self, recurselevel=0, viewunauth=False,
                     baseurl=None,getstr=None):
        """Build the template context for this post.

        recurselevel: how many comment levels to expand (0 = none).
        viewunauth: include unapproved children (for staff views).
        """
        d = {}
        # A comment's parent post is reachable through the reverse m2m.
        parents = self.blogpost_set.all()
        if self.iscomment and parents.count() > 0:
            parent = parents[0]
            d['parent'] = parent
            d['parenturl'] = parent.url()
            d['parentid'] = parent.id
        d['numcomments'] = int(self.children.all().count())
        if recurselevel != 0:
            d['children'] = [
                p.dicttorender(
                    recurselevel=recurselevel-1,
                    viewunauth=viewunauth)
                for p in
                self.children.all().order_by('-created')
                if viewunauth or p.approved]
            if len(d['children']) == 0:
                del d['children']
        d['uname'] = unicode(self.author.username)
        d['author'] = unicode(self.author.get_profile().name)
        d['authorsummary'] = self.author.get_profile().summary()
        d['id'] = unicode(self.id)
        d['url'] = self.url(baseurl=baseurl,getstr=getstr)
        d['created'] = unicode(self.created) #todo: nicify this
        d['modified']=unicode(self.modified) #todo: nicify this
        d['subject'] = self.maybesubject()
        d['summary'] = stripshow(unicode(self.text), maxlen=1000)
        d['text'] = forshow(unicode(self.text))
        d['tags'] = [tagclean(t.tag) for t in self.tags.all()]
        d['approved'] = self.approved
        d['spamclass'] = self.getspamclass()
        d['spamtrained'] = get_attr('spamtrained',self,default=False)
        d['baseurl'] = baseurl
        d['created_day_of_month'] = ("%2i" % self.created.day).replace(' ','0')
        d['created_month_caps'] = self.created.strftime("%b").upper()
        d['created_year'] = self.created.year
        d['created_verbose_time'] = self.created.strftime("%A, %B %e, %Y at %r")
        # d['brief'] = d['subject'] + ' ' + stripshow(unicode(self.text), maxlen=130)
        return d
    def getspamclass(self):
        """Return 'spam' or 'ham' for this post, classifying lazily.

        Heuristics first (staff and paying donors are always ham); only
        otherwise is the Bayesian classifier consulted, and that guess is
        cached but never used for training.
        """
        result = get_attr('spamclass',self,default=None)
        if result is not None:
            return result
        if self.author.is_staff:
            # staff are always hammy!
            return self.setspamclass('ham', train=True)
        p = self.author.get_profile()
        if p.kind in ['d','donor']:
            d = p.get_object()
            if d.pledged_informal() > 0:
                # if it's a donor who has donated, it's always ham!
                return self.setspamclass('ham', train=True)
            if len(p.get_about()) > 20 or p.has_image():
                # if it's a donor with some self-description, it's probably ham.
                return self.setspamclass('ham')
        else:
            # if it's a non-donor (eg, student or project), default to ham, but don't train.
            return self.setspamclass('ham')
        # otherwise, guess, but don't save any information.
        guess = spam.guess(unicode(self.text))
        result = {True:'spam',False:'ham'}[guess[1]>.8]
        self.setspamclass(result)
        return result
    def setspamclass(self,kind,train=False):
        """Record this post's spam class; optionally (re)train the filter.

        If the post was previously used for training, the old class is
        untrained first so the corpus counts stay consistent.
        """
        if kind not in ['spam','ham']:
            # `hell` is undefined on purpose: this raises NameError.
            raise hell
        set_attr('spamclass',kind,self)
        if train:
            prevtrained = get_attr('spamtrained',self,default=False)
            if prevtrained:
                prevkind = get_attr('spamclass',self,default=None)
                if prevkind is not None:
                    spam.untrain(prevkind,unicode(self.text))
            spam.train(kind,unicode(self.text))
            set_attr('spamtrained',True,self)
        return kind
    def url(self,baseurl=None,getstr=None):
        """Canonical URL of this post; baseurl overrides the author's page."""
        if getstr is None:
            getstr = ''
        else:
            getstr = '?'+getstr
        if baseurl is not None:
            return baseurl+unicode(self.id)+'/'+getstr
        return (self.author.get_profile().url() +
                'blog/' + unicode(self.id) + '/' + getstr)
    def __unicode__(self):
        return (
            unicode(self.id) + u' ' +
            unicode(self.author.username) +
            u' wrote ' +
            unicode(self.subject) +
            u' on ' +
            unicode(self.modified))
class BlogPostTag(models.Model):
    """One tag attached to one BlogPost (simple many-rows tagging)."""
    class Admin: pass
    blogpost = models.ForeignKey(BlogPost, related_name='tags')
    tag = models.CharField(max_length=TAG_MAX_LENGTH)
    def __unicode__(self):
        return (
            unicode(self.blogpost.id) +
            u' << ' +
            unicode(self.blogpost.subject) +
            u' >> ' +
            unicode(self.tag))
class Message(models.Model):
    '''
    a message from one user to another
    '''
    class Admin: pass
    fr = models.ForeignKey(User, related_name='messages_out')
    to = models.ManyToManyField(User, blank=True, null=True, related_name='messages_in')
    subject = models.CharField(max_length=SUBJECT_MAX_LENGTH)
    text = models.TextField()
    created = models.DateTimeField(auto_now_add=True)
    approved = models.BooleanField()
    def dicttorender(self):
        """Template context for an inbox/outbox row; recipients are capped
        at 10 names (xtake is a project helper -- presumably 'take first
        n'; verify)."""
        return {'from' : self.fr.username,
                'from_img' : self.from_img(),
                'to':', '.join([recvr.username
                                for recvr
                                in xtake(10,self.to.all())]),
                'to_img' : self.to_img(),
                'date':self.created,
                'subject' : self.subject,
                'body':forshow(self.text)}
    def from_img(self):
        """50x50 avatar URL of the sender."""
        return self.fr.get_profile(
            ).get_image_url(width=50,height=50)
    def to_img(self):
        """50x50 avatar URL of the recipient, only when there is exactly
        one; None otherwise or on any lookup failure."""
        tos = self.to.all()
        if tos.count()==1:
            try:
                return tos[0].get_profile(
                    ).get_image_url(width=50,height=50)
            except:
                # Swallowed on purpose; the debug print is Python 2 syntax.
                print 'failed in to_img '+unicode(tos[0].id)
        return None
    def __unicode__(self):
        return (
            unicode(self.id) + u' ' +
            unicode(self.fr.username) +
            u' wrote ' +
            unicode(self.subject) +
            u' on ' +
            unicode(self.created))
class HideMessage(models.Model):
    '''
    messages hidden from the inbox/outbox
    '''
    class Admin: pass
    # 'o' hides from the reader's outbox, 'i' from their inbox.
    box = models.CharField(max_length=1) #'o' for outbox, 'i' for inbox
    message = models.ForeignKey(Message)
    reader = models.ForeignKey(User)
    def __unicode__(self):
        return (
            u'hide message ' +
            unicode(self.message.id) +
            u' from ' +
            {u'o':u'out',u'i':u'in'}[self.box] +
            u'box of ' +
            unicode(self.reader.username))
class GoogleOrder(models.Model):
    """
    this oversimplifies the google checkout schema, assuming that each
    'order' can contain only one payment to wallet. luckily we control
    the construction of shopping carts.
    """
    class Admin: pass
    identifier = models.CharField(max_length=30)
    status = models.CharField(max_length=30)
    donorid = models.IntegerField()
    amount = models.IntegerField()
    created = models.DateTimeField(auto_now_add=True)
    modified = models.DateTimeField(auto_now=True)
    def charged(self):
        '''
        call this when notification of the donor being charged is
        received.

        Updates the matching Paymenttowallet's amount, or creates one if
        none exists yet (the bare except covers the not-found case).
        '''
        try:
            pw = Paymenttowallet.objects.filter(
                kind='googlecheckout').get(
                identifier=self.identifier)
            pw.amount = self.amount
            pw.save()
        except:
            Paymenttowallet(
                donor=Donor.objects.get(id=self.donorid),
                amount=self.amount,
                kind='googlecheckout',
                identifier=self.identifier,
                ).save()
        return
    def chargebacked(self):
        '''
        call this when notification of a chargeback on this order is
        received.
        sends message to donor and staff that this has occured.
        '''
        # Python 2 print statements; debug trace of the chargeback path.
        print 'chargeback!'
        try:
            pw = Paymenttowallet.objects.filter(
                kind='googlecheckout').get(
                identifier=self.identifier)
        except:
            print 'chargeback ... but no suitable Paymenttowallet found.'
            return
        donor = pw.donor
        # Remove the cancelled payment, then notify the donor and all staff.
        pw.delete()
        from proj.giv.messaging import send_message
        send_message(fr=User.objects.get(username='givbot'),
                     to=[donor.profile.user.username]+[u.username for u in User.objects.filter(is_staff=True)],
                     subject="Givology Google Checkout Error",
                     body='''
Recently, Google Checkout notified us at Givology that a payment to wallet has been cancelled, which can occur for any number of reasons. The most likely reasons include mistyped address information when signing up for Google Checkout. We will contact you soon to decide on a course of action. We apologize for any inconvenience.
'''+donor.profile.user.username+"\n\n",
                     approved=True)
        return
class Paymenttowallet(models.Model):
    """Money a donor has put into their Givology wallet via some processor."""
    class Admin: pass
    donor = models.ForeignKey(Donor, related_name='paymenttowallet_set')
    amount = models.IntegerField()
    kind = models.CharField(max_length=30) # 'googlecheckout', 'authorize.net', whatever
    identifier = models.CharField(max_length=160*2) #identifier for payment to wallet, for chargebacks or whatever
    confirmed = models.BooleanField(default=False)
    created = models.DateTimeField(auto_now_add=True)
    modified = models.DateTimeField(auto_now=True)
    def __unicode__(self):
        return u'paymenttowallet of %s of $%i via %s' % (self.donor.profile.user.username, self.amount, unicode(self.kind))
class Paymenttogrant(models.Model):
    """A donor's payment from their wallet toward a specific grant."""
    class Admin: pass
    donor = models.ForeignKey(Donor, related_name='paymenttogrant_set')
    grant = models.ForeignKey(Grant, related_name='paymenttogrant_set')
    amount = models.IntegerField()
    confirmed = models.BooleanField(default=False)
    created = models.DateTimeField(auto_now_add=True)
    modified = models.DateTimeField(auto_now=True)
    def __unicode__(self):
        return u'paymenttogrant from %s to %s of $%i' % (self.donor.profile.user.username, self.grant.rec.profile.user.username, self.amount)
    def isanon(self):
        """True when the donor asked for this payment to be anonymous
        (stored as the 'isanon' attribute, 't'/'f')."""
        return (get_attr('isanon', self, 'f') == 't')
class GiftCert(models.Model):
    """A gift certificate: created (and funded) by one donor, redeemable by
    another via `key`; `received` is set when redeemed."""
    class Admin: pass
    creator = models.ForeignKey(Donor, related_name='gift_cert_creator_set')
    receiver = models.ForeignKey(Donor, related_name='gift_cert_receiver_set', null=True, blank=True)
    key = models.CharField(max_length=40)
    amount = models.IntegerField()
    confirmed = models.BooleanField(default=False)
    created = models.DateTimeField(auto_now_add=True)
    modified = models.DateTimeField(auto_now=True)
    received = models.DateTimeField(null=True, blank=True)
    def for_cause(self):
        """True when a 'for_cause' attribute is attached to this cert."""
        return get_attr('for_cause',self,None) is not None
    def maybe_receiver_username(self):
        """Redeeming donor's username, or None while unredeemed."""
        if self.receiver is not None:
            return self.receiver.profile.user.username
        else:
            return None
    def __unicode__(self):
        return (u'GiftCert key="%s", amount="%i"' % (self.key, self.amount) +
                u' from %s' % (self.creator.profile.user.username) +
                fif(self.receiver is not None, u' to %s' % self.maybe_receiver_username(), ''))
class GradGift(models.Model):
    """A graduation-gift order: sender/recipient details plus the Google
    Checkout order identifier used to pay for it."""
    class Admin: pass
    creator = models.ForeignKey(Donor, related_name='gradgift_set', null=True, blank=True)
    deliverydate = models.DateField(null=True,blank=True)
    address = models.TextField()
    schoolname = models.TextField(null=True,blank=True)
    hometown = models.TextField(null=True, blank=True)
    shoutout = models.TextField(null=True,blank=True)
    senderemail = models.TextField()
    recipientemail = models.TextField(null=True,blank=True)
    sendername = models.TextField()
    recipientname = models.TextField()
    message = models.TextField()
    #identifier of google order
    googleorder = models.CharField(null=True, blank=True, max_length=30)
    created = models.DateTimeField(auto_now_add=True)
    modified = models.DateTimeField(auto_now=True)
class VolunteerWork(models.Model):
    """A logged chunk of volunteer time: who, how long, and what they did
    (either a predefined action by id or a free-text custom one)."""
    class Admin: pass
    created = models.DateTimeField(auto_now_add=True)
    modified = models.DateTimeField(auto_now=True)
    volunteer = models.ForeignKey(User, related_name='volunteer_set')
    minutes = models.IntegerField()
    action = models.TextField()
    actionID = models.IntegerField(default=0)
    actionIsCustom = models.BooleanField(default=True)
    when = models.DateTimeField()
    def when_date_unicode(self):
        """`when` formatted as 'YYYY-MM-DD HH:MM:SS' for display."""
        return self.when.strftime("%Y-%m-%d %H:%M:%S")
def payGrant(donor, grant, amount):
'''
pays a grant from a donor.
'''
pg = Paymenttogrant(grant=grant,
donor=donor,
amount=amount,
)
iscom0 = grant.iscomplete()
pg.save()
grant.update()
# update the 'new donations' page
invalidatecache('donorpage_donations')
invalidatecache('impactthisweek')
invalidatecache('nearlydone')
# update donor teams
for team in donor.donorgroups.all():
#todo: need to put this in a transaction...
team.numdonations += 1
team.amtdonated += amount
team.recs.add(grant.rec)
if (random.random() < (amount * 4.0 / team.amtdonated) or
random.random() < (4.0 / team.numdonations)):
team.updatestats_deep()
else:
team.updatestats()
# if this grant just got completed, send out an email!
if grant.iscomplete() and not iscom0:
print "grant completed! "+grant.rec.profile.name
#todo: somehow make sure that people don't receive two emails for the same grant via near-simultaneous donations...
tolist = set([pg.donor.profile.user.id
for pg in grant.paymenttogrant_set.all()] +
[a.id for a in User.objects.filter(is_staff=True)])
tolist = [User.objects.get(id=id)
for id in tolist]
body = tagclean(
render_to_response(
fif(grant.rec.profile.kind=='s',
'letters/completedstudent.html',
'letters/completedproject.html'),
{'name':grant.rec.profile.common_name(),
'fullname':grant.rec.profile.name,
'url':grant.rec.profile.url(),
}).content)
import proj.giv.messaging
for u in tolist:
proj.giv.messaging.send_message(
User.objects.get(username='givbot'),
[u],
'Grant Completed!',
body,
approved=True)
elif islive: # if not completed, send a thank you message
htmlbody = render_to_string(
tloc+'letters/thankyou.html',
{'donor_url' : donor.profile.url(),
'rec_url' : grant.rec.profile.url(),
'donor_name' : donor.profile.name,
'rec_name' : grant.rec.profile.name,
'rec_uname' : grant.rec.profile.user.username,
})
sendmail('giv_updates@givology.org',
donor.profile.user.email,
'Thank you for donating on Givology!',
body=htmlbody,
htmlbody=htmlbody)
invalidatecache('nearlydone')
invalidatecache('donorpage_donors')
invalidatecache('donorpage_donations')
return pg
def unpayGrant(pg):
    """Delete a payment-to-grant and refresh the grant's cached totals.

    WARNING (per the original author): not safe for general use -- the
    fuller rollback logic lives in Recipient.unapprove and should move here.
    """
    parent_grant = pg.grant
    pg.delete()
    parent_grant.update()
    return
def sumpw(donor=None, confirmed=None, createdafter=None, createdbefore=None):
    """Sum Paymenttowallet.amount over rows matching the given filters."""
    donor_id = donor.id if donor else None
    return tablesum(tablename=Paymenttowallet._meta.db_table,
                    donor=donor_id,
                    confirmed=confirmed,
                    createdafter=createdafter,
                    createdbefore=createdbefore,
                    )
def sumpg(donor=None, grant=None, confirmed=None, createdafter=None, createdbefore=None):
    """Sum Paymenttogrant.amount over rows matching the given filters."""
    donor_id = donor.id if donor else None
    grant_id = grant.id if grant else None
    return tablesum(tablename=Paymenttogrant._meta.db_table,
                    grant=grant_id,
                    donor=donor_id,
                    confirmed=confirmed,
                    createdafter=createdafter,
                    createdbefore=createdbefore,
                    )
def sumgc(creator=None, receiver=None, confirmed=None, createdafter=None, createdbefore=None):
    """Sum GiftCert.amount over rows matching the filters; 0 when none match.

    The SQL is built by string interpolation, but every interpolated value
    is an int id or a server-generated datetime, so no user-supplied string
    reaches the query. The `id >= 0` predicate is a no-op that lets every
    later clause start uniformly with ' and'.
    """
    q = ('select sum(amount) from giv_giftcert '+
         'where id >= 0 ')
    if creator is not None:
        q += ' and creator_id = %i'%(creator.id)
    if receiver is not None:
        q += ' and receiver_id = %i'%(receiver.id)
    if confirmed is not None:
        if confirmed: #this is mysql specific, i think.
            q += ' and confirmed = 1'
        else:
            q += ' and confirmed = 0'
    if createdbefore is not None:
        q+=(" and created<'%s'" %
            (createdbefore.strftime("%Y-%m-%d %H:%M:%S")))
    if createdafter is not None:
        q+=(" and created>'%s'" %
            (createdafter.strftime("%Y-%m-%d %H:%M:%S")))
    cursor = connection.cursor()
    cursor.execute(q, [])
    try:
        # SUM over zero rows yields NULL -> int(None) raises -> return 0.
        return int(cursor.fetchall()[0][0])
    except:
        return 0
def tablesum(tablename, id=None, donor=None, grant=None, confirmed=None, createdbefore=None, createdafter=None):
    """Raw-SQL SUM(amount) over *tablename* with optional filters; 0 on empty.

    Only int ids and server-generated datetimes are interpolated, so no
    user-supplied strings reach the query; callers pass ids, not objects.
    """
    wherelist = []
    if id is not None:
        wherelist.append(' id=%i' % (id))
    if donor is not None:
        wherelist.append(' donor_id=%i' % (donor))
    if grant is not None:
        wherelist.append(' grant_id=%i' % (grant))
    if createdbefore is not None:
        wherelist.append(" created<'%s'" %
                         (createdbefore.strftime("%Y-%m-%d %H:%M:%S")))
    if createdafter is not None:
        wherelist.append(" created>'%s'" %
                         (createdafter.strftime("%Y-%m-%d %H:%M:%S")))
    if confirmed is not None:
        if confirmed: #this is mysql specific, i think.
            wherelist.append(' confirmed=1')
        else:
            wherelist.append(' confirmed=0')
    # Each clause starts with a space, so 'where' + ' and'-joined clauses
    # reads 'where a=1 and b=2'.
    if len(wherelist)>0:
        wherestmt = 'where'+' and'.join(wherelist)
    else:
        wherestmt = ''
    query = ('select sum(amount) '+
             'from '+connection.ops.quote_name(tablename)+' '+
             wherestmt)
    cursor = connection.cursor()
    cursor.execute(query, [])
    try:
        # SUM over zero rows yields NULL -> int(None) raises -> return 0.
        return int(cursor.fetchall()[0][0])
    except:
        return 0
class PageVisit(models.Model):
    '''statistical info on visitors'''
    class Admin: pass
    page = models.CharField(max_length=50)
    who = models.ForeignKey(User, blank=True, null=True)
    when = models.DateTimeField(auto_now=True)
    ip = models.CharField(max_length=20, blank=True, null=True)
    host = models.TextField(blank=True, null=True)
    ref = models.TextField(blank=True, null=True)
    agent = models.TextField(blank=True, null=True)
    def __unicode__(self):
        s = u''
        if self.ip is not None:
            s += unicode(self.ip) + u' '
        if self.host is not None:
            s += unicode(self.host) + u' '
        s += u' ' + unicode(self.when)
        return s
def logvisit(page, request):
    '''
    logs info about a person visiting a page.
    '''
    pv = PageVisit(page=page)
    visitor = request.user
    if not isinstance(visitor, AnonymousUser):
        pv.who = visitor
    # Copy the interesting request metadata, skipping anything absent.
    meta_fields = (('ip', 'REMOTE_ADDR'),
                   ('host', 'REMOTE_HOST'),
                   ('ref', 'HTTP_REFERER'),
                   ('agent', 'HTTP_USER_AGENT'))
    for attr, meta_key in meta_fields:
        value = request.META.get(meta_key, '')
        if value != '':
            setattr(pv, attr, value)
    pv.save()
class LastVisited(models.Model):
    """Per-user, per-page 'last seen' timestamp (auto_now bumps `when`)."""
    class Admin: pass
    page = models.CharField(max_length=50)
    who = models.ForeignKey(User)
    when = models.DateTimeField(auto_now=True)
    def __unicode__(self):
        return (unicode(self.who.username) +
                u' visited ' +
                unicode(self.page) +
                u' on ' +
                unicode(self.when))
def visit(page, user, keep=False):
    '''
    Record that *user* visited *page* now.

    By default the most recent LastVisited row is re-saved (auto_now bumps
    its timestamp); pass keep=True to always create a fresh row. If no row
    exists yet, one is created regardless.
    '''
    if not keep:
        try:
            # Re-saving the newest row refreshes `when` via auto_now.
            LastVisited.objects.filter(
                page=page).filter(
                who=user).order_by(
                '-when')[0].save()
        except:
            # No prior visit (IndexError): fall through to creating one.
            keep = True
    if keep:
        LastVisited(page=page,
                    who=user).save()
    # NOTE(review): leftover Python 2 debug print -- consider removing.
    print page
    # NOTE(review): this condition is True unless the page name contains
    # BOTH 'inbox' and 'account'; the intent was probably `>= 0` (i.e. only
    # invalidate the unread-count cache for inbox/account pages) -- confirm.
    if page.find('inbox') < 0 or page.find('account') < 0:
        invalidatecache('updatecounts|'+user.username)
    return
def visited(page, user, default=datetime.datetime(1980,1,1)):
    '''
    Timestamp of *user*'s most recent recorded visit to *page*, or
    *default* when there is none (or any lookup failure).
    '''
    try:
        latest = LastVisited.objects.filter(
            page=page).filter(
            who=user).order_by('-when')[0]
        return latest.when
    except:
        return default
class Attrib(models.Model):
    """An attribute *name* usable in the generic key/value store (AttrVal);
    kind/org optionally scope what it applies to."""
    class Admin: pass
    name = models.CharField(max_length=100, unique=True)
    kind = models.CharField(max_length=1, choices=KIND_CHOICES, blank=True, null=True)
    org = models.ForeignKey(Organization, related_name='attr_set', blank=True, null=True)
    description = models.TextField(blank=True, null=True)
    def __unicode__(self):
        return self.name
class AttrVal(models.Model):
    '''
    One attribute value attached to an arbitrary row: (attr, oid,
    tablename) identifies which row of which table carries the value.
    'oid' is an integer which should refer to the id of some row in some table.
    '''
    class Admin: pass
    attr = models.ForeignKey(Attrib, related_name='val_set')
    oid = models.IntegerField() #what we're applying the attribute to
    tablename = models.CharField(max_length=50) #what table it's from
    val = models.TextField()
    def __unicode__(self):
        return self.attr.name+u', '+unicode(self.oid)+u', '+self.tablename+u': '+self.val
def attrs_cachename(oid, table):
    """Cache key for the full attribute dict of row *oid* in *table*."""
    return u"attrs|%s|%s" % (oid, table)
def attrs_cache_f(d):
    """Cache-miss loader for get_attrs: build {name: value} for one row.

    Each value is re-fetched through get_attr (so it also lands in the
    per-attribute cache); pairs where either side is None are dropped.
    """
    vals = AttrVal.objects.filter(
        oid=d['oid']).filter(tablename=d['table'])
    r = {}
    # Nested generator: inner yields (name, value) pairs, outer filters Nones.
    r.update(
        (a,b) for (a,b)
        in ((val.attr.name,
             get_attr(val.attr.name,d['obj']))
            for val in vals)
        if a is not None and b is not None)
    return r
def get_attrs(obj):
    '''
    returns a dictionary, attribute_name:value, containing all
    attribute/value pairs involving the object passed. the object must
    be an instance of a django model, so that i can figure out its
    table. Returns {} for None; results flow through the cache layer.
    '''
    if obj is None:
        return {}
    row_id = obj.id
    table_name = obj._meta.db_table
    loader_args = {'oid': row_id,
                   'table': table_name,
                   'obj': obj,
                   }
    return withcache(attrs_cachename(row_id, table_name),
                     attrs_cache_f,
                     loader_args)
def attr_cachename(oid, table, key):
    """Cache key for one attribute *key* of row *oid* in *table*."""
    return u"attr|%s|%s|%s" % (oid, table, key)
def attr_cache_f(d):
    """Cache-miss loader for get_attr: fetch one AttrVal's value.

    Returns the integer 0 as the 'not found' sentinel (get_attr relies on
    this; stored values are strings and never compare equal to int 0).
    """
    matches = AttrVal.objects.filter(
        oid=d['oid']).filter(
        tablename=d['table']).filter(
        attr__name=d['key'])
    if matches.count() == 0:
        return 0
    return matches[0].val
def get_attr(key, obj, default='assplode'):
    '''
    like get_attrs, but returns just a value corresponding to an
    attribute name. if you want, have a default for when no value is
    found, otherwise we raise hell.
    '''
    # Int 0 doubles as the "missing" sentinel (attr_cache_f returns 0 when
    # no AttrVal row exists); stored values are TextField strings, which
    # never compare equal to int 0.
    retv = 0
    if obj is not None:
        oid = obj.id
        table = obj._meta.db_table
        retv = withcache(attr_cachename(oid, table, key),
                         attr_cache_f,
                         {'oid':oid,
                          'table':table,
                          'key':key,
                          })
    if retv != 0:
        return retv
    elif retv == 0 and default == 'assplode':
        # Deliberate crash: `hell` is undefined, so this raises NameError.
        raise hell
    elif retv == 0:
        return default
def set_attr(key, val, obj):
    '''
    sets a key/value pair corresponding to an object. if the key name
    had not existed before, its attribute table row will be created.

    Both bare excepts implement get-or-create: first for the Attrib name
    row, then for the AttrVal row (IndexError when no row exists yet).
    Finishes by invalidating both attribute caches for this object.
    '''
    oid = obj.id
    table = obj._meta.db_table
    attr = None
    try:
        attr = Attrib.objects.get(name=key)
    except:
        attr = Attrib(name=key)
        attr.save()
    av = None
    try:
        av = AttrVal.objects.filter(
            attr=attr).filter(
            oid = oid).filter(
            tablename = table)[0]
        av.val = val
    except:
        av = AttrVal(attr=attr,
                     oid = oid,
                     tablename = table,
                     val = val)
    av.save()
    invalidatecache(attrs_cachename(oid, table))
    invalidatecache(attr_cachename(oid, table, key))
    return
'''
sudo-related
'''
def sudoable(olduser, newuser):
    """May *olduser* impersonate *newuser*?

    Staff always can; an organization may impersonate its own recipients.
    Anonymous users never can. (Both profiles are resolved up front, so a
    missing profile raises, matching the original behavior.)
    """
    if isinstance(olduser, AnonymousUser):
        return False
    new_obj = newuser.get_profile().get_object()
    old_obj = olduser.get_profile().get_object()
    if olduser.is_staff:
        return True
    if isinstance(new_obj, Recipient) and new_obj.org == old_obj:
        return True
    return False
def sudoid(request, newuid):
    """Sudo by user id: look up the User row, then delegate to sudo()."""
    target = User.objects.get(id=newuid)
    return sudo(request, target)
def sudo(request, newuser):
    """Begin impersonating *newuser*; True on success, False when refused.

    `raise hell` is a NameError used as a cheap abort; the bare except
    converts it (and any other failure) into the False return.
    """
    try:
        newuid = newuser.id
        if not sudoable(request.user, newuser):
            raise hell
        # The session key 'fauxuid' marks the impersonated user id.
        request.session['fauxuid'] = newuid
    except:
        return False
    return True
def sudone(request):
    """End any active impersonation for this session."""
    if 'fauxuid' in request.session:
        del request.session['fauxuid']
    return
def apparent_user(request):
    """The user the request should be treated as: the impersonated user when
    a sudo session is active, otherwise request.user.

    Mutates the returned User object: `.olduser` points at the real user
    while impersonating (None otherwise), and anonymous users get stub
    `is_staff`/`username` attributes so templates can read them.
    """
    user = request.user
    user.olduser = None
    if isinstance(user, AnonymousUser):
        user.is_staff = False
        user.username = ''
    if 'fauxuid' not in request.session:
        return user
    newuid = int(request.session['fauxuid'])
    newuser = User.objects.get(id=newuid)
    if not sudoable(request.user, newuser):
        # NOTE(review): `hell` is undefined, so a stale/forged session raises
        # NameError out of this function -- presumably intentional; confirm.
        raise hell
    newuser.olduser = request.user
    return newuser
def canviewunapproved(request):
    """Only staff (as the apparent, possibly impersonated, user) may see
    unapproved content."""
    viewer = apparent_user(request)
    if isinstance(viewer, AnonymousUser):
        return False
    return viewer.is_staff
def parsetags(tagstring, user=None, is_staff=None):
    """Split a comma-separated tag string into a sorted list of clean tags.

    - Tags are stripped to alphanumerics plus '. -_' and lowercased.
    - Staff-only tags (STAFF_TAGS) are dropped for non-staff, except
      'notes from the field', which fellows may also use.
    - 'team <slug>' tags are kept only when the user may blog to that team.
    """
    tags = []
    team = None
    if user is not None and is_staff is None:
        is_staff = user.is_staff
    elif is_staff is None:
        is_staff = False
    for tag in tagstring.split(','):
        tag = tag.strip()
        tag = ''.join(
            [c for c in tag if
             c.isalnum() or c in '. -_'])
        # `lower`, `take`, `drop` are module-level helpers defined elsewhere
        # in this file -- presumably str.lower and itertools-style slicers.
        tag = lower(tag)
        if (len(tag) > 0 and len(tag) <= TAG_MAX_LENGTH and
            (is_staff or tag not in STAFF_TAGS or
             (tag=='notes from the field' and
              get_attr('isfellow',user,'False') == 'True'))):
            if ''.join(take(5,tag)) == 'team ':
                try:
                    team = DonorGroup.objects.get(
                        slug=''.join(drop(5,tag)))
                    if team.can_blog(user):
                        tags.append(tag)
                except: pass
            else:
                tags.append(tag)
    tags.sort()
    return tags
|
hack4impact/Givology
|
mainSite/source/proj/giv/models.py
|
Python
|
mit
| 54,236
|
[
"VisIt"
] |
99724722fb82d4a594886abbbe726fbd49c89ffc610dea26774f77891d37a9d4
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import random
import urllib
import json
from google.appengine.api import urlfetch
# Lowercased trigger word -> emoji; adjust_input() swaps matching words
# in user text for the corresponding emoji.
emoji_dic = {
    "happy": "😀",
    "winking": "😉",
    "kissing": "😗",
    "smirking": "😏",
    "crying": "😢",
    "woman": "👩",
    "peach": "🍑",
    "eggplant": "🍆"
}
headers = {'content-type': 'application/json', "Ocp-Apim-Subscription-Key": "3338f3285193465e907eb03a5b2b214a", "Accept": "application/json"}
def adjust_input(i):
    """Rebuild the sentence from word list *i*, replacing any word whose
    lowercase form names a known emoji with the emoji itself."""
    translated = [emoji_dic.get(word.lower(), word) for word in i]
    return " ".join(translated)
def oge_response(response):
    """Return Oge's canned advice for the user's text *response*.

    Flow: empty input gets a nudge; any word naming a known emoji is echoed
    back translated; otherwise the text is scored by the Azure Text
    Analytics sentiment API (score in [0, 1]; 0 = most negative) and a
    canned reply is picked from the matching bucket.
    """
    parsed_res = response.split()
    if len(parsed_res) == 0:
        return "How can Oge give advice if you don't write anything?"
    request_body = { "documents": [{"language": "en",
                                    "id": "1",
                                    "text": response,
                                    }]}
    url = 'https://westus.api.cognitive.microsoft.com/text/analytics/v2.0/sentiment'
    response = urlfetch.fetch(
        url,
        headers=headers,
        method='POST',
        payload=json.dumps(request_body)
    )
    sentiment_json = json.loads(response.content)
    sentiment = sentiment_json["documents"][0]["score"]
    negative_responses = ["As Helen Keller once said, Keep your face to the sunshine and you cannot see a shadow.",
    "As said by the Dalai Lama, In order to carry a positive action we must develop here a positive vision.", "I am sorry about how you feel. A quote I really enjoy is one said by Mewtwo from Pokemon, I see now that the circumstances of one's birth are irrelevant. It is what you do with the gift of life that determines who you are",
    "Your feelings are unfortunate, and I would suggest this quote by Martian Manhunter, The future is worth it. All the pain. All the tears. The future is worth the fight", "If you love someone, set them free. If they come back they're yours; if they don't they never were. - Richard Bach", "Truth is everybody is going to hurt you: you just gotta find the ones worth suffering for. - Bob Marley", "Never love someone who treats you like you are ordinary - Oscar Wilde"]
    neutral_responses = ["A quote I believe applies to a lot of what we do in life is 'If something is too hard, either you are not doing it right or it is not worth doing'", "The opportunity for success is there, you just need to acknowledge its presence and grasp it" , "In anything you do, you are only as good as you think you are.", "Anything is possible when you have inner peace - Master Shifu(Kung Fu Panda)","To laugh at yourself is to love yourself - Mickey Mouse", "Sucking at something is the first step to becoming sorta good at something - Jake from Adventure Time"]
    #
    positive_responses = ["Congratulations! You seem to be happy and that’s great! Life is short so enjoying it to its fullest is something special!", "Your happiness is something to be admired! You’re so lucky that things are working out in your life and I hope that things continue to work out!", "That’s wonderful! As an experienced giver of relationship advice, it is always great to hear about people whose lives are filled with joy", "I’m so happy for you! Your joy brings me joy as well!", "Good for you! May happiness forever be in your life!"]
    for ele in parsed_res:
        if ele.lower() in emoji_dic:
            return "I'm sorry, did you mean \"%s\"" % (adjust_input(parsed_res))
    # Bug fix: the old test `sentiment > 0 and sentiment < 0.33` excluded a
    # score of exactly 0.0 (the most negative possible), which then fell
    # through to the positive bucket. The buckets now partition [0, 1].
    if sentiment < 0.33:
        return "%s <br><br>Also, you shouldn't have to be alone during this time. I would suggest getting a cat to keep you company! Please visit our Ways to Be Happy Page!"% (negative_responses[random.randint(0, len(negative_responses)-1)])
    elif sentiment < 0.67:
        # NOTE(review): neutral_responses is defined but unused; the literal
        # "NEUTRAL" appears to be what callers expect -- confirm.
        return "NEUTRAL"
    else:
        return positive_responses[random.randint(0, len(positive_responses)-1)]
|
ivanasamy/OGE-CSSI
|
dear-oge/OGE.py
|
Python
|
mit
| 4,056
|
[
"VisIt"
] |
3cfff32a5137ca3486773dd47dc77b41a343e29d9f4997bc36ebd08b36a7db4b
|
from splinter import Browser
from splinter import Browser
class AbstractPage:
    """Base page object for functional tests; subclasses override `url`
    with the screen they model."""

    url = "NO_URL_FOR_ABSTRACT_SCREEN"

    def load(self):
        """Open a fresh browser at this page's url; returns self so calls
        can be chained."""
        self.browser = Browser()
        self.browser.visit(self.url)
        return self

    def close_page(self):
        """Shut down the browser owned by this page object."""
        self.browser.quit()
|
mbanje/ureport_uganda
|
functional_test/fixtures/abstract_page.py
|
Python
|
bsd-3-clause
| 286
|
[
"VisIt"
] |
9b2fd53bc3283dc1176335f74b65fc8e34d55e830d6ab111749438e59a23548e
|
# $HeadURL$
__RCSID__ = "$Id$"
import os
import time
import re
import threading
import zipfile
import zlib
import DIRAC
from DIRAC.ConfigurationSystem.Client.ConfigurationData import gConfigurationData, ConfigurationData
from DIRAC.ConfigurationSystem.private.Refresher import gRefresher
from DIRAC.FrameworkSystem.Client.Logger import gLogger
from DIRAC.Core.Utilities.ReturnValues import S_OK, S_ERROR
from DIRAC.Core.DISET.RPCClient import RPCClient
class ServiceInterface( threading.Thread ):
  def __init__( self, sURL ):
    """Set up the Configuration Service at *sURL*.

    Slaves auto-refresh from the master and republish; the master disables
    refreshing, loads the configuration from disk, and starts the thread
    that prunes dead slaves.
    """
    threading.Thread.__init__( self )
    self.sURL = sURL
    gLogger.info( "Initializing Configuration Service", "URL is %s" % sURL )
    # CS paths whose changes must not trigger a new configuration version.
    self.__modificationsIgnoreMask = [ '/DIRAC/Configuration/Servers', '/DIRAC/Configuration/Version' ]
    gConfigurationData.setAsService()
    if not gConfigurationData.isMaster():
      gLogger.info( "Starting configuration service as slave" )
      gRefresher.autoRefreshAndPublish( self.sURL )
    else:
      gLogger.info( "Starting configuration service as master" )
      gRefresher.disable()
      self.__loadConfigurationData()
    # slave URL -> timestamp of last ping; pruned by the checker thread.
    self.dAliveSlaveServers = {}
    self.__launchCheckSlaves()
  def isMaster( self ):
    """True when this service instance holds the master configuration."""
    return gConfigurationData.isMaster()
  def __launchCheckSlaves( self ):
    """Start this Thread (daemonized) to periodically purge dead slaves.

    NOTE(review): run() is defined outside this excerpt -- presumably it
    loops calling __checkSlavesStatus; confirm. setDaemon(1) is the legacy
    spelling of daemon=True.
    """
    gLogger.info( "Starting purge slaves thread" )
    self.setDaemon( 1 )
    self.start()
  def __loadConfigurationData( self ):
    """Load the CS data from disk; on the master, ensure name, version and
    server list are present, writing the configuration back if changed."""
    try:
      os.makedirs( os.path.join( DIRAC.rootPath, "etc", "csbackup" ) )
    except:
      # Directory already exists (or cannot be created) -- proceed either way.
      pass
    gConfigurationData.loadConfigurationData()
    if gConfigurationData.isMaster():
      bBuiltNewConfiguration = False
      if not gConfigurationData.getName():
        DIRAC.abort( 10, "Missing name for the configuration to be exported!" )
      gConfigurationData.exportName()
      sVersion = gConfigurationData.getVersion()
      if sVersion == "0":
        gLogger.info( "There's no version. Generating a new one" )
        gConfigurationData.generateNewVersion()
        bBuiltNewConfiguration = True
      if self.sURL not in gConfigurationData.getServers():
        gConfigurationData.setServers( self.sURL )
        bBuiltNewConfiguration = True
      gConfigurationData.setMasterServer( self.sURL )
      if bBuiltNewConfiguration:
        gConfigurationData.writeRemoteConfigurationToDisk()
def __generateNewVersion( self ):
if gConfigurationData.isMaster():
gConfigurationData.generateNewVersion()
gConfigurationData.writeRemoteConfigurationToDisk()
def publishSlaveServer( self, sSlaveURL ):
if not gConfigurationData.isMaster():
return S_ERROR( "Configuration modification is not allowed in this server" )
gLogger.info( "Pinging slave %s" % sSlaveURL )
rpcClient = RPCClient( sSlaveURL, timeout = 10, useCertificates = True )
retVal = rpcClient.ping()
if not retVal[ 'OK' ]:
gLogger.info( "Slave %s didn't reply" % sSlaveURL )
return
if retVal[ 'Value' ][ 'name' ] != 'Configuration/Server':
gLogger.info( "Slave %s is not a CS serveR" % sSlaveURL )
return
bNewSlave = False
if not sSlaveURL in self.dAliveSlaveServers.keys():
bNewSlave = True
gLogger.info( "New slave registered", sSlaveURL )
self.dAliveSlaveServers[ sSlaveURL ] = time.time()
if bNewSlave:
gConfigurationData.setServers( "%s, %s" % ( self.sURL,
", ".join( self.dAliveSlaveServers.keys() ) ) )
self.__generateNewVersion()
def __checkSlavesStatus( self, forceWriteConfiguration = False ):
gLogger.info( "Checking status of slave servers" )
iGraceTime = gConfigurationData.getSlavesGraceTime()
lSlaveURLs = self.dAliveSlaveServers.keys()
bModifiedSlaveServers = False
for sSlaveURL in lSlaveURLs:
if time.time() - self.dAliveSlaveServers[ sSlaveURL ] > iGraceTime:
gLogger.info( "Found dead slave", sSlaveURL )
del( self.dAliveSlaveServers[ sSlaveURL ] )
bModifiedSlaveServers = True
if bModifiedSlaveServers or forceWriteConfiguration:
gConfigurationData.setServers( "%s, %s" % ( self.sURL,
", ".join( self.dAliveSlaveServers.keys() ) ) )
self.__generateNewVersion()
def getCompressedConfiguration( self ):
sData = gConfigurationData.getCompressedData()
def updateConfiguration( self, sBuffer, commiter = "", updateVersionOption = False ):
if not gConfigurationData.isMaster():
return S_ERROR( "Configuration modification is not allowed in this server" )
#Load the data in a ConfigurationData object
oRemoteConfData = ConfigurationData( False )
oRemoteConfData.loadRemoteCFGFromCompressedMem( sBuffer )
if updateVersionOption:
oRemoteConfData.setVersion( gConfigurationData.getVersion() )
#Test that remote and new versions are the same
sRemoteVersion = oRemoteConfData.getVersion()
sLocalVersion = gConfigurationData.getVersion()
gLogger.info( "Checking versions\nremote: %s\nlocal: %s" % ( sRemoteVersion, sLocalVersion ) )
if sRemoteVersion != sLocalVersion:
if not gConfigurationData.mergingEnabled():
return S_ERROR( "Local and remote versions differ (%s vs %s). Cannot commit." % ( sLocalVersion, sRemoteVersion ) )
else:
gLogger.info( "AutoMerging new data!" )
if updateVersionOption:
return S_ERROR( "Cannot AutoMerge! version was overwritten" )
result = self.__mergeIndependentUpdates( oRemoteConfData )
if not result[ 'OK' ]:
gLogger.warn( "Could not AutoMerge!", result[ 'Message' ] )
return S_ERROR( "AutoMerge failed: %s" % result[ 'Message' ] )
requestedRemoteCFG = result[ 'Value' ]
gLogger.info( "AutoMerge successful!" )
oRemoteConfData.setRemoteCFG( requestedRemoteCFG )
#Test that configuration names are the same
sRemoteName = oRemoteConfData.getName()
sLocalName = gConfigurationData.getName()
if sRemoteName != sLocalName:
return S_ERROR( "Names differ: Server is %s and remote is %s" % ( sLocalName, sRemoteName ) )
#Update and generate a new version
gLogger.info( "Committing new data..." )
gConfigurationData.lock()
gLogger.info( "Setting the new CFG" )
gConfigurationData.setRemoteCFG( oRemoteConfData.getRemoteCFG() )
gConfigurationData.unlock()
gLogger.info( "Generating new version" )
gConfigurationData.generateNewVersion()
#self.__checkSlavesStatus( forceWriteConfiguration = True )
gLogger.info( "Writing new version to disk!" )
retVal = gConfigurationData.writeRemoteConfigurationToDisk( "%s@%s" % ( commiter, gConfigurationData.getVersion() ) )
gLogger.info( "New version it is!" )
return retVal
def getCompressedConfigurationData( self ):
return gConfigurationData.getCompressedData()
def getVersion( self ):
return gConfigurationData.getVersion()
def getCommitHistory( self ):
files = self.__getCfgBackups( gConfigurationData.getBackupDir() )
backups = [ ".".join( fileName.split( "." )[1:-1] ).split( "@" ) for fileName in files ]
return backups
def run( self ):
while True:
iWaitTime = gConfigurationData.getSlavesGraceTime()
time.sleep( iWaitTime )
self.__checkSlavesStatus()
def getVersionContents( self, date ):
backupDir = gConfigurationData.getBackupDir()
files = self.__getCfgBackups( backupDir, date )
for fileName in files:
zFile = zipfile.ZipFile( "%s/%s" % ( backupDir, fileName ), "r" )
cfgName = zFile.namelist()[0]
#retVal = S_OK( zlib.compress( str( fd.read() ), 9 ) )
retVal = S_OK( zlib.compress( zFile.read( cfgName ) , 9 ) )
zFile.close()
return retVal
return S_ERROR( "Version %s does not exist" % date )
def __getCfgBackups( self, basePath, date = "", subPath = "" ):
rs = re.compile( "^%s\..*%s.*\.zip$" % ( gConfigurationData.getName(), date ) )
fsEntries = os.listdir( "%s/%s" % ( basePath, subPath ) )
fsEntries.sort( reverse = True )
backupsList = []
for entry in fsEntries:
entryPath = "%s/%s/%s" % ( basePath, subPath, entry )
if os.path.isdir( entryPath ):
backupsList.extend( self.__getCfgBackups( basePath, date, "%s/%s" % ( subPath, entry ) ) )
elif os.path.isfile( entryPath ):
if rs.search( entry ):
backupsList.append( "%s/%s" % ( subPath, entry ) )
return backupsList
def __getPreviousCFG( self, oRemoteConfData ):
remoteExpectedVersion = oRemoteConfData.getVersion()
backupsList = self.__getCfgBackups( gConfigurationData.getBackupDir(), date = oRemoteConfData.getVersion() )
if not backupsList:
return S_ERROR( "Could not AutoMerge. Could not retrieve original commiter's version" )
prevRemoteConfData = ConfigurationData()
backFile = backupsList[0]
if backFile[0] == "/":
backFile = os.path.join( gConfigurationData.getBackupDir(), backFile[1:] )
try:
prevRemoteConfData.loadConfigurationData( backFile )
except Exception, e:
return S_ERROR( "Could not load original commiter's version: %s" % str( e ) )
gLogger.info( "Loaded client original version %s" % prevRemoteConfData.getVersion() )
return S_OK( prevRemoteConfData.getRemoteCFG() )
def _checkConflictsInModifications( self, realModList, reqModList, parentSection = "" ):
realModifiedSections = dict( [ ( modAc[1], modAc[3] ) for modAc in realModList if modAc[0].find( 'Sec' ) == len( modAc[0] ) - 3 ] )
reqOptionsModificationList = dict( [ ( modAc[1], modAc[3] ) for modAc in reqModList if modAc[0].find( 'Opt' ) == len( modAc[0] ) - 3 ] )
optionModRequests = 0
for modAc in reqModList:
action = modAc[0]
objectName = modAc[1]
if action == "addSec":
if objectName in realModifiedSections:
return S_ERROR( "Section %s/%s already exists" % ( parentSection, objectName ) )
elif action == "delSec":
if objectName in realModifiedSections:
return S_ERROR( "Section %s/%s cannot be deleted. It has been modified." % ( parentSection, objectName ) )
elif action == "modSec":
if objectName in realModifiedSections:
result = self._checkConflictsInModifications( realModifiedSections[ objectName ],
modAc[3], "%s/%s" % ( parentSection, objectName ) )
if not result[ 'OK' ]:
return result
for modAc in realModList:
action = modAc[0]
objectName = modAc[1]
if action.find( "Opt" ) == len( action ) - 3:
return S_ERROR( "Section %s cannot be merged. Option %s/%s has been modified" % ( parentSection, parentSection, objectName ) )
return S_OK()
def __mergeIndependentUpdates( self, oRemoteConfData ):
#return S_ERROR( "AutoMerge is still not finished. Meanwhile... why don't you get the newest conf and update from there?" )
#Get all the CFGs
curSrvCFG = gConfigurationData.getRemoteCFG().clone()
curCliCFG = oRemoteConfData.getRemoteCFG().clone()
result = self.__getPreviousCFG( oRemoteConfData )
if not result[ 'OK' ]:
return result
prevCliCFG = result[ 'Value' ]
#Try to merge curCli with curSrv. To do so we check the updates from
# prevCli -> curSrv VS prevCli -> curCli
prevCliToCurCliModList = prevCliCFG.getModifications( curCliCFG )
prevCliToCurSrvModList = prevCliCFG.getModifications( curSrvCFG )
result = self._checkConflictsInModifications( prevCliToCurSrvModList,
prevCliToCurCliModList )
if not result[ 'OK' ]:
return S_ERROR( "Cannot AutoMerge: %s" % result[ 'Message' ] )
#Merge!
result = curSrvCFG.applyModifications( prevCliToCurCliModList )
if not result[ 'OK' ]:
return result
return S_OK( curSrvCFG )
|
avedaee/DIRAC
|
ConfigurationSystem/private/ServiceInterface.py
|
Python
|
gpl-3.0
| 11,873
|
[
"DIRAC"
] |
892d09d4ec7fd05f102114471361cc556b00b9a2794711963d9c1e09b1d1a801
|
from copy import deepcopy
import numpy as np
from numpy.polynomial.chebyshev import chebval
from ..utils.smoothing import smoothspec
from .constants import cosmo, lightspeed, jansky_cgs, to_cgs_at_10pc
try:
import fsps
from sedpy.observate import getSED
except(ImportError):
pass
# Public API of this module.
__all__ = ["SSPBasis", "FastSSPBasis", "FastStepBasis",
           "MultiSSPBasis"]
# Flux conversion used below: Lsun/Hz -> erg/s/cm^2/Hz at 10 pc.
to_cgs = to_cgs_at_10pc
class SSPBasis(object):
    """This is a class that wraps the fsps.StellarPopulation object, which is
    used for producing SSPs.  The ``fsps.StellarPopulation`` object is accessed
    as ``SSPBasis().ssp``.
    This class allows for the custom calculation of relative SSP weights (by
    overriding ``all_ssp_weights``) to produce spectra from arbitrary composite
    SFHs. Alternatively, the entire ``get_galaxy_spectrum`` method can be
    overridden to produce a galaxy spectrum in some other way, for example
    taking advantage of weight calculations within FSPS for tabular SFHs or for
    parametric SFHs.
    The base implementation here produces an SSP interpolated to the age given
    by ``tage``, with initial mass given by ``mass``.  However, this is much
    slower than letting FSPS calculate the weights, as implemented in
    :py:class:`FastSSPBasis`.
    Furthermore, smoothing, redshifting, and filter projections are handled
    outside of FSPS, allowing for fast and more flexible algorithms.
    :param reserved_params:
        These are parameters which have names like the FSPS parameters but will
        not be passed to the StellarPopulation object because we are overriding
        their functionality using (hopefully more efficient) custom algorithms.
    """
    def __init__(self, zcontinuous=1, reserved_params=['tage', 'sigma_smooth'],
                 interp_type='logarithmic', flux_interp='linear',
                 mint_log=-3, compute_vega_mags=False,
                 **kwargs):
        """
        :param interp_type: (default: "logarithmic")
            Specify whether to linearly interpolate the SSPs in log(t) or t.
            For the latter, set this to "linear".
        :param flux_interp: (default: "linear")
            Whether to compute the final spectrum as \sum_i w_i f_i or
            e^{\sum_i w_i ln(f_i)}.  Basically you should always do the former,
            which is the default.
        :param mint_log: (default: -3)
            The log of the age (in years) of the youngest SSP.  Note that the
            SSP at this age is assumed to have the same spectrum as the minimum
            age SSP available from fsps.  Typically anything less than 4 or so
            is fine for this parameter, since the integral converges as log(t)
            -> -inf
        :param reserved_params:
            These are parameters which have names like the FSPS parameters but
            will not be passed to the StellarPopulation object because we are
            overriding their functionality using (hopefully more efficient)
            custom algorithms.
        """
        self.interp_type = interp_type
        self.mint_log = mint_log
        self.flux_interp = flux_interp
        self.ssp = fsps.StellarPopulation(compute_vega_mags=compute_vega_mags,
                                          zcontinuous=zcontinuous)
        # sfh=0 selects pure SSPs in FSPS.
        self.ssp.params['sfh'] = 0
        self.reserved_params = reserved_params
        self.params = {}
        self.update(**kwargs)
    def update(self, **params):
        """Update the parameters, passing the *unreserved* FSPS parameters
        through to the ``fsps.StellarPopulation`` object.
        :param params:
            A parameter dictionary.
        """
        for k, v in params.items():
            # try to make parameters scalar
            try:
                if (len(v) == 1) and callable(v[0]):
                    self.params[k] = v[0]
                else:
                    self.params[k] = np.squeeze(v)
            except:
                # len() failed -- v is already a scalar (or a callable).
                self.params[k] = v
            # Parameters named like FSPS params but that we reserve for use
            # here.  Do not pass them to FSPS.
            if k in self.reserved_params:
                continue
            # Otherwise if a parameter exists in the FSPS parameter set, pass a
            # copy of it in.
            if k in self.ssp.params.all_params:
                self.ssp.params[k] = deepcopy(v)
        # We use FSPS for SSPs !!ONLY!!
        # except for FastStepBasis. And CSPSpecBasis. and...
        # assert self.ssp.params['sfh'] == 0
    def get_galaxy_spectrum(self, **params):
        """Update parameters, then multiply SSP weights by SSP spectra and
        stellar masses, and sum.
        :returns wave:
            Wavelength in angstroms.
        :returns spectrum:
            Spectrum in units of Lsun/Hz/solar masses formed.
        :returns mass_fraction:
            Fraction of the formed stellar mass that still exists.
        """
        self.update(**params)
        # Get the SSP spectra and masses (caching the latter), adding an extra
        # mass and spectrum for t=0, using the first SSP spectrum.
        wave, ssp_spectra = self.ssp.get_spectrum(tage=0, peraa=False)
        ssp_spectra = np.vstack([ssp_spectra[0, :], ssp_spectra])
        self.ssp_stellar_masses = np.insert(self.ssp.stellar_mass, 0, 1.0)
        if self.flux_interp == 'logarithmic':
            ssp_spectra = np.log(ssp_spectra)
        # Get weighted sum of spectra, adding the t=0 spectrum using the first SSP.
        weights = self.all_ssp_weights
        spectrum = np.dot(weights, ssp_spectra) / weights.sum()
        if self.flux_interp == 'logarithmic':
            spectrum = np.exp(spectrum)
        # Get the weighted stellar_mass/mformed ratio
        mass_frac = (self.ssp_stellar_masses * weights).sum() / weights.sum()
        return wave, spectrum, mass_frac
    def get_galaxy_elines(self):
        """Get the wavelengths and specific emission line luminosity of the nebular emission lines
        predicted by FSPS. These lines are in units of Lsun/solar mass formed.
        This assumes that `get_galaxy_spectrum` has already been called.
        :returns ewave:
            The *restframe* wavelengths of the emission lines, AA
        :returns elum:
            Specific luminosities of the nebular emission lines,
            Lsun/stellar mass formed
        """
        ewave = self.ssp.emline_wavelengths
        # This allows subclasses to set their own specific emission line
        # luminosities within other methods, e.g., get_galaxy_spectrum, by
        # populating the `_specific_line_luminosity` attribute.
        elum = getattr(self, "_line_specific_luminosity", None)
        if elum is None:
            elum = self.ssp.emline_luminosity.copy()
            if elum.ndim > 1:
                elum = elum[0]
            if self.ssp.params["sfh"] == 3:
                # tabular sfh: FSPS gives total line luminosity, so divide by
                # the total mass formed to get a specific luminosity.
                mass = np.sum(self.params.get('mass', 1.0))
                elum /= mass
        return ewave, elum
    def get_spectrum(self, outwave=None, filters=None, peraa=False, **params):
        """Get a spectrum and SED for the given params.
        :param outwave: (default: None)
            Desired *vacuum* wavelengths.  Defaults to the values in
            ``sps.wavelength``.
        :param peraa: (default: False)
            If `True`, return the spectrum in erg/s/cm^2/AA instead of AB
            maggies.
        :param filters: (default: None)
            A list of filter objects for which you'd like photometry to be calculated.
        :param params:
            Optional keywords giving parameter values that will be used to
            generate the predicted spectrum.
        :returns spec:
            Observed frame spectrum in AB maggies, unless ``peraa=True`` in which
            case the units are erg/s/cm^2/AA.
        :returns phot:
            Observed frame photometry in AB maggies.
        :returns mass_frac:
            The ratio of the surviving stellar mass to the total mass formed.
        """
        # Spectrum in Lsun/Hz per solar mass formed, restframe
        wave, spectrum, mfrac = self.get_galaxy_spectrum(**params)
        # Redshifting + Wavelength solution
        # We do it ourselves.
        a = 1 + self.params.get('zred', 0)
        af = a
        b = 0.0
        if 'wavecal_coeffs' in self.params:
            # Chebyshev polynomial wavelength calibration, evaluated on the
            # wavelength grid rescaled to [-1, 1].
            x = wave - wave.min()
            x = 2.0 * (x / x.max()) - 1.0
            c = np.insert(self.params['wavecal_coeffs'], 0, 0)
            # assume coefficients give shifts in km/s
            b = chebval(x, c) / (lightspeed*1e-13)
        wa, sa = wave * (a + b), spectrum * af  # Observed Frame
        if outwave is None:
            outwave = wa
        # Observed frame photometry, as absolute maggies
        if filters is not None:
            flambda = lightspeed/wa**2 * sa * to_cgs
            phot = 10**(-0.4 * np.atleast_1d(getSED(wa, flambda, filters)))
            # TODO: below is faster for sedpy > 0.2.0
            #phot = np.atleast_1d(getSED(wa, lightspeed/wa**2 * sa * to_cgs,
            #                            filters, linear_flux=True))
        else:
            phot = 0.0
        # Spectral smoothing.
        do_smooth = (('sigma_smooth' in self.params) and
                     ('sigma_smooth' in self.reserved_params))
        if do_smooth:
            # We do it ourselves.
            smspec = self.smoothspec(wa, sa, self.params['sigma_smooth'],
                                     outwave=outwave, **self.params)
        elif outwave is not wa:
            # Just interpolate
            smspec = np.interp(outwave, wa, sa, left=0, right=0)
        else:
            # no interpolation necessary
            smspec = sa
        # Distance dimming and unit conversion
        zred = self.params.get('zred', 0.0)
        if (zred == 0) or ('lumdist' in self.params):
            # Use 10pc for the luminosity distance (or a number
            # provided in the dist key in units of Mpc)
            dfactor = (self.params.get('lumdist', 1e-5) * 1e5)**2
        else:
            lumdist = cosmo.luminosity_distance(zred).value
            dfactor = (lumdist * 1e5)**2
        if peraa:
            # spectrum will be in erg/s/cm^2/AA
            smspec *= to_cgs / dfactor * lightspeed / outwave**2
        else:
            # Spectrum will be in maggies
            smspec *= to_cgs / dfactor / (3631*jansky_cgs)
        # Convert from absolute maggies to apparent maggies
        phot /= dfactor
        # Mass normalization
        mass = np.sum(self.params.get('mass', 1.0))
        if np.all(self.params.get('mass_units', 'mformed') == 'mstar'):
            # Convert input normalization units from current stellar mass to mass formed
            mass /= mfrac
        return smspec * mass, phot * mass, mfrac
    @property
    def all_ssp_weights(self):
        """Weights for a single age population.  This is a slow way to do this!
        Linearly interpolates between the two SSP ages bracketing ``tage``
        (in t or log t, depending on ``interp_type``).
        """
        if self.interp_type == 'linear':
            sspages = np.insert(10**self.logage, 0, 0)
            tb = self.params['tage'] * 1e9
        elif self.interp_type == 'logarithmic':
            sspages = np.insert(self.logage, 0, self.mint_log)
            tb = np.log10(self.params['tage']) + 9
        ind = np.searchsorted(sspages, tb)  # index of the higher bracketing lookback time
        dt = (sspages[ind] - sspages[ind - 1])
        ww = np.zeros(len(sspages))
        ww[ind - 1] = (sspages[ind] - tb) / dt
        ww[ind] = (tb - sspages[ind-1]) / dt
        return ww
    def smoothspec(self, wave, spec, sigma, outwave=None, **kwargs):
        """Smooth the spectrum; thin wrapper around ``utils.smoothing.smoothspec``."""
        outspec = smoothspec(wave, spec, sigma, outwave=outwave, **kwargs)
        return outspec
    @property
    def logage(self):
        # log10 ages (yrs) of the SSPs provided by FSPS.
        return self.ssp.ssp_ages.copy()
    @property
    def wavelengths(self):
        # Restframe wavelength grid (AA) of the FSPS spectra.
        return self.ssp.wavelengths.copy()
class FastSSPBasis(SSPBasis):
    """SSP spectra computed by delegating the age interpolation to FSPS
    itself, which is much faster than the pure-python weighting scheme in
    :py:class:`SSPBasis`.
    """
    def get_galaxy_spectrum(self, **params):
        """Update parameters and return ``(wave, spectrum, stellar_mass)``
        for a single SSP at the requested ``tage``.
        """
        self.update(**params)
        tage = float(self.params['tage'])
        wave, spec = self.ssp.get_spectrum(tage=tage, peraa=False)
        mstar = self.ssp.stellar_mass
        return wave, spec, mstar
class FastStepBasis(SSPBasis):
    """Subclass of :py:class:`SSPBasis` that implements a "nonparametric"
    (i.e. binned) SFH.  This is accomplished by generating a tabular SFH with
    the proper form to be passed to FSPS.  The key parameters for this SFH are:
      * ``agebins`` - array of shape ``(nbin, 2)`` giving the younger and older
        (in lookback time) edges of each bin in log10(years)
      * ``mass`` - array of shape ``(nbin,)`` giving the total stellar mass
        (in solar masses) **formed** in each bin.
    """
    def get_galaxy_spectrum(self, **params):
        """Construct the tabular SFH and feed it to the ``ssp``.
        :returns wave:
            Wavelength in angstroms.
        :returns spectrum:
            Spectrum in units of Lsun/Hz per solar mass *formed*.
        :returns mass_fraction:
            Fraction of the formed stellar mass that still exists.
        """
        self.update(**params)
        # --- check to make sure agebins have minimum spacing of 1million yrs ---
        #     (this can happen in flex models and will crash FSPS)
        if np.min(np.diff(10**self.params['agebins'])) < 1e6:
            # FIX(review): previously raised a bare ValueError with no message.
            raise ValueError("agebins must be separated by at least 10**6 yrs")
        mtot = self.params['mass'].sum()
        time, sfr, tmax = self.convert_sfh(self.params['agebins'], self.params['mass'])
        self.ssp.params["sfh"] = 3  # Hack to avoid rewriting the superclass
        self.ssp.set_tabular_sfh(time, sfr)
        wave, spec = self.ssp.get_spectrum(tage=tmax, peraa=False)
        # Normalize so the outputs are per solar mass formed.
        return wave, spec / mtot, self.ssp.stellar_mass / mtot
    def convert_sfh(self, agebins, mformed, epsilon=1e-4, maxage=None):
        """Given arrays of agebins and formed masses with each bin, calculate a
        tabular SFH.  The resulting time vector has time points either side of
        each bin edge with a "closeness" defined by a parameter epsilon.
        :param agebins:
            An array of bin edges, log(yrs).  This method assumes that the
            upper edge of one bin is the same as the lower edge of another bin.
            ndarray of shape ``(nbin, 2)``
        :param mformed:
            The stellar mass formed in each bin.  ndarray of shape ``(nbin,)``
        :param epsilon: (optional, default 1e-4)
            A small number used to define the fraction time separation of
            adjacent points at the bin edges.
        :param maxage: (optional, default: ``None``)
            A maximum age of stars in the population, in yrs.  If ``None`` then
            the maximum value of ``agebins`` is used.  Note that an error will
            occur if maxage < the maximum age in agebins.
        :returns time:
            The output time array for use with sfh=3, in Gyr.  ndarray of shape (2*N)
        :returns sfr:
            The output sfr array for use with sfh=3, in M_sun/yr.  ndarray of shape (2*N)
        :returns maxage:
            The maximum valid age in the returned isochrone, in Gyr.
        """
        # --- create time vector ---
        agebins_yrs = 10**agebins.T
        dt = agebins_yrs[1, :] - agebins_yrs[0, :]
        bin_edges = np.unique(agebins_yrs)
        if maxage is None:
            maxage = agebins_yrs.max()  # can replace maxage with something else, e.g. tuniv
        # Place a pair of points just inside either side of every bin edge.
        t = np.concatenate((bin_edges * (1.-epsilon), bin_edges * (1+epsilon)))
        t.sort()
        t = t[1:-1]  # remove older than oldest bin, younger than youngest bin
        # FSPS tabular SFHs are in forward time, not lookback time.
        fsps_time = maxage - t
        # --- calculate SFR at each t ---
        sfr = mformed / dt
        sfrout = np.zeros_like(t)
        sfrout[::2] = sfr
        sfrout[1::2] = sfr  # * (1+epsilon)
        return (fsps_time / 1e9)[::-1], sfrout[::-1], maxage / 1e9
class MultiSSPBasis(SSPBasis):
    """A collection of basis spectra spanning different ages, metallicities,
    and possibly dust attenuations.  Concrete subclasses must supply the
    actual spectrum calculation.
    """
    def get_galaxy_spectrum(self):
        """Abstract: subclasses must implement the spectrum calculation."""
        raise NotImplementedError
|
bd-j/prospector
|
prospect/sources/ssp_basis.py
|
Python
|
mit
| 15,977
|
[
"Galaxy"
] |
ef045c1bf00dfb718ba0c6f3a45740d993b18c332b87ff1d663b07d053c1a3cb
|
def endl(*args):
    """
    endl(ostream s) -> ostream
    Insert a newline character into the given C++ stream @p s.
    This is a wrapper around the underlying C++ OStream method
    <code>endl</code>. It inserts a newline into the stream
    passed as argument. Additionally, it flushes buffered
    streams.
    @param s the stream to which the newline should be written.
    @return the stream @p s.
    """
    # NOTE(review): SWIG-generated documentation stub.  As visible here the
    # body contains only the docstring, so this function returns None; the
    # working implementation presumably lives in the _libsbml extension
    # module -- confirm before relying on this callable.
def flush(*args):
    """
    flush(ostream s) -> ostream
    Flush the given C++ stream @p s.
    This is a wrapper around the underlying C++ OStream method
    <code>flush</code>. It flush any pending output in the stream
    passed as argument.
    @param s the stream to be flushed.
    @return the stream @p s.
    """
    # NOTE(review): SWIG-generated documentation stub.  As visible here the
    # body contains only the docstring, so this function returns None; the
    # working implementation presumably lives in the _libsbml extension
    # module -- confirm before relying on this callable.
XMLUnknownError = _libsbml.XMLUnknownError
## @var long XMLUnknownError
##
## A value in the enumeration of all the error and warning codes returned
## by the XML layer in libSBML. Please consult the documentation for
## XMLError for an explanation of the meaning of this particular error
## code.
XMLOutOfMemory = _libsbml.XMLOutOfMemory
## @var long XMLOutOfMemory
##
## A value in the enumeration of all the error and warning codes returned
## by the XML layer in libSBML. Please consult the documentation for
## XMLError for an explanation of the meaning of this particular error
## code.
XMLFileUnreadable = _libsbml.XMLFileUnreadable
## @var long XMLFileUnreadable
##
## A value in the enumeration of all the error and warning codes returned
## by the XML layer in libSBML. Please consult the documentation for
## XMLError for an explanation of the meaning of this particular error
## code.
XMLFileUnwritable = _libsbml.XMLFileUnwritable
## @var long XMLFileUnwritable
##
## A value in the enumeration of all the error and warning codes returned
## by the XML layer in libSBML. Please consult the documentation for
## XMLError for an explanation of the meaning of this particular error
## code.
XMLFileOperationError = _libsbml.XMLFileOperationError
## @var long XMLFileOperationError
##
## A value in the enumeration of all the error and warning codes returned
## by the XML layer in libSBML. Please consult the documentation for
## XMLError for an explanation of the meaning of this particular error
## code.
XMLNetworkAccessError = _libsbml.XMLNetworkAccessError
## @var long XMLNetworkAccessError
##
## A value in the enumeration of all the error and warning codes returned
## by the XML layer in libSBML. Please consult the documentation for
## XMLError for an explanation of the meaning of this particular error
## code.
InternalXMLParserError = _libsbml.InternalXMLParserError
## @var long InternalXMLParserError
##
## A value in the enumeration of all the error and warning codes returned
## by the XML layer in libSBML. Please consult the documentation for
## XMLError for an explanation of the meaning of this particular error
## code.
UnrecognizedXMLParserCode = _libsbml.UnrecognizedXMLParserCode
## @var long UnrecognizedXMLParserCode
##
## A value in the enumeration of all the error and warning codes returned
## by the XML layer in libSBML. Please consult the documentation for
## XMLError for an explanation of the meaning of this particular error
## code.
XMLTranscoderError = _libsbml.XMLTranscoderError
## @var long XMLTranscoderError
##
## A value in the enumeration of all the error and warning codes returned
## by the XML layer in libSBML. Please consult the documentation for
## XMLError for an explanation of the meaning of this particular error
## code.
MissingXMLDecl = _libsbml.MissingXMLDecl
## @var long MissingXMLDecl
##
## A value in the enumeration of all the error and warning codes returned
## by the XML layer in libSBML. Please consult the documentation for
## XMLError for an explanation of the meaning of this particular error
## code.
MissingXMLEncoding = _libsbml.MissingXMLEncoding
## @var long MissingXMLEncoding
##
## A value in the enumeration of all the error and warning codes returned
## by the XML layer in libSBML. Please consult the documentation for
## XMLError for an explanation of the meaning of this particular error
## code.
BadXMLDecl = _libsbml.BadXMLDecl
## @var long BadXMLDecl
##
## A value in the enumeration of all the error and warning codes returned
## by the XML layer in libSBML. Please consult the documentation for
## XMLError for an explanation of the meaning of this particular error
## code.
BadXMLDOCTYPE = _libsbml.BadXMLDOCTYPE
## @var long BadXMLDOCTYPE
##
## A value in the enumeration of all the error and warning codes returned
## by the XML layer in libSBML. Please consult the documentation for
## XMLError for an explanation of the meaning of this particular error
## code.
InvalidCharInXML = _libsbml.InvalidCharInXML
## @var long InvalidCharInXML
##
## A value in the enumeration of all the error and warning codes returned
## by the XML layer in libSBML. Please consult the documentation for
## XMLError for an explanation of the meaning of this particular error
## code.
BadlyFormedXML = _libsbml.BadlyFormedXML
## @var long BadlyFormedXML
##
## A value in the enumeration of all the error and warning codes returned
## by the XML layer in libSBML. Please consult the documentation for
## XMLError for an explanation of the meaning of this particular error
## code.
UnclosedXMLToken = _libsbml.UnclosedXMLToken
## @var long UnclosedXMLToken
##
## A value in the enumeration of all the error and warning codes returned
## by the XML layer in libSBML. Please consult the documentation for
## XMLError for an explanation of the meaning of this particular error
## code.
InvalidXMLConstruct = _libsbml.InvalidXMLConstruct
## @var long InvalidXMLConstruct
##
## A value in the enumeration of all the error and warning codes returned
## by the XML layer in libSBML. Please consult the documentation for
## XMLError for an explanation of the meaning of this particular error
## code.
XMLTagMismatch = _libsbml.XMLTagMismatch
## @var long XMLTagMismatch
##
## A value in the enumeration of all the error and warning codes returned
## by the XML layer in libSBML. Please consult the documentation for
## XMLError for an explanation of the meaning of this particular error
## code.
DuplicateXMLAttribute = _libsbml.DuplicateXMLAttribute
## @var long DuplicateXMLAttribute
##
## A value in the enumeration of all the error and warning codes returned
## by the XML layer in libSBML. Please consult the documentation for
## XMLError for an explanation of the meaning of this particular error
## code.
UndefinedXMLEntity = _libsbml.UndefinedXMLEntity
## @var long UndefinedXMLEntity
##
## A value in the enumeration of all the error and warning codes returned
## by the XML layer in libSBML. Please consult the documentation for
## XMLError for an explanation of the meaning of this particular error
## code.
BadProcessingInstruction = _libsbml.BadProcessingInstruction
## @var long BadProcessingInstruction
##
## A value in the enumeration of all the error and warning codes returned
## by the XML layer in libSBML. Please consult the documentation for
## XMLError for an explanation of the meaning of this particular error
## code.
BadXMLPrefix = _libsbml.BadXMLPrefix
## @var long BadXMLPrefix
##
## A value in the enumeration of all the error and warning codes returned
## by the XML layer in libSBML. Please consult the documentation for
## XMLError for an explanation of the meaning of this particular error
## code.
BadXMLPrefixValue = _libsbml.BadXMLPrefixValue
## @var long BadXMLPrefixValue
##
## A value in the enumeration of all the error and warning codes returned
## by the XML layer in libSBML. Please consult the documentation for
## XMLError for an explanation of the meaning of this particular error
## code.
MissingXMLRequiredAttribute = _libsbml.MissingXMLRequiredAttribute
## @var long MissingXMLRequiredAttribute
##
## A value in the enumeration of all the error and warning codes returned
## by the XML layer in libSBML. Please consult the documentation for
## XMLError for an explanation of the meaning of this particular error
## code.
XMLAttributeTypeMismatch = _libsbml.XMLAttributeTypeMismatch
## @var long XMLAttributeTypeMismatch
##
## A value in the enumeration of all the error and warning codes returned
## by the XML layer in libSBML. Please consult the documentation for
## XMLError for an explanation of the meaning of this particular error
## code.
XMLBadUTF8Content = _libsbml.XMLBadUTF8Content
## @var long XMLBadUTF8Content
##
## A value in the enumeration of all the error and warning codes returned
## by the XML layer in libSBML. Please consult the documentation for
## XMLError for an explanation of the meaning of this particular error
## code.
MissingXMLAttributeValue = _libsbml.MissingXMLAttributeValue
## @var long MissingXMLAttributeValue
##
## A value in the enumeration of all the error and warning codes returned
## by the XML layer in libSBML. Please consult the documentation for
## XMLError for an explanation of the meaning of this particular error
## code.
BadXMLAttributeValue = _libsbml.BadXMLAttributeValue
## @var long BadXMLAttributeValue
##
## A value in the enumeration of all the error and warning codes returned
## by the XML layer in libSBML. Please consult the documentation for
## XMLError for an explanation of the meaning of this particular error
## code.
BadXMLAttribute = _libsbml.BadXMLAttribute
## @var long BadXMLAttribute
##
## A value in the enumeration of all the error and warning codes returned
## by the XML layer in libSBML. Please consult the documentation for
## XMLError for an explanation of the meaning of this particular error
## code.
UnrecognizedXMLElement = _libsbml.UnrecognizedXMLElement
## @var long UnrecognizedXMLElement
##
## A value in the enumeration of all the error and warning codes returned
## by the XML layer in libSBML. Please consult the documentation for
## XMLError for an explanation of the meaning of this particular error
## code.
BadXMLComment = _libsbml.BadXMLComment
## @var long BadXMLComment
##
## A value in the enumeration of all the error and warning codes returned
## by the XML layer in libSBML. Please consult the documentation for
## XMLError for an explanation of the meaning of this particular error
## code.
BadXMLDeclLocation = _libsbml.BadXMLDeclLocation
## @var long BadXMLDeclLocation
##
## A value in the enumeration of all the error and warning codes returned
## by the XML layer in libSBML. Please consult the documentation for
## XMLError for an explanation of the meaning of this particular error
## code.
XMLUnexpectedEOF = _libsbml.XMLUnexpectedEOF
## @var long XMLUnexpectedEOF
##
## A value in the enumeration of all the error and warning codes returned
## by the XML layer in libSBML. Please consult the documentation for
## XMLError for an explanation of the meaning of this particular error
## code.
BadXMLIDValue = _libsbml.BadXMLIDValue
## @var long BadXMLIDValue
##
## A value in the enumeration of all the error and warning codes returned
## by the XML layer in libSBML. Please consult the documentation for
## XMLError for an explanation of the meaning of this particular error
## code.
BadXMLIDRef = _libsbml.BadXMLIDRef
## @var long BadXMLIDRef
##
## A value in the enumeration of all the error and warning codes returned
## by the XML layer in libSBML. Please consult the documentation for
## XMLError for an explanation of the meaning of this particular error
## code.
UninterpretableXMLContent = _libsbml.UninterpretableXMLContent
## @var long UninterpretableXMLContent
##
## A value in the enumeration of all the error and warning codes returned
## by the XML layer in libSBML. Please consult the documentation for
## XMLError for an explanation of the meaning of this particular error
## code.
BadXMLDocumentStructure = _libsbml.BadXMLDocumentStructure
## @var long BadXMLDocumentStructure
##
## A value in the enumeration of all the error and warning codes returned
## by the XML layer in libSBML. Please consult the documentation for
## XMLError for an explanation of the meaning of this particular error
## code.
InvalidAfterXMLContent = _libsbml.InvalidAfterXMLContent
## @var long InvalidAfterXMLContent
##
## A value in the enumeration of all the error and warning codes returned
## by the XML layer in libSBML. Please consult the documentation for
## XMLError for an explanation of the meaning of this particular error
## code.
XMLExpectedQuotedString = _libsbml.XMLExpectedQuotedString
## @var long XMLExpectedQuotedString
##
## A value in the enumeration of all the error and warning codes returned
## by the XML layer in libSBML. Please consult the documentation for
## XMLError for an explanation of the meaning of this particular error
## code.
XMLEmptyValueNotPermitted = _libsbml.XMLEmptyValueNotPermitted
## @var long XMLEmptyValueNotPermitted
##
## A value in the enumeration of all the error and warning codes returned
## by the XML layer in libSBML. Please consult the documentation for
## XMLError for an explanation of the meaning of this particular error
## code.
XMLBadNumber = _libsbml.XMLBadNumber
## @var long XMLBadNumber
##
## A value in the enumeration of all the error and warning codes returned
## by the XML layer in libSBML. Please consult the documentation for
## XMLError for an explanation of the meaning of this particular error
## code.
XMLBadColon = _libsbml.XMLBadColon
## @var long XMLBadColon
##
## A value in the enumeration of all the error and warning codes returned
## by the XML layer in libSBML. Please consult the documentation for
## XMLError for an explanation of the meaning of this particular error
## code.
MissingXMLElements = _libsbml.MissingXMLElements
## @var long MissingXMLElements
##
## A value in the enumeration of all the error and warning codes returned
## by the XML layer in libSBML. Please consult the documentation for
## XMLError for an explanation of the meaning of this particular error
## code.
XMLContentEmpty = _libsbml.XMLContentEmpty
## @var long XMLContentEmpty
##
## A value in the enumeration of all the error and warning codes returned
## by the XML layer in libSBML. Please consult the documentation for
## XMLError for an explanation of the meaning of this particular error
## code.
XMLErrorCodesUpperBound = _libsbml.XMLErrorCodesUpperBound
## @var long XMLErrorCodesUpperBound
##
## A value in the enumeration of all the error and warning codes returned
## by the XML layer in libSBML. Please consult the documentation for
## XMLError for an explanation of the meaning of this particular error
## code.
LIBSBML_CAT_INTERNAL = _libsbml.LIBSBML_CAT_INTERNAL
## @var long LIBSBML_CAT_INTERNAL
## @brief Category code for errors in the XML layer.
##
## This code has the following meaning: A problem involving the libSBML
## software itself or the underlying XML parser. This almost certainly
## indicates a software defect (i.e., bug) in libSBML. Please report
## instances of this to the libSBML developers.
LIBSBML_CAT_SYSTEM = _libsbml.LIBSBML_CAT_SYSTEM
## @var long LIBSBML_CAT_SYSTEM
## @brief Category code for errors in the XML layer.
##
## This code has the following meaning: A problem reported by the
## operating system, such as an inability to read or write a file.
## This indicates something that is not a program error but is outside
## of the control of libSBML.
LIBSBML_CAT_XML = _libsbml.LIBSBML_CAT_XML
## @var long LIBSBML_CAT_XML
## @brief Category code for errors in the XML layer.
##
## This code has the following meaning: A problem in the XML content
## itself. This usually arises from malformed XML or the use of
## constructs not permitted in SBML.
LIBSBML_CAT_SBML = _libsbml.LIBSBML_CAT_SBML
## @var long LIBSBML_CAT_SBML
## @brief Category code for errors in the XML layer.
##
## This code has the following meaning: General SBML error not falling
## into another category below.
LIBSBML_SEV_INFO = _libsbml.LIBSBML_SEV_INFO
## @var long LIBSBML_SEV_INFO
## @brief Severity code for errors in the XML layer.
##
## This code has the following meaning: The error is actually
## informational and not necessarily a serious problem.
LIBSBML_SEV_WARNING = _libsbml.LIBSBML_SEV_WARNING
## @var long LIBSBML_SEV_WARNING
## @brief Severity code for errors in the XML layer.
##
## This code has the following meaning: The error object represents a
## problem that is not serious enough to necessarily stop the problem,
## but applications should take note of the problem and evaluate what
## its implications may be.
LIBSBML_SEV_ERROR = _libsbml.LIBSBML_SEV_ERROR
## @var long LIBSBML_SEV_ERROR
## @brief Severity code for errors in the XML layer.
##
## This code has the following meaning: The error object represents a
## serious error. The application may continue running but it is
## unlikely to be able to continue processing the same XML file or data
## stream.
LIBSBML_SEV_FATAL = _libsbml.LIBSBML_SEV_FATAL
## @var long LIBSBML_SEV_FATAL
## @brief Severity code for errors in the XML layer.
##
## This code has the following meaning: A serious error occurred, such
## as an out-of-memory condition, and the software should terminate
## immediately.
UnknownError = _libsbml.UnknownError
## @var long UnknownError
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
NotUTF8 = _libsbml.NotUTF8
## @var long NotUTF8
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
UnrecognizedElement = _libsbml.UnrecognizedElement
## @var long UnrecognizedElement
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
NotSchemaConformant = _libsbml.NotSchemaConformant
## @var long NotSchemaConformant
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
L3NotSchemaConformant = _libsbml.L3NotSchemaConformant
## @var long L3NotSchemaConformant
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
InvalidMathElement = _libsbml.InvalidMathElement
## @var long InvalidMathElement
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
DisallowedMathMLSymbol = _libsbml.DisallowedMathMLSymbol
## @var long DisallowedMathMLSymbol
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
DisallowedMathMLEncodingUse = _libsbml.DisallowedMathMLEncodingUse
## @var long DisallowedMathMLEncodingUse
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
DisallowedDefinitionURLUse = _libsbml.DisallowedDefinitionURLUse
## @var long DisallowedDefinitionURLUse
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
BadCsymbolDefinitionURLValue = _libsbml.BadCsymbolDefinitionURLValue
## @var long BadCsymbolDefinitionURLValue
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
DisallowedMathTypeAttributeUse = _libsbml.DisallowedMathTypeAttributeUse
## @var long DisallowedMathTypeAttributeUse
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
DisallowedMathTypeAttributeValue = _libsbml.DisallowedMathTypeAttributeValue
## @var long DisallowedMathTypeAttributeValue
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
LambdaOnlyAllowedInFunctionDef = _libsbml.LambdaOnlyAllowedInFunctionDef
## @var long LambdaOnlyAllowedInFunctionDef
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
BooleanOpsNeedBooleanArgs = _libsbml.BooleanOpsNeedBooleanArgs
## @var long BooleanOpsNeedBooleanArgs
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
NumericOpsNeedNumericArgs = _libsbml.NumericOpsNeedNumericArgs
## @var long NumericOpsNeedNumericArgs
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
ArgsToEqNeedSameType = _libsbml.ArgsToEqNeedSameType
## @var long ArgsToEqNeedSameType
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
PiecewiseNeedsConsistentTypes = _libsbml.PiecewiseNeedsConsistentTypes
## @var long PiecewiseNeedsConsistentTypes
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
PieceNeedsBoolean = _libsbml.PieceNeedsBoolean
## @var long PieceNeedsBoolean
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
ApplyCiMustBeUserFunction = _libsbml.ApplyCiMustBeUserFunction
## @var long ApplyCiMustBeUserFunction
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
ApplyCiMustBeModelComponent = _libsbml.ApplyCiMustBeModelComponent
## @var long ApplyCiMustBeModelComponent
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
KineticLawParametersAreLocalOnly = _libsbml.KineticLawParametersAreLocalOnly
## @var long KineticLawParametersAreLocalOnly
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
MathResultMustBeNumeric = _libsbml.MathResultMustBeNumeric
## @var long MathResultMustBeNumeric
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
OpsNeedCorrectNumberOfArgs = _libsbml.OpsNeedCorrectNumberOfArgs
## @var long OpsNeedCorrectNumberOfArgs
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
InvalidNoArgsPassedToFunctionDef = _libsbml.InvalidNoArgsPassedToFunctionDef
## @var long InvalidNoArgsPassedToFunctionDef
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
DisallowedMathUnitsUse = _libsbml.DisallowedMathUnitsUse
## @var long DisallowedMathUnitsUse
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
InvalidUnitsValue = _libsbml.InvalidUnitsValue
## @var long InvalidUnitsValue
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
DuplicateComponentId = _libsbml.DuplicateComponentId
## @var long DuplicateComponentId
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
DuplicateUnitDefinitionId = _libsbml.DuplicateUnitDefinitionId
## @var long DuplicateUnitDefinitionId
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
DuplicateLocalParameterId = _libsbml.DuplicateLocalParameterId
## @var long DuplicateLocalParameterId
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
MultipleAssignmentOrRateRules = _libsbml.MultipleAssignmentOrRateRules
## @var long MultipleAssignmentOrRateRules
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
MultipleEventAssignmentsForId = _libsbml.MultipleEventAssignmentsForId
## @var long MultipleEventAssignmentsForId
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
EventAndAssignmentRuleForId = _libsbml.EventAndAssignmentRuleForId
## @var long EventAndAssignmentRuleForId
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
DuplicateMetaId = _libsbml.DuplicateMetaId
## @var long DuplicateMetaId
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
InvalidSBOTermSyntax = _libsbml.InvalidSBOTermSyntax
## @var long InvalidSBOTermSyntax
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
InvalidMetaidSyntax = _libsbml.InvalidMetaidSyntax
## @var long InvalidMetaidSyntax
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
InvalidIdSyntax = _libsbml.InvalidIdSyntax
## @var long InvalidIdSyntax
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
InvalidUnitIdSyntax = _libsbml.InvalidUnitIdSyntax
## @var long InvalidUnitIdSyntax
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
InvalidNameSyntax = _libsbml.InvalidNameSyntax
## @var long InvalidNameSyntax
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
MissingAnnotationNamespace = _libsbml.MissingAnnotationNamespace
## @var long MissingAnnotationNamespace
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
DuplicateAnnotationNamespaces = _libsbml.DuplicateAnnotationNamespaces
## @var long DuplicateAnnotationNamespaces
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
SBMLNamespaceInAnnotation = _libsbml.SBMLNamespaceInAnnotation
## @var long SBMLNamespaceInAnnotation
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
MultipleAnnotations = _libsbml.MultipleAnnotations
## @var long MultipleAnnotations
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
InconsistentArgUnits = _libsbml.InconsistentArgUnits
## @var long InconsistentArgUnits
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
InconsistentKineticLawUnitsL3 = _libsbml.InconsistentKineticLawUnitsL3
## @var long InconsistentKineticLawUnitsL3
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
AssignRuleCompartmentMismatch = _libsbml.AssignRuleCompartmentMismatch
## @var long AssignRuleCompartmentMismatch
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
AssignRuleSpeciesMismatch = _libsbml.AssignRuleSpeciesMismatch
## @var long AssignRuleSpeciesMismatch
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
AssignRuleParameterMismatch = _libsbml.AssignRuleParameterMismatch
## @var long AssignRuleParameterMismatch
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
AssignRuleStoichiometryMismatch = _libsbml.AssignRuleStoichiometryMismatch
## @var long AssignRuleStoichiometryMismatch
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
InitAssignCompartmenMismatch = _libsbml.InitAssignCompartmenMismatch
## @var long InitAssignCompartmenMismatch
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
InitAssignSpeciesMismatch = _libsbml.InitAssignSpeciesMismatch
## @var long InitAssignSpeciesMismatch
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
InitAssignParameterMismatch = _libsbml.InitAssignParameterMismatch
## @var long InitAssignParameterMismatch
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
InitAssignStoichiometryMismatch = _libsbml.InitAssignStoichiometryMismatch
## @var long InitAssignStoichiometryMismatch
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
RateRuleCompartmentMismatch = _libsbml.RateRuleCompartmentMismatch
## @var long RateRuleCompartmentMismatch
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
RateRuleSpeciesMismatch = _libsbml.RateRuleSpeciesMismatch
## @var long RateRuleSpeciesMismatch
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
RateRuleParameterMismatch = _libsbml.RateRuleParameterMismatch
## @var long RateRuleParameterMismatch
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
RateRuleStoichiometryMismatch = _libsbml.RateRuleStoichiometryMismatch
## @var long RateRuleStoichiometryMismatch
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
KineticLawNotSubstancePerTime = _libsbml.KineticLawNotSubstancePerTime
## @var long KineticLawNotSubstancePerTime
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
SpeciesInvalidExtentUnits = _libsbml.SpeciesInvalidExtentUnits
## @var long SpeciesInvalidExtentUnits
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
DelayUnitsNotTime = _libsbml.DelayUnitsNotTime
## @var long DelayUnitsNotTime
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
EventAssignCompartmentMismatch = _libsbml.EventAssignCompartmentMismatch
## @var long EventAssignCompartmentMismatch
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
EventAssignSpeciesMismatch = _libsbml.EventAssignSpeciesMismatch
## @var long EventAssignSpeciesMismatch
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
EventAssignParameterMismatch = _libsbml.EventAssignParameterMismatch
## @var long EventAssignParameterMismatch
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
EventAssignStoichiometryMismatch = _libsbml.EventAssignStoichiometryMismatch
## @var long EventAssignStoichiometryMismatch
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
PriorityUnitsNotDimensionless = _libsbml.PriorityUnitsNotDimensionless
## @var long PriorityUnitsNotDimensionless
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
UpperUnitBound = _libsbml.UpperUnitBound
## @var long UpperUnitBound
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
OverdeterminedSystem = _libsbml.OverdeterminedSystem
## @var long OverdeterminedSystem
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
InvalidModelSBOTerm = _libsbml.InvalidModelSBOTerm
## @var long InvalidModelSBOTerm
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
InvalidFunctionDefSBOTerm = _libsbml.InvalidFunctionDefSBOTerm
## @var long InvalidFunctionDefSBOTerm
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
InvalidParameterSBOTerm = _libsbml.InvalidParameterSBOTerm
## @var long InvalidParameterSBOTerm
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
InvalidInitAssignSBOTerm = _libsbml.InvalidInitAssignSBOTerm
## @var long InvalidInitAssignSBOTerm
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
InvalidRuleSBOTerm = _libsbml.InvalidRuleSBOTerm
## @var long InvalidRuleSBOTerm
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
InvalidConstraintSBOTerm = _libsbml.InvalidConstraintSBOTerm
## @var long InvalidConstraintSBOTerm
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
# NOTE(review): auto-generated (SWIG/Doxygen) re-exports of libsbml SBMLError
# diagnostic codes. Each module-level name simply mirrors the identically named
# integer constant on the _libsbml extension module; the `## @var` blocks are
# consumed by Doxygen to document each code. Do not hand-edit individual
# entries (including the upstream spelling `InvalidSpeciesSusbstanceUnits`,
# which matches the actual libsbml constant) — regenerate from the libsbml build.
InvalidReactionSBOTerm = _libsbml.InvalidReactionSBOTerm
## @var long InvalidReactionSBOTerm
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
InvalidSpeciesReferenceSBOTerm = _libsbml.InvalidSpeciesReferenceSBOTerm
## @var long InvalidSpeciesReferenceSBOTerm
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
InvalidKineticLawSBOTerm = _libsbml.InvalidKineticLawSBOTerm
## @var long InvalidKineticLawSBOTerm
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
InvalidEventSBOTerm = _libsbml.InvalidEventSBOTerm
## @var long InvalidEventSBOTerm
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
InvalidEventAssignmentSBOTerm = _libsbml.InvalidEventAssignmentSBOTerm
## @var long InvalidEventAssignmentSBOTerm
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
InvalidCompartmentSBOTerm = _libsbml.InvalidCompartmentSBOTerm
## @var long InvalidCompartmentSBOTerm
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
InvalidSpeciesSBOTerm = _libsbml.InvalidSpeciesSBOTerm
## @var long InvalidSpeciesSBOTerm
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
InvalidCompartmentTypeSBOTerm = _libsbml.InvalidCompartmentTypeSBOTerm
## @var long InvalidCompartmentTypeSBOTerm
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
InvalidSpeciesTypeSBOTerm = _libsbml.InvalidSpeciesTypeSBOTerm
## @var long InvalidSpeciesTypeSBOTerm
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
InvalidTriggerSBOTerm = _libsbml.InvalidTriggerSBOTerm
## @var long InvalidTriggerSBOTerm
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
InvalidDelaySBOTerm = _libsbml.InvalidDelaySBOTerm
## @var long InvalidDelaySBOTerm
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
NotesNotInXHTMLNamespace = _libsbml.NotesNotInXHTMLNamespace
## @var long NotesNotInXHTMLNamespace
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
NotesContainsXMLDecl = _libsbml.NotesContainsXMLDecl
## @var long NotesContainsXMLDecl
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
NotesContainsDOCTYPE = _libsbml.NotesContainsDOCTYPE
## @var long NotesContainsDOCTYPE
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
InvalidNotesContent = _libsbml.InvalidNotesContent
## @var long InvalidNotesContent
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
OnlyOneNotesElementAllowed = _libsbml.OnlyOneNotesElementAllowed
## @var long OnlyOneNotesElementAllowed
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
InvalidNamespaceOnSBML = _libsbml.InvalidNamespaceOnSBML
## @var long InvalidNamespaceOnSBML
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
MissingOrInconsistentLevel = _libsbml.MissingOrInconsistentLevel
## @var long MissingOrInconsistentLevel
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
MissingOrInconsistentVersion = _libsbml.MissingOrInconsistentVersion
## @var long MissingOrInconsistentVersion
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
PackageNSMustMatch = _libsbml.PackageNSMustMatch
## @var long PackageNSMustMatch
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
LevelPositiveInteger = _libsbml.LevelPositiveInteger
## @var long LevelPositiveInteger
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
VersionPositiveInteger = _libsbml.VersionPositiveInteger
## @var long VersionPositiveInteger
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
AllowedAttributesOnSBML = _libsbml.AllowedAttributesOnSBML
## @var long AllowedAttributesOnSBML
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
MissingModel = _libsbml.MissingModel
## @var long MissingModel
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
IncorrectOrderInModel = _libsbml.IncorrectOrderInModel
## @var long IncorrectOrderInModel
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
EmptyListElement = _libsbml.EmptyListElement
## @var long EmptyListElement
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
NeedCompartmentIfHaveSpecies = _libsbml.NeedCompartmentIfHaveSpecies
## @var long NeedCompartmentIfHaveSpecies
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
OneOfEachListOf = _libsbml.OneOfEachListOf
## @var long OneOfEachListOf
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
OnlyFuncDefsInListOfFuncDefs = _libsbml.OnlyFuncDefsInListOfFuncDefs
## @var long OnlyFuncDefsInListOfFuncDefs
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
OnlyUnitDefsInListOfUnitDefs = _libsbml.OnlyUnitDefsInListOfUnitDefs
## @var long OnlyUnitDefsInListOfUnitDefs
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
OnlyCompartmentsInListOfCompartments = _libsbml.OnlyCompartmentsInListOfCompartments
## @var long OnlyCompartmentsInListOfCompartments
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
OnlySpeciesInListOfSpecies = _libsbml.OnlySpeciesInListOfSpecies
## @var long OnlySpeciesInListOfSpecies
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
OnlyParametersInListOfParameters = _libsbml.OnlyParametersInListOfParameters
## @var long OnlyParametersInListOfParameters
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
OnlyInitAssignsInListOfInitAssigns = _libsbml.OnlyInitAssignsInListOfInitAssigns
## @var long OnlyInitAssignsInListOfInitAssigns
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
OnlyRulesInListOfRules = _libsbml.OnlyRulesInListOfRules
## @var long OnlyRulesInListOfRules
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
OnlyConstraintsInListOfConstraints = _libsbml.OnlyConstraintsInListOfConstraints
## @var long OnlyConstraintsInListOfConstraints
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
OnlyReactionsInListOfReactions = _libsbml.OnlyReactionsInListOfReactions
## @var long OnlyReactionsInListOfReactions
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
OnlyEventsInListOfEvents = _libsbml.OnlyEventsInListOfEvents
## @var long OnlyEventsInListOfEvents
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
L3ConversionFactorOnModel = _libsbml.L3ConversionFactorOnModel
## @var long L3ConversionFactorOnModel
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
L3TimeUnitsOnModel = _libsbml.L3TimeUnitsOnModel
## @var long L3TimeUnitsOnModel
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
L3VolumeUnitsOnModel = _libsbml.L3VolumeUnitsOnModel
## @var long L3VolumeUnitsOnModel
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
L3AreaUnitsOnModel = _libsbml.L3AreaUnitsOnModel
## @var long L3AreaUnitsOnModel
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
L3LengthUnitsOnModel = _libsbml.L3LengthUnitsOnModel
## @var long L3LengthUnitsOnModel
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
L3ExtentUnitsOnModel = _libsbml.L3ExtentUnitsOnModel
## @var long L3ExtentUnitsOnModel
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
AllowedAttributesOnModel = _libsbml.AllowedAttributesOnModel
## @var long AllowedAttributesOnModel
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
AllowedAttributesOnListOfFuncs = _libsbml.AllowedAttributesOnListOfFuncs
## @var long AllowedAttributesOnListOfFuncs
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
AllowedAttributesOnListOfUnitDefs = _libsbml.AllowedAttributesOnListOfUnitDefs
## @var long AllowedAttributesOnListOfUnitDefs
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
AllowedAttributesOnListOfComps = _libsbml.AllowedAttributesOnListOfComps
## @var long AllowedAttributesOnListOfComps
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
AllowedAttributesOnListOfSpecies = _libsbml.AllowedAttributesOnListOfSpecies
## @var long AllowedAttributesOnListOfSpecies
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
AllowedAttributesOnListOfParams = _libsbml.AllowedAttributesOnListOfParams
## @var long AllowedAttributesOnListOfParams
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
AllowedAttributesOnListOfInitAssign = _libsbml.AllowedAttributesOnListOfInitAssign
## @var long AllowedAttributesOnListOfInitAssign
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
AllowedAttributesOnListOfRules = _libsbml.AllowedAttributesOnListOfRules
## @var long AllowedAttributesOnListOfRules
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
AllowedAttributesOnListOfConstraints = _libsbml.AllowedAttributesOnListOfConstraints
## @var long AllowedAttributesOnListOfConstraints
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
AllowedAttributesOnListOfReactions = _libsbml.AllowedAttributesOnListOfReactions
## @var long AllowedAttributesOnListOfReactions
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
AllowedAttributesOnListOfEvents = _libsbml.AllowedAttributesOnListOfEvents
## @var long AllowedAttributesOnListOfEvents
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
FunctionDefMathNotLambda = _libsbml.FunctionDefMathNotLambda
## @var long FunctionDefMathNotLambda
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
InvalidApplyCiInLambda = _libsbml.InvalidApplyCiInLambda
## @var long InvalidApplyCiInLambda
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
RecursiveFunctionDefinition = _libsbml.RecursiveFunctionDefinition
## @var long RecursiveFunctionDefinition
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
InvalidCiInLambda = _libsbml.InvalidCiInLambda
## @var long InvalidCiInLambda
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
InvalidFunctionDefReturnType = _libsbml.InvalidFunctionDefReturnType
## @var long InvalidFunctionDefReturnType
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
OneMathElementPerFunc = _libsbml.OneMathElementPerFunc
## @var long OneMathElementPerFunc
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
AllowedAttributesOnFunc = _libsbml.AllowedAttributesOnFunc
## @var long AllowedAttributesOnFunc
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
InvalidUnitDefId = _libsbml.InvalidUnitDefId
## @var long InvalidUnitDefId
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
InvalidSubstanceRedefinition = _libsbml.InvalidSubstanceRedefinition
## @var long InvalidSubstanceRedefinition
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
InvalidLengthRedefinition = _libsbml.InvalidLengthRedefinition
## @var long InvalidLengthRedefinition
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
InvalidAreaRedefinition = _libsbml.InvalidAreaRedefinition
## @var long InvalidAreaRedefinition
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
InvalidTimeRedefinition = _libsbml.InvalidTimeRedefinition
## @var long InvalidTimeRedefinition
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
InvalidVolumeRedefinition = _libsbml.InvalidVolumeRedefinition
## @var long InvalidVolumeRedefinition
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
VolumeLitreDefExponentNotOne = _libsbml.VolumeLitreDefExponentNotOne
## @var long VolumeLitreDefExponentNotOne
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
VolumeMetreDefExponentNot3 = _libsbml.VolumeMetreDefExponentNot3
## @var long VolumeMetreDefExponentNot3
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
EmptyListOfUnits = _libsbml.EmptyListOfUnits
## @var long EmptyListOfUnits
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
InvalidUnitKind = _libsbml.InvalidUnitKind
## @var long InvalidUnitKind
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
OffsetNoLongerValid = _libsbml.OffsetNoLongerValid
## @var long OffsetNoLongerValid
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
CelsiusNoLongerValid = _libsbml.CelsiusNoLongerValid
## @var long CelsiusNoLongerValid
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
EmptyUnitListElement = _libsbml.EmptyUnitListElement
## @var long EmptyUnitListElement
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
OneListOfUnitsPerUnitDef = _libsbml.OneListOfUnitsPerUnitDef
## @var long OneListOfUnitsPerUnitDef
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
OnlyUnitsInListOfUnits = _libsbml.OnlyUnitsInListOfUnits
## @var long OnlyUnitsInListOfUnits
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
AllowedAttributesOnUnitDefinition = _libsbml.AllowedAttributesOnUnitDefinition
## @var long AllowedAttributesOnUnitDefinition
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
AllowedAttributesOnListOfUnits = _libsbml.AllowedAttributesOnListOfUnits
## @var long AllowedAttributesOnListOfUnits
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
AllowedAttributesOnUnit = _libsbml.AllowedAttributesOnUnit
## @var long AllowedAttributesOnUnit
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
ZeroDimensionalCompartmentSize = _libsbml.ZeroDimensionalCompartmentSize
## @var long ZeroDimensionalCompartmentSize
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
ZeroDimensionalCompartmentUnits = _libsbml.ZeroDimensionalCompartmentUnits
## @var long ZeroDimensionalCompartmentUnits
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
ZeroDimensionalCompartmentConst = _libsbml.ZeroDimensionalCompartmentConst
## @var long ZeroDimensionalCompartmentConst
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
UndefinedOutsideCompartment = _libsbml.UndefinedOutsideCompartment
## @var long UndefinedOutsideCompartment
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
RecursiveCompartmentContainment = _libsbml.RecursiveCompartmentContainment
## @var long RecursiveCompartmentContainment
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
ZeroDCompartmentContainment = _libsbml.ZeroDCompartmentContainment
## @var long ZeroDCompartmentContainment
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
Invalid1DCompartmentUnits = _libsbml.Invalid1DCompartmentUnits
## @var long Invalid1DCompartmentUnits
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
Invalid2DCompartmentUnits = _libsbml.Invalid2DCompartmentUnits
## @var long Invalid2DCompartmentUnits
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
Invalid3DCompartmentUnits = _libsbml.Invalid3DCompartmentUnits
## @var long Invalid3DCompartmentUnits
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
InvalidCompartmentTypeRef = _libsbml.InvalidCompartmentTypeRef
## @var long InvalidCompartmentTypeRef
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
OneDimensionalCompartmentUnits = _libsbml.OneDimensionalCompartmentUnits
## @var long OneDimensionalCompartmentUnits
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
TwoDimensionalCompartmentUnits = _libsbml.TwoDimensionalCompartmentUnits
## @var long TwoDimensionalCompartmentUnits
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
ThreeDimensionalCompartmentUnits = _libsbml.ThreeDimensionalCompartmentUnits
## @var long ThreeDimensionalCompartmentUnits
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
AllowedAttributesOnCompartment = _libsbml.AllowedAttributesOnCompartment
## @var long AllowedAttributesOnCompartment
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
NoUnitsOnCompartment = _libsbml.NoUnitsOnCompartment
## @var long NoUnitsOnCompartment
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
InvalidSpeciesCompartmentRef = _libsbml.InvalidSpeciesCompartmentRef
## @var long InvalidSpeciesCompartmentRef
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
HasOnlySubsNoSpatialUnits = _libsbml.HasOnlySubsNoSpatialUnits
## @var long HasOnlySubsNoSpatialUnits
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
NoSpatialUnitsInZeroD = _libsbml.NoSpatialUnitsInZeroD
## @var long NoSpatialUnitsInZeroD
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
NoConcentrationInZeroD = _libsbml.NoConcentrationInZeroD
## @var long NoConcentrationInZeroD
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
SpatialUnitsInOneD = _libsbml.SpatialUnitsInOneD
## @var long SpatialUnitsInOneD
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
SpatialUnitsInTwoD = _libsbml.SpatialUnitsInTwoD
## @var long SpatialUnitsInTwoD
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
SpatialUnitsInThreeD = _libsbml.SpatialUnitsInThreeD
## @var long SpatialUnitsInThreeD
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
InvalidSpeciesSusbstanceUnits = _libsbml.InvalidSpeciesSusbstanceUnits
## @var long InvalidSpeciesSusbstanceUnits
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
BothAmountAndConcentrationSet = _libsbml.BothAmountAndConcentrationSet
## @var long BothAmountAndConcentrationSet
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
NonBoundarySpeciesAssignedAndUsed = _libsbml.NonBoundarySpeciesAssignedAndUsed
## @var long NonBoundarySpeciesAssignedAndUsed
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
NonConstantSpeciesUsed = _libsbml.NonConstantSpeciesUsed
## @var long NonConstantSpeciesUsed
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
InvalidSpeciesTypeRef = _libsbml.InvalidSpeciesTypeRef
## @var long InvalidSpeciesTypeRef
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
MultSpeciesSameTypeInCompartment = _libsbml.MultSpeciesSameTypeInCompartment
## @var long MultSpeciesSameTypeInCompartment
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
MissingSpeciesCompartment = _libsbml.MissingSpeciesCompartment
## @var long MissingSpeciesCompartment
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
SpatialSizeUnitsRemoved = _libsbml.SpatialSizeUnitsRemoved
## @var long SpatialSizeUnitsRemoved
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
SubstanceUnitsOnSpecies = _libsbml.SubstanceUnitsOnSpecies
## @var long SubstanceUnitsOnSpecies
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
ConversionFactorOnSpecies = _libsbml.ConversionFactorOnSpecies
## @var long ConversionFactorOnSpecies
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
AllowedAttributesOnSpecies = _libsbml.AllowedAttributesOnSpecies
## @var long AllowedAttributesOnSpecies
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
InvalidParameterUnits = _libsbml.InvalidParameterUnits
## @var long InvalidParameterUnits
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
ParameterUnits = _libsbml.ParameterUnits
## @var long ParameterUnits
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
ConversionFactorMustConstant = _libsbml.ConversionFactorMustConstant
## @var long ConversionFactorMustConstant
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
AllowedAttributesOnParameter = _libsbml.AllowedAttributesOnParameter
## @var long AllowedAttributesOnParameter
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
InvalidInitAssignSymbol = _libsbml.InvalidInitAssignSymbol
## @var long InvalidInitAssignSymbol
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
MultipleInitAssignments = _libsbml.MultipleInitAssignments
## @var long MultipleInitAssignments
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
InitAssignmentAndRuleForSameId = _libsbml.InitAssignmentAndRuleForSameId
## @var long InitAssignmentAndRuleForSameId
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
OneMathElementPerInitialAssign = _libsbml.OneMathElementPerInitialAssign
## @var long OneMathElementPerInitialAssign
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
AllowedAttributesOnInitialAssign = _libsbml.AllowedAttributesOnInitialAssign
## @var long AllowedAttributesOnInitialAssign
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
InvalidAssignRuleVariable = _libsbml.InvalidAssignRuleVariable
## @var long InvalidAssignRuleVariable
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
InvalidRateRuleVariable = _libsbml.InvalidRateRuleVariable
## @var long InvalidRateRuleVariable
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
AssignmentToConstantEntity = _libsbml.AssignmentToConstantEntity
## @var long AssignmentToConstantEntity
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
RateRuleForConstantEntity = _libsbml.RateRuleForConstantEntity
## @var long RateRuleForConstantEntity
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
RepeatedRule10304 = _libsbml.RepeatedRule10304
## @var long RepeatedRule10304
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
CircularRuleDependency = _libsbml.CircularRuleDependency
## @var long CircularRuleDependency
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
OneMathElementPerRule = _libsbml.OneMathElementPerRule
## @var long OneMathElementPerRule
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
AllowedAttributesOnAssignRule = _libsbml.AllowedAttributesOnAssignRule
## @var long AllowedAttributesOnAssignRule
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
AllowedAttributesOnRateRule = _libsbml.AllowedAttributesOnRateRule
## @var long AllowedAttributesOnRateRule
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
AllowedAttributesOnAlgRule = _libsbml.AllowedAttributesOnAlgRule
## @var long AllowedAttributesOnAlgRule
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
ConstraintMathNotBoolean = _libsbml.ConstraintMathNotBoolean
## @var long ConstraintMathNotBoolean
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
IncorrectOrderInConstraint = _libsbml.IncorrectOrderInConstraint
## @var long IncorrectOrderInConstraint
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
ConstraintNotInXHTMLNamespace = _libsbml.ConstraintNotInXHTMLNamespace
## @var long ConstraintNotInXHTMLNamespace
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
ConstraintContainsXMLDecl = _libsbml.ConstraintContainsXMLDecl
## @var long ConstraintContainsXMLDecl
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
ConstraintContainsDOCTYPE = _libsbml.ConstraintContainsDOCTYPE
## @var long ConstraintContainsDOCTYPE
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
InvalidConstraintContent = _libsbml.InvalidConstraintContent
## @var long InvalidConstraintContent
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
OneMathElementPerConstraint = _libsbml.OneMathElementPerConstraint
## @var long OneMathElementPerConstraint
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
OneMessageElementPerConstraint = _libsbml.OneMessageElementPerConstraint
## @var long OneMessageElementPerConstraint
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
AllowedAttributesOnConstraint = _libsbml.AllowedAttributesOnConstraint
## @var long AllowedAttributesOnConstraint
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
NoReactantsOrProducts = _libsbml.NoReactantsOrProducts
## @var long NoReactantsOrProducts
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
IncorrectOrderInReaction = _libsbml.IncorrectOrderInReaction
## @var long IncorrectOrderInReaction
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
EmptyListInReaction = _libsbml.EmptyListInReaction
## @var long EmptyListInReaction
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
InvalidReactantsProductsList = _libsbml.InvalidReactantsProductsList
## @var long InvalidReactantsProductsList
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
InvalidModifiersList = _libsbml.InvalidModifiersList
## @var long InvalidModifiersList
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
OneSubElementPerReaction = _libsbml.OneSubElementPerReaction
## @var long OneSubElementPerReaction
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
CompartmentOnReaction = _libsbml.CompartmentOnReaction
## @var long CompartmentOnReaction
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
AllowedAttributesOnReaction = _libsbml.AllowedAttributesOnReaction
## @var long AllowedAttributesOnReaction
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
InvalidSpeciesReference = _libsbml.InvalidSpeciesReference
## @var long InvalidSpeciesReference
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
RepeatedRule20611 = _libsbml.RepeatedRule20611
## @var long RepeatedRule20611
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
BothStoichiometryAndMath = _libsbml.BothStoichiometryAndMath
## @var long BothStoichiometryAndMath
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
AllowedAttributesOnSpeciesReference = _libsbml.AllowedAttributesOnSpeciesReference
## @var long AllowedAttributesOnSpeciesReference
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
AllowedAttributesOnModifier = _libsbml.AllowedAttributesOnModifier
## @var long AllowedAttributesOnModifier
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
UndeclaredSpeciesRef = _libsbml.UndeclaredSpeciesRef
## @var long UndeclaredSpeciesRef
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
IncorrectOrderInKineticLaw = _libsbml.IncorrectOrderInKineticLaw
## @var long IncorrectOrderInKineticLaw
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
EmptyListInKineticLaw = _libsbml.EmptyListInKineticLaw
## @var long EmptyListInKineticLaw
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
NonConstantLocalParameter = _libsbml.NonConstantLocalParameter
## @var long NonConstantLocalParameter
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
SubsUnitsNoLongerValid = _libsbml.SubsUnitsNoLongerValid
## @var long SubsUnitsNoLongerValid
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
TimeUnitsNoLongerValid = _libsbml.TimeUnitsNoLongerValid
## @var long TimeUnitsNoLongerValid
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
OneListOfPerKineticLaw = _libsbml.OneListOfPerKineticLaw
## @var long OneListOfPerKineticLaw
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
OnlyLocalParamsInListOfLocalParams = _libsbml.OnlyLocalParamsInListOfLocalParams
## @var long OnlyLocalParamsInListOfLocalParams
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
AllowedAttributesOnListOfLocalParam = _libsbml.AllowedAttributesOnListOfLocalParam
## @var long AllowedAttributesOnListOfLocalParam
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
OneMathPerKineticLaw = _libsbml.OneMathPerKineticLaw
## @var long OneMathPerKineticLaw
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
UndeclaredSpeciesInStoichMath = _libsbml.UndeclaredSpeciesInStoichMath
## @var long UndeclaredSpeciesInStoichMath
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
AllowedAttributesOnKineticLaw = _libsbml.AllowedAttributesOnKineticLaw
## @var long AllowedAttributesOnKineticLaw
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
AllowedAttributesOnListOfSpeciesRef = _libsbml.AllowedAttributesOnListOfSpeciesRef
## @var long AllowedAttributesOnListOfSpeciesRef
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
AllowedAttributesOnListOfMods = _libsbml.AllowedAttributesOnListOfMods
## @var long AllowedAttributesOnListOfMods
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
AllowedAttributesOnLocalParameter = _libsbml.AllowedAttributesOnLocalParameter
## @var long AllowedAttributesOnLocalParameter
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
MissingTriggerInEvent = _libsbml.MissingTriggerInEvent
## @var long MissingTriggerInEvent
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
TriggerMathNotBoolean = _libsbml.TriggerMathNotBoolean
## @var long TriggerMathNotBoolean
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
MissingEventAssignment = _libsbml.MissingEventAssignment
## @var long MissingEventAssignment
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
TimeUnitsEvent = _libsbml.TimeUnitsEvent
## @var long TimeUnitsEvent
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
IncorrectOrderInEvent = _libsbml.IncorrectOrderInEvent
## @var long IncorrectOrderInEvent
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
ValuesFromTriggerTimeNeedDelay = _libsbml.ValuesFromTriggerTimeNeedDelay
## @var long ValuesFromTriggerTimeNeedDelay
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
DelayNeedsValuesFromTriggerTime = _libsbml.DelayNeedsValuesFromTriggerTime
## @var long DelayNeedsValuesFromTriggerTime
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
OneMathPerTrigger = _libsbml.OneMathPerTrigger
## @var long OneMathPerTrigger
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
OneMathPerDelay = _libsbml.OneMathPerDelay
## @var long OneMathPerDelay
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
InvalidEventAssignmentVariable = _libsbml.InvalidEventAssignmentVariable
## @var long InvalidEventAssignmentVariable
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
EventAssignmentForConstantEntity = _libsbml.EventAssignmentForConstantEntity
## @var long EventAssignmentForConstantEntity
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
OneMathPerEventAssignment = _libsbml.OneMathPerEventAssignment
## @var long OneMathPerEventAssignment
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
AllowedAttributesOnEventAssignment = _libsbml.AllowedAttributesOnEventAssignment
## @var long AllowedAttributesOnEventAssignment
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
OnlyOneDelayPerEvent = _libsbml.OnlyOneDelayPerEvent
## @var long OnlyOneDelayPerEvent
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
OneListOfEventAssignmentsPerEvent = _libsbml.OneListOfEventAssignmentsPerEvent
## @var long OneListOfEventAssignmentsPerEvent
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
OnlyEventAssignInListOfEventAssign = _libsbml.OnlyEventAssignInListOfEventAssign
## @var long OnlyEventAssignInListOfEventAssign
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
AllowedAttributesOnListOfEventAssign = _libsbml.AllowedAttributesOnListOfEventAssign
## @var long AllowedAttributesOnListOfEventAssign
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
AllowedAttributesOnEvent = _libsbml.AllowedAttributesOnEvent
## @var long AllowedAttributesOnEvent
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
AllowedAttributesOnTrigger = _libsbml.AllowedAttributesOnTrigger
## @var long AllowedAttributesOnTrigger
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
AllowedAttributesOnDelay = _libsbml.AllowedAttributesOnDelay
## @var long AllowedAttributesOnDelay
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
PersistentNotBoolean = _libsbml.PersistentNotBoolean
## @var long PersistentNotBoolean
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
InitialValueNotBoolean = _libsbml.InitialValueNotBoolean
## @var long InitialValueNotBoolean
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
OnlyOnePriorityPerEvent = _libsbml.OnlyOnePriorityPerEvent
## @var long OnlyOnePriorityPerEvent
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
OneMathPerPriority = _libsbml.OneMathPerPriority
## @var long OneMathPerPriority
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
AllowedAttributesOnPriority = _libsbml.AllowedAttributesOnPriority
## @var long AllowedAttributesOnPriority
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
GeneralWarningNotSpecified = _libsbml.GeneralWarningNotSpecified
## @var long GeneralWarningNotSpecified
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
CompartmentShouldHaveSize = _libsbml.CompartmentShouldHaveSize
## @var long CompartmentShouldHaveSize
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
SpeciesShouldHaveValue = _libsbml.SpeciesShouldHaveValue
## @var long SpeciesShouldHaveValue
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
ParameterShouldHaveUnits = _libsbml.ParameterShouldHaveUnits
## @var long ParameterShouldHaveUnits
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
LocalParameterShadowsId = _libsbml.LocalParameterShadowsId
## @var long LocalParameterShadowsId
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
LibSBMLAdditionalCodesLowerBound = _libsbml.LibSBMLAdditionalCodesLowerBound
## @var long LibSBMLAdditionalCodesLowerBound
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
CannotConvertToL1V1 = _libsbml.CannotConvertToL1V1
## @var long CannotConvertToL1V1
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
NoEventsInL1 = _libsbml.NoEventsInL1
## @var long NoEventsInL1
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
NoFunctionDefinitionsInL1 = _libsbml.NoFunctionDefinitionsInL1
## @var long NoFunctionDefinitionsInL1
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
NoConstraintsInL1 = _libsbml.NoConstraintsInL1
## @var long NoConstraintsInL1
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
NoInitialAssignmentsInL1 = _libsbml.NoInitialAssignmentsInL1
## @var long NoInitialAssignmentsInL1
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
NoSpeciesTypesInL1 = _libsbml.NoSpeciesTypesInL1
## @var long NoSpeciesTypesInL1
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
NoCompartmentTypeInL1 = _libsbml.NoCompartmentTypeInL1
## @var long NoCompartmentTypeInL1
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
NoNon3DCompartmentsInL1 = _libsbml.NoNon3DCompartmentsInL1
## @var long NoNon3DCompartmentsInL1
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
NoFancyStoichiometryMathInL1 = _libsbml.NoFancyStoichiometryMathInL1
## @var long NoFancyStoichiometryMathInL1
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
NoNonIntegerStoichiometryInL1 = _libsbml.NoNonIntegerStoichiometryInL1
## @var long NoNonIntegerStoichiometryInL1
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
NoUnitMultipliersOrOffsetsInL1 = _libsbml.NoUnitMultipliersOrOffsetsInL1
## @var long NoUnitMultipliersOrOffsetsInL1
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
SpeciesCompartmentRequiredInL1 = _libsbml.SpeciesCompartmentRequiredInL1
## @var long SpeciesCompartmentRequiredInL1
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
NoSpeciesSpatialSizeUnitsInL1 = _libsbml.NoSpeciesSpatialSizeUnitsInL1
## @var long NoSpeciesSpatialSizeUnitsInL1
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
NoSBOTermsInL1 = _libsbml.NoSBOTermsInL1
## @var long NoSBOTermsInL1
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
StrictUnitsRequiredInL1 = _libsbml.StrictUnitsRequiredInL1
## @var long StrictUnitsRequiredInL1
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
ConversionFactorNotInL1 = _libsbml.ConversionFactorNotInL1
## @var long ConversionFactorNotInL1
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
CompartmentNotOnL1Reaction = _libsbml.CompartmentNotOnL1Reaction
## @var long CompartmentNotOnL1Reaction
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
ExtentUnitsNotSubstance = _libsbml.ExtentUnitsNotSubstance
## @var long ExtentUnitsNotSubstance
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
NoConstraintsInL2v1 = _libsbml.NoConstraintsInL2v1
## @var long NoConstraintsInL2v1
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
NoInitialAssignmentsInL2v1 = _libsbml.NoInitialAssignmentsInL2v1
## @var long NoInitialAssignmentsInL2v1
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
NoSpeciesTypeInL2v1 = _libsbml.NoSpeciesTypeInL2v1
## @var long NoSpeciesTypeInL2v1
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
NoCompartmentTypeInL2v1 = _libsbml.NoCompartmentTypeInL2v1
## @var long NoCompartmentTypeInL2v1
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
NoSBOTermsInL2v1 = _libsbml.NoSBOTermsInL2v1
## @var long NoSBOTermsInL2v1
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
NoIdOnSpeciesReferenceInL2v1 = _libsbml.NoIdOnSpeciesReferenceInL2v1
## @var long NoIdOnSpeciesReferenceInL2v1
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
NoDelayedEventAssignmentInL2v1 = _libsbml.NoDelayedEventAssignmentInL2v1
## @var long NoDelayedEventAssignmentInL2v1
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
StrictUnitsRequiredInL2v1 = _libsbml.StrictUnitsRequiredInL2v1
## @var long StrictUnitsRequiredInL2v1
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
IntegerSpatialDimensions = _libsbml.IntegerSpatialDimensions
## @var long IntegerSpatialDimensions
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
StoichiometryMathNotYetSupported = _libsbml.StoichiometryMathNotYetSupported
## @var long StoichiometryMathNotYetSupported
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
PriorityLostFromL3 = _libsbml.PriorityLostFromL3
## @var long PriorityLostFromL3
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
NonPersistentNotSupported = _libsbml.NonPersistentNotSupported
## @var long NonPersistentNotSupported
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
InitialValueFalseEventNotSupported = _libsbml.InitialValueFalseEventNotSupported
## @var long InitialValueFalseEventNotSupported
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
SBOTermNotUniversalInL2v2 = _libsbml.SBOTermNotUniversalInL2v2
## @var long SBOTermNotUniversalInL2v2
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
NoUnitOffsetInL2v2 = _libsbml.NoUnitOffsetInL2v2
## @var long NoUnitOffsetInL2v2
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
NoKineticLawTimeUnitsInL2v2 = _libsbml.NoKineticLawTimeUnitsInL2v2
## @var long NoKineticLawTimeUnitsInL2v2
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
NoKineticLawSubstanceUnitsInL2v2 = _libsbml.NoKineticLawSubstanceUnitsInL2v2
## @var long NoKineticLawSubstanceUnitsInL2v2
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
NoDelayedEventAssignmentInL2v2 = _libsbml.NoDelayedEventAssignmentInL2v2
## @var long NoDelayedEventAssignmentInL2v2
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
ModelSBOBranchChangedBeyondL2v2 = _libsbml.ModelSBOBranchChangedBeyondL2v2
## @var long ModelSBOBranchChangedBeyondL2v2
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
StrictUnitsRequiredInL2v2 = _libsbml.StrictUnitsRequiredInL2v2
## @var long StrictUnitsRequiredInL2v2
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
StrictSBORequiredInL2v2 = _libsbml.StrictSBORequiredInL2v2
## @var long StrictSBORequiredInL2v2
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
DuplicateAnnotationInvalidInL2v2 = _libsbml.DuplicateAnnotationInvalidInL2v2
## @var long DuplicateAnnotationInvalidInL2v2
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
NoUnitOffsetInL2v3 = _libsbml.NoUnitOffsetInL2v3
## @var long NoUnitOffsetInL2v3
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
NoKineticLawTimeUnitsInL2v3 = _libsbml.NoKineticLawTimeUnitsInL2v3
## @var long NoKineticLawTimeUnitsInL2v3
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
NoKineticLawSubstanceUnitsInL2v3 = _libsbml.NoKineticLawSubstanceUnitsInL2v3
## @var long NoKineticLawSubstanceUnitsInL2v3
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
NoSpeciesSpatialSizeUnitsInL2v3 = _libsbml.NoSpeciesSpatialSizeUnitsInL2v3
## @var long NoSpeciesSpatialSizeUnitsInL2v3
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
NoEventTimeUnitsInL2v3 = _libsbml.NoEventTimeUnitsInL2v3
## @var long NoEventTimeUnitsInL2v3
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
NoDelayedEventAssignmentInL2v3 = _libsbml.NoDelayedEventAssignmentInL2v3
## @var long NoDelayedEventAssignmentInL2v3
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
ModelSBOBranchChangedBeyondL2v3 = _libsbml.ModelSBOBranchChangedBeyondL2v3
## @var long ModelSBOBranchChangedBeyondL2v3
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
StrictUnitsRequiredInL2v3 = _libsbml.StrictUnitsRequiredInL2v3
## @var long StrictUnitsRequiredInL2v3
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
StrictSBORequiredInL2v3 = _libsbml.StrictSBORequiredInL2v3
## @var long StrictSBORequiredInL2v3
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
DuplicateAnnotationInvalidInL2v3 = _libsbml.DuplicateAnnotationInvalidInL2v3
## @var long DuplicateAnnotationInvalidInL2v3
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
NoUnitOffsetInL2v4 = _libsbml.NoUnitOffsetInL2v4
## @var long NoUnitOffsetInL2v4
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
NoKineticLawTimeUnitsInL2v4 = _libsbml.NoKineticLawTimeUnitsInL2v4
## @var long NoKineticLawTimeUnitsInL2v4
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
NoKineticLawSubstanceUnitsInL2v4 = _libsbml.NoKineticLawSubstanceUnitsInL2v4
## @var long NoKineticLawSubstanceUnitsInL2v4
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
NoSpeciesSpatialSizeUnitsInL2v4 = _libsbml.NoSpeciesSpatialSizeUnitsInL2v4
## @var long NoSpeciesSpatialSizeUnitsInL2v4
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
NoEventTimeUnitsInL2v4 = _libsbml.NoEventTimeUnitsInL2v4
## @var long NoEventTimeUnitsInL2v4
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
ModelSBOBranchChangedInL2v4 = _libsbml.ModelSBOBranchChangedInL2v4
## @var long ModelSBOBranchChangedInL2v4
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
DuplicateAnnotationInvalidInL2v4 = _libsbml.DuplicateAnnotationInvalidInL2v4
## @var long DuplicateAnnotationInvalidInL2v4
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
NoSpeciesTypeInL3v1 = _libsbml.NoSpeciesTypeInL3v1
## @var long NoSpeciesTypeInL3v1
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
NoCompartmentTypeInL3v1 = _libsbml.NoCompartmentTypeInL3v1
## @var long NoCompartmentTypeInL3v1
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
NoUnitOffsetInL3v1 = _libsbml.NoUnitOffsetInL3v1
## @var long NoUnitOffsetInL3v1
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
NoKineticLawTimeUnitsInL3v1 = _libsbml.NoKineticLawTimeUnitsInL3v1
## @var long NoKineticLawTimeUnitsInL3v1
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
NoKineticLawSubstanceUnitsInL3v1 = _libsbml.NoKineticLawSubstanceUnitsInL3v1
## @var long NoKineticLawSubstanceUnitsInL3v1
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
NoSpeciesSpatialSizeUnitsInL3v1 = _libsbml.NoSpeciesSpatialSizeUnitsInL3v1
## @var long NoSpeciesSpatialSizeUnitsInL3v1
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
NoEventTimeUnitsInL3v1 = _libsbml.NoEventTimeUnitsInL3v1
## @var long NoEventTimeUnitsInL3v1
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
ModelSBOBranchChangedInL3v1 = _libsbml.ModelSBOBranchChangedInL3v1
## @var long ModelSBOBranchChangedInL3v1
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
DuplicateAnnotationInvalidInL3v1 = _libsbml.DuplicateAnnotationInvalidInL3v1
## @var long DuplicateAnnotationInvalidInL3v1
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
NoCompartmentOutsideInL3v1 = _libsbml.NoCompartmentOutsideInL3v1
## @var long NoCompartmentOutsideInL3v1
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
NoStoichiometryMathInL3v1 = _libsbml.NoStoichiometryMathInL3v1
## @var long NoStoichiometryMathInL3v1
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
InvalidSBMLLevelVersion = _libsbml.InvalidSBMLLevelVersion
## @var long InvalidSBMLLevelVersion
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
AnnotationNotesNotAllowedLevel1 = _libsbml.AnnotationNotesNotAllowedLevel1
## @var long AnnotationNotesNotAllowedLevel1
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
InvalidRuleOrdering = _libsbml.InvalidRuleOrdering
## @var long InvalidRuleOrdering
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
RequiredPackagePresent = _libsbml.RequiredPackagePresent
## @var long RequiredPackagePresent
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
UnrequiredPackagePresent = _libsbml.UnrequiredPackagePresent
## @var long UnrequiredPackagePresent
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
SubsUnitsAllowedInKL = _libsbml.SubsUnitsAllowedInKL
## @var long SubsUnitsAllowedInKL
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
TimeUnitsAllowedInKL = _libsbml.TimeUnitsAllowedInKL
## @var long TimeUnitsAllowedInKL
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
FormulaInLevel1KL = _libsbml.FormulaInLevel1KL
## @var long FormulaInLevel1KL
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
TimeUnitsRemoved = _libsbml.TimeUnitsRemoved
## @var long TimeUnitsRemoved
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
BadMathML = _libsbml.BadMathML
## @var long BadMathML
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
FailedMathMLReadOfDouble = _libsbml.FailedMathMLReadOfDouble
## @var long FailedMathMLReadOfDouble
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
FailedMathMLReadOfInteger = _libsbml.FailedMathMLReadOfInteger
## @var long FailedMathMLReadOfInteger
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
FailedMathMLReadOfExponential = _libsbml.FailedMathMLReadOfExponential
## @var long FailedMathMLReadOfExponential
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
FailedMathMLReadOfRational = _libsbml.FailedMathMLReadOfRational
## @var long FailedMathMLReadOfRational
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
BadMathMLNodeType = _libsbml.BadMathMLNodeType
## @var long BadMathMLNodeType
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
InvalidMathMLAttribute = _libsbml.InvalidMathMLAttribute
## @var long InvalidMathMLAttribute
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
## (Specifically: an attribute is not allowed on this MathML element.)
NoTimeSymbolInFunctionDef = _libsbml.NoTimeSymbolInFunctionDef
## @var long NoTimeSymbolInFunctionDef
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
NoBodyInFunctionDef = _libsbml.NoBodyInFunctionDef
## @var long NoBodyInFunctionDef
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
InconsistentArgUnitsWarnings = _libsbml.InconsistentArgUnitsWarnings
## @var long InconsistentArgUnitsWarnings
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
InconsistentPowerUnitsWarnings = _libsbml.InconsistentPowerUnitsWarnings
## @var long InconsistentPowerUnitsWarnings
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
InconsistentExponUnitsWarnings = _libsbml.InconsistentExponUnitsWarnings
## @var long InconsistentExponUnitsWarnings
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
UndeclaredUnits = _libsbml.UndeclaredUnits
## @var long UndeclaredUnits
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
UndeclaredTimeUnitsL3 = _libsbml.UndeclaredTimeUnitsL3
## @var long UndeclaredTimeUnitsL3
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
UndeclaredExtentUnitsL3 = _libsbml.UndeclaredExtentUnitsL3
## @var long UndeclaredExtentUnitsL3
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
UndeclaredObjectUnitsL3 = _libsbml.UndeclaredObjectUnitsL3
## @var long UndeclaredObjectUnitsL3
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
UnrecognisedSBOTerm = _libsbml.UnrecognisedSBOTerm
## @var long UnrecognisedSBOTerm
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
ObseleteSBOTerm = _libsbml.ObseleteSBOTerm
## @var long ObseleteSBOTerm
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
IncorrectCompartmentSpatialDimensions = _libsbml.IncorrectCompartmentSpatialDimensions
## @var long IncorrectCompartmentSpatialDimensions
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
CompartmentTypeNotValidAttribute = _libsbml.CompartmentTypeNotValidAttribute
## @var long CompartmentTypeNotValidAttribute
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
ConstantNotValidAttribute = _libsbml.ConstantNotValidAttribute
## @var long ConstantNotValidAttribute
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
MetaIdNotValidAttribute = _libsbml.MetaIdNotValidAttribute
## @var long MetaIdNotValidAttribute
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
SBOTermNotValidAttributeBeforeL2V3 = _libsbml.SBOTermNotValidAttributeBeforeL2V3
## @var long SBOTermNotValidAttributeBeforeL2V3
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
InvalidL1CompartmentUnits = _libsbml.InvalidL1CompartmentUnits
## @var long InvalidL1CompartmentUnits
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
L1V1CompartmentVolumeReqd = _libsbml.L1V1CompartmentVolumeReqd
## @var long L1V1CompartmentVolumeReqd
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
CompartmentTypeNotValidComponent = _libsbml.CompartmentTypeNotValidComponent
## @var long CompartmentTypeNotValidComponent
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
ConstraintNotValidComponent = _libsbml.ConstraintNotValidComponent
## @var long ConstraintNotValidComponent
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
EventNotValidComponent = _libsbml.EventNotValidComponent
## @var long EventNotValidComponent
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
SBOTermNotValidAttributeBeforeL2V2 = _libsbml.SBOTermNotValidAttributeBeforeL2V2
## @var long SBOTermNotValidAttributeBeforeL2V2
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
FuncDefNotValidComponent = _libsbml.FuncDefNotValidComponent
## @var long FuncDefNotValidComponent
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
InitialAssignNotValidComponent = _libsbml.InitialAssignNotValidComponent
## @var long InitialAssignNotValidComponent
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
VariableNotValidAttribute = _libsbml.VariableNotValidAttribute
## @var long VariableNotValidAttribute
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
UnitsNotValidAttribute = _libsbml.UnitsNotValidAttribute
## @var long UnitsNotValidAttribute
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
ConstantSpeciesNotValidAttribute = _libsbml.ConstantSpeciesNotValidAttribute
## @var long ConstantSpeciesNotValidAttribute
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
SpatialSizeUnitsNotValidAttribute = _libsbml.SpatialSizeUnitsNotValidAttribute
## @var long SpatialSizeUnitsNotValidAttribute
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
SpeciesTypeNotValidAttribute = _libsbml.SpeciesTypeNotValidAttribute
## @var long SpeciesTypeNotValidAttribute
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
HasOnlySubsUnitsNotValidAttribute = _libsbml.HasOnlySubsUnitsNotValidAttribute
## @var long HasOnlySubsUnitsNotValidAttribute
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
IdNotValidAttribute = _libsbml.IdNotValidAttribute
## @var long IdNotValidAttribute
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
NameNotValidAttribute = _libsbml.NameNotValidAttribute
## @var long NameNotValidAttribute
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
SpeciesTypeNotValidComponent = _libsbml.SpeciesTypeNotValidComponent
## @var long SpeciesTypeNotValidComponent
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
StoichiometryMathNotValidComponent = _libsbml.StoichiometryMathNotValidComponent
## @var long StoichiometryMathNotValidComponent
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
MultiplierNotValidAttribute = _libsbml.MultiplierNotValidAttribute
## @var long MultiplierNotValidAttribute
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
OffsetNotValidAttribute = _libsbml.OffsetNotValidAttribute
## @var long OffsetNotValidAttribute
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
L3SpatialDimensionsUnset = _libsbml.L3SpatialDimensionsUnset
## @var long L3SpatialDimensionsUnset
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
UnknownCoreAttribute = _libsbml.UnknownCoreAttribute
## @var long UnknownCoreAttribute
##
## Encountered an unknown attribute in the core SBML Level 3 namespace.
UnknownPackageAttribute = _libsbml.UnknownPackageAttribute
## @var long UnknownPackageAttribute
##
## Encountered an unknown attribute in an SBML Level 3 package namespace.
PackageConversionNotSupported = _libsbml.PackageConversionNotSupported
## @var long PackageConversionNotSupported
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
InvalidTargetLevelVersion = _libsbml.InvalidTargetLevelVersion
## @var long InvalidTargetLevelVersion
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
L3NotSupported = _libsbml.L3NotSupported
## @var long L3NotSupported
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
SBMLCodesUpperBound = _libsbml.SBMLCodesUpperBound
## @var long SBMLCodesUpperBound
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
LIBSBML_CAT_SBML = _libsbml.LIBSBML_CAT_SBML
## @var long LIBSBML_CAT_SBML
## Category code for SBMLError diagnostics.
##
## This code has the following meaning: General SBML error not falling
## into another category below.
LIBSBML_CAT_SBML_L1_COMPAT = _libsbml.LIBSBML_CAT_SBML_L1_COMPAT
## @var long LIBSBML_CAT_SBML_L1_COMPAT
## Category code for SBMLError diagnostics.
##
## This code has the following meaning: Category of errors that can
## only occur during attempted translation from one Level/Version of
## SBML to another. This particular category applies to errors
## encountered while trying to convert a model from SBML Level 2
## to SBML Level 1.
LIBSBML_CAT_SBML_L2V1_COMPAT = _libsbml.LIBSBML_CAT_SBML_L2V1_COMPAT
## @var long LIBSBML_CAT_SBML_L2V1_COMPAT
## Category code for SBMLError diagnostics.
##
## This code has the following meaning: Category of errors that can
## only occur during attempted translation from one Level/Version of
## SBML to another. This particular category applies to errors
## encountered while trying to convert a model to SBML Level 2
## Version 1.
LIBSBML_CAT_SBML_L2V2_COMPAT = _libsbml.LIBSBML_CAT_SBML_L2V2_COMPAT
## @var long LIBSBML_CAT_SBML_L2V2_COMPAT
## Category code for SBMLError diagnostics.
##
## This code has the following meaning: Category of errors that can
## only occur during attempted translation from one Level/Version of
## SBML to another. This particular category applies to errors
## encountered while trying to convert a model to SBML Level 2
## Version 2.
LIBSBML_CAT_GENERAL_CONSISTENCY = _libsbml.LIBSBML_CAT_GENERAL_CONSISTENCY
## @var long LIBSBML_CAT_GENERAL_CONSISTENCY
## Category code for SBMLError diagnostics.
##
## This code has the following meaning: Category of errors that can
## occur while validating general SBML constructs. With respect to the
## SBML specification, these concern failures in applying the
## validation rules numbered 2xxxx in the Level 2 Versions 2
## and 3 specifications.
LIBSBML_CAT_IDENTIFIER_CONSISTENCY = _libsbml.LIBSBML_CAT_IDENTIFIER_CONSISTENCY
## @var long LIBSBML_CAT_IDENTIFIER_CONSISTENCY
## Category code for SBMLError diagnostics.
##
## This code has the following meaning: Category of errors that can
## occur while validating symbol identifiers in a model. With respect
## to the SBML specification, these concern failures in applying the
## validation rules numbered 103xx in the Level 2 Versions 2
## and 3 specifications.
LIBSBML_CAT_UNITS_CONSISTENCY = _libsbml.LIBSBML_CAT_UNITS_CONSISTENCY
## @var long LIBSBML_CAT_UNITS_CONSISTENCY
## Category code for SBMLError diagnostics.
##
## This code has the following meaning: Category of errors that can
## occur while validating the units of measurement on quantities in a
## model. With respect to the SBML specification, these concern
## failures in applying the validation rules numbered 105xx in the
## Level 2 Versions 2 and 3 specifications.
LIBSBML_CAT_MATHML_CONSISTENCY = _libsbml.LIBSBML_CAT_MATHML_CONSISTENCY
## @var long LIBSBML_CAT_MATHML_CONSISTENCY
## Category code for SBMLError diagnostics.
##
## This code has the following meaning: Category of errors that can
## occur while validating MathML formulas in a model. With respect to
## the SBML specification, these concern failures in applying the
## validation rules numbered 102xx in the Level 2 Versions 2
## and 3 specifications.
LIBSBML_CAT_SBO_CONSISTENCY = _libsbml.LIBSBML_CAT_SBO_CONSISTENCY
## @var long LIBSBML_CAT_SBO_CONSISTENCY
## Category code for SBMLError diagnostics.
##
## This code has the following meaning: Category of errors that can
## occur while validating SBO identifiers in a model. With respect to
## the SBML specification, these concern failures in applying the
## validation rules numbered 107xx in the Level 2 Versions 2
## and 3 specifications.
LIBSBML_CAT_OVERDETERMINED_MODEL = _libsbml.LIBSBML_CAT_OVERDETERMINED_MODEL
## @var long LIBSBML_CAT_OVERDETERMINED_MODEL
## Category code for SBMLError diagnostics.
##
## This code has the following meaning: Error in the system of
## equations in the model: the system is overdetermined, therefore
## violating a tenet of proper SBML. With respect to the SBML
## specification, this is validation rule #10601 in the SBML
## Level 2 Versions 2 and 3 specifications.
LIBSBML_CAT_SBML_L2V3_COMPAT = _libsbml.LIBSBML_CAT_SBML_L2V3_COMPAT
## @var long LIBSBML_CAT_SBML_L2V3_COMPAT
## Category code for SBMLError diagnostics.
##
## This code has the following meaning: Category of errors that can
## only occur during attempted translation from one Level/Version of
## SBML to another. This particular category applies to errors
## encountered while trying to convert a model to SBML Level 2
## Version 3.
LIBSBML_CAT_MODELING_PRACTICE = _libsbml.LIBSBML_CAT_MODELING_PRACTICE
## @var long LIBSBML_CAT_MODELING_PRACTICE
## Category code for SBMLError diagnostics.
##
## This code has the following meaning: Category of warnings about
## recommended good practices involving SBML and computational
## modeling. (These are tests performed by libSBML and do not have
## equivalent SBML validation rules.)
LIBSBML_CAT_INTERNAL_CONSISTENCY = _libsbml.LIBSBML_CAT_INTERNAL_CONSISTENCY
## @var long LIBSBML_CAT_INTERNAL_CONSISTENCY
## Category code for SBMLError diagnostics.
##
## This code has the following meaning: Category of errors that can
## occur while validating libSBML's internal representation of SBML
## constructs. (These are tests performed by libSBML and do not have
## equivalent SBML validation rules.)
LIBSBML_CAT_SBML_L2V4_COMPAT = _libsbml.LIBSBML_CAT_SBML_L2V4_COMPAT
## @var long LIBSBML_CAT_SBML_L2V4_COMPAT
## Category code for SBMLError diagnostics.
##
## This code has the following meaning: Category of errors that can
## only occur during attempted translation from one Level/Version of
## SBML to another. This particular category applies to errors
## encountered while trying to convert a model to SBML Level 2
## Version 4.
LIBSBML_CAT_SBML_L3V1_COMPAT = _libsbml.LIBSBML_CAT_SBML_L3V1_COMPAT
## @var long LIBSBML_CAT_SBML_L3V1_COMPAT
## Category code for SBMLError diagnostics.
##
## This code has the following meaning: Category of errors that can
## only occur during attempted translation from one Level/Version of
## SBML to another. This particular category applies to errors
## encountered while trying to convert a model to SBML Level 3
## Version 1.
SBML_UNKNOWN = _libsbml.SBML_UNKNOWN
## @var long SBML_UNKNOWN
## @brief One of the possible SBML component type codes.
##
## LibSBML attaches an identifying code to every kind of SBML object.
## These are known as <em>SBML type codes</em>. In other languages,
## the set of type codes is stored in an enumeration; in the Java
## language interface for libSBML, the type codes are defined as static
## integer constants in the interface class {@link libsbml}.
## The names of the type codes all begin with the characters
## <code>SBML_</code>.
SBML_COMPARTMENT = _libsbml.SBML_COMPARTMENT
## @var long SBML_COMPARTMENT
## @brief One of the possible SBML component type codes.
##
## LibSBML attaches an identifying code to every kind of SBML object.
## These are known as <em>SBML type codes</em>. In other languages,
## the set of type codes is stored in an enumeration; in the Java
## language interface for libSBML, the type codes are defined as static
## integer constants in the interface class {@link libsbml}.
## The names of the type codes all begin with the characters
## <code>SBML_</code>.
SBML_COMPARTMENT_TYPE = _libsbml.SBML_COMPARTMENT_TYPE
## @var long SBML_COMPARTMENT_TYPE
## @brief One of the possible SBML component type codes.
##
## LibSBML attaches an identifying code to every kind of SBML object.
## These are known as <em>SBML type codes</em>. In other languages,
## the set of type codes is stored in an enumeration; in the Java
## language interface for libSBML, the type codes are defined as static
## integer constants in the interface class {@link libsbml}.
## The names of the type codes all begin with the characters
## <code>SBML_</code>.
SBML_CONSTRAINT = _libsbml.SBML_CONSTRAINT
## @var long SBML_CONSTRAINT
## @brief One of the possible SBML component type codes.
##
## LibSBML attaches an identifying code to every kind of SBML object.
## These are known as <em>SBML type codes</em>. In other languages,
## the set of type codes is stored in an enumeration; in the Java
## language interface for libSBML, the type codes are defined as static
## integer constants in the interface class {@link libsbml}.
## The names of the type codes all begin with the characters
## <code>SBML_</code>.
SBML_DOCUMENT = _libsbml.SBML_DOCUMENT
## @var long SBML_DOCUMENT
## @brief One of the possible SBML component type codes.
##
## LibSBML attaches an identifying code to every kind of SBML object.
## These are known as <em>SBML type codes</em>. In other languages,
## the set of type codes is stored in an enumeration; in the Java
## language interface for libSBML, the type codes are defined as static
## integer constants in the interface class {@link libsbml}.
## The names of the type codes all begin with the characters
## <code>SBML_</code>.
SBML_EVENT = _libsbml.SBML_EVENT
## @var long SBML_EVENT
## @brief One of the possible SBML component type codes.
##
## LibSBML attaches an identifying code to every kind of SBML object.
## These are known as <em>SBML type codes</em>. In other languages,
## the set of type codes is stored in an enumeration; in the Java
## language interface for libSBML, the type codes are defined as static
## integer constants in the interface class {@link libsbml}.
## The names of the type codes all begin with the characters
## <code>SBML_</code>.
SBML_EVENT_ASSIGNMENT = _libsbml.SBML_EVENT_ASSIGNMENT
## @var long SBML_EVENT_ASSIGNMENT
## @brief One of the possible SBML component type codes.
##
## LibSBML attaches an identifying code to every kind of SBML object.
## These are known as <em>SBML type codes</em>. In other languages,
## the set of type codes is stored in an enumeration; in the Java
## language interface for libSBML, the type codes are defined as static
## integer constants in the interface class {@link libsbml}.
## The names of the type codes all begin with the characters
## <code>SBML_</code>.
SBML_FUNCTION_DEFINITION = _libsbml.SBML_FUNCTION_DEFINITION
## @var long SBML_FUNCTION_DEFINITION
## @brief One of the possible SBML component type codes.
##
## LibSBML attaches an identifying code to every kind of SBML object.
## These are known as <em>SBML type codes</em>. In other languages,
## the set of type codes is stored in an enumeration; in the Java
## language interface for libSBML, the type codes are defined as static
## integer constants in the interface class {@link libsbml}.
## The names of the type codes all begin with the characters
## <code>SBML_</code>.
SBML_INITIAL_ASSIGNMENT = _libsbml.SBML_INITIAL_ASSIGNMENT
## @var long SBML_INITIAL_ASSIGNMENT
## @brief One of the possible SBML component type codes.
##
## LibSBML attaches an identifying code to every kind of SBML object.
## These are known as <em>SBML type codes</em>. In other languages,
## the set of type codes is stored in an enumeration; in the Java
## language interface for libSBML, the type codes are defined as static
## integer constants in the interface class {@link libsbml}.
## The names of the type codes all begin with the characters
## <code>SBML_</code>.
SBML_KINETIC_LAW = _libsbml.SBML_KINETIC_LAW
## @var long SBML_KINETIC_LAW
## @brief One of the possible SBML component type codes.
##
## LibSBML attaches an identifying code to every kind of SBML object.
## These are known as <em>SBML type codes</em>. In other languages,
## the set of type codes is stored in an enumeration; in the Java
## language interface for libSBML, the type codes are defined as static
## integer constants in the interface class {@link libsbml}.
## The names of the type codes all begin with the characters
## <code>SBML_</code>.
SBML_LIST_OF = _libsbml.SBML_LIST_OF
## @var long SBML_LIST_OF
## @brief One of the possible SBML component type codes.
##
## LibSBML attaches an identifying code to every kind of SBML object.
## These are known as <em>SBML type codes</em>. In other languages,
## the set of type codes is stored in an enumeration; in the Java
## language interface for libSBML, the type codes are defined as static
## integer constants in the interface class {@link libsbml}.
## The names of the type codes all begin with the characters
## <code>SBML_</code>.
SBML_MODEL = _libsbml.SBML_MODEL
## @var long SBML_MODEL
## @brief One of the possible SBML component type codes.
##
## LibSBML attaches an identifying code to every kind of SBML object.
## These are known as <em>SBML type codes</em>. In other languages,
## the set of type codes is stored in an enumeration; in the Java
## language interface for libSBML, the type codes are defined as static
## integer constants in the interface class {@link libsbml}.
## The names of the type codes all begin with the characters
## <code>SBML_</code>.
SBML_PARAMETER = _libsbml.SBML_PARAMETER
## @var long SBML_PARAMETER
## @brief One of the possible SBML component type codes.
##
## LibSBML attaches an identifying code to every kind of SBML object.
## These are known as <em>SBML type codes</em>. In other languages,
## the set of type codes is stored in an enumeration; in the Java
## language interface for libSBML, the type codes are defined as static
## integer constants in the interface class {@link libsbml}.
## The names of the type codes all begin with the characters
## <code>SBML_</code>.
SBML_REACTION = _libsbml.SBML_REACTION
## @var long SBML_REACTION
## @brief One of the possible SBML component type codes.
##
## LibSBML attaches an identifying code to every kind of SBML object.
## These are known as <em>SBML type codes</em>. In other languages,
## the set of type codes is stored in an enumeration; in the Java
## language interface for libSBML, the type codes are defined as static
## integer constants in the interface class {@link libsbml}.
## The names of the type codes all begin with the characters
## <code>SBML_</code>.
SBML_RULE = _libsbml.SBML_RULE
## @var long SBML_RULE
## @brief One of the possible SBML component type codes.
##
## LibSBML attaches an identifying code to every kind of SBML object.
## These are known as <em>SBML type codes</em>. In other languages,
## the set of type codes is stored in an enumeration; in the Java
## language interface for libSBML, the type codes are defined as static
## integer constants in the interface class {@link libsbml}.
## The names of the type codes all begin with the characters
## <code>SBML_</code>.
SBML_SPECIES = _libsbml.SBML_SPECIES
## @var long SBML_SPECIES
## @brief One of the possible SBML component type codes.
##
## LibSBML attaches an identifying code to every kind of SBML object.
## These are known as <em>SBML type codes</em>. In other languages,
## the set of type codes is stored in an enumeration; in the Java
## language interface for libSBML, the type codes are defined as static
## integer constants in the interface class {@link libsbml}.
## The names of the type codes all begin with the characters
## <code>SBML_</code>.
SBML_SPECIES_REFERENCE = _libsbml.SBML_SPECIES_REFERENCE
## @var long SBML_SPECIES_REFERENCE
## @brief One of the possible SBML component type codes.
##
## LibSBML attaches an identifying code to every kind of SBML object.
## These are known as <em>SBML type codes</em>. In other languages,
## the set of type codes is stored in an enumeration; in the Java
## language interface for libSBML, the type codes are defined as static
## integer constants in the interface class {@link libsbml}.
## The names of the type codes all begin with the characters
## <code>SBML_</code>.
SBML_SPECIES_TYPE = _libsbml.SBML_SPECIES_TYPE
## @var long SBML_SPECIES_TYPE
## @brief One of the possible SBML component type codes.
##
## LibSBML attaches an identifying code to every kind of SBML object.
## These are known as <em>SBML type codes</em>. In other languages,
## the set of type codes is stored in an enumeration; in the Java
## language interface for libSBML, the type codes are defined as static
## integer constants in the interface class {@link libsbml}.
## The names of the type codes all begin with the characters
## <code>SBML_</code>.
SBML_MODIFIER_SPECIES_REFERENCE = _libsbml.SBML_MODIFIER_SPECIES_REFERENCE
## @var long SBML_MODIFIER_SPECIES_REFERENCE
## @brief One of the possible SBML component type codes.
##
## LibSBML attaches an identifying code to every kind of SBML object.
## These are known as <em>SBML type codes</em>. In other languages,
## the set of type codes is stored in an enumeration; in the Java
## language interface for libSBML, the type codes are defined as static
## integer constants in the interface class {@link libsbml}.
## The names of the type codes all begin with the characters
## <code>SBML_</code>.
SBML_UNIT_DEFINITION = _libsbml.SBML_UNIT_DEFINITION
## @var long SBML_UNIT_DEFINITION
## @brief One of the possible SBML component type codes.
##
## LibSBML attaches an identifying code to every kind of SBML object.
## These are known as <em>SBML type codes</em>. In other languages,
## the set of type codes is stored in an enumeration; in the Java
## language interface for libSBML, the type codes are defined as static
## integer constants in the interface class {@link libsbml}.
## The names of the type codes all begin with the characters
## <code>SBML_</code>.
SBML_UNIT = _libsbml.SBML_UNIT
## @var long SBML_UNIT
## @brief One of the possible SBML component type codes.
##
## LibSBML attaches an identifying code to every kind of SBML object.
## These are known as <em>SBML type codes</em>. In other languages,
## the set of type codes is stored in an enumeration; in the Java
## language interface for libSBML, the type codes are defined as static
## integer constants in the interface class {@link libsbml}.
## The names of the type codes all begin with the characters
## <code>SBML_</code>.
SBML_ALGEBRAIC_RULE = _libsbml.SBML_ALGEBRAIC_RULE
## @var long SBML_ALGEBRAIC_RULE
## @brief One of the possible SBML component type codes.
##
## LibSBML attaches an identifying code to every kind of SBML object.
## These are known as <em>SBML type codes</em>. In other languages,
## the set of type codes is stored in an enumeration; in the Java
## language interface for libSBML, the type codes are defined as static
## integer constants in the interface class {@link libsbml}.
## The names of the type codes all begin with the characters
## <code>SBML_</code>.
SBML_ASSIGNMENT_RULE = _libsbml.SBML_ASSIGNMENT_RULE
## @var long SBML_ASSIGNMENT_RULE
## @brief One of the possible SBML component type codes.
##
## LibSBML attaches an identifying code to every kind of SBML object.
## These are known as <em>SBML type codes</em>. In other languages,
## the set of type codes is stored in an enumeration; in the Java
## language interface for libSBML, the type codes are defined as static
## integer constants in the interface class {@link libsbml}.
## The names of the type codes all begin with the characters
## <code>SBML_</code>.
SBML_RATE_RULE = _libsbml.SBML_RATE_RULE
## @var long SBML_RATE_RULE
## @brief One of the possible SBML component type codes.
##
## LibSBML attaches an identifying code to every kind of SBML object.
## These are known as <em>SBML type codes</em>. In other languages,
## the set of type codes is stored in an enumeration; in the Java
## language interface for libSBML, the type codes are defined as static
## integer constants in the interface class {@link libsbml}.
## The names of the type codes all begin with the characters
## <code>SBML_</code>.
SBML_SPECIES_CONCENTRATION_RULE = _libsbml.SBML_SPECIES_CONCENTRATION_RULE
## @var long SBML_SPECIES_CONCENTRATION_RULE
## @brief One of the possible SBML component type codes.
##
## LibSBML attaches an identifying code to every kind of SBML object.
## These are known as <em>SBML type codes</em>. In other languages,
## the set of type codes is stored in an enumeration; in the Java
## language interface for libSBML, the type codes are defined as static
## integer constants in the interface class {@link libsbml}.
## The names of the type codes all begin with the characters
## <code>SBML_</code>.
SBML_COMPARTMENT_VOLUME_RULE = _libsbml.SBML_COMPARTMENT_VOLUME_RULE
## @var long SBML_COMPARTMENT_VOLUME_RULE
## @brief One of the possible SBML component type codes.
##
## LibSBML attaches an identifying code to every kind of SBML object.
## These are known as <em>SBML type codes</em>. In other languages,
## the set of type codes is stored in an enumeration; in the Java
## language interface for libSBML, the type codes are defined as static
## integer constants in the interface class {@link libsbml}.
## The names of the type codes all begin with the characters
## <code>SBML_</code>.
SBML_PARAMETER_RULE = _libsbml.SBML_PARAMETER_RULE
## @var long SBML_PARAMETER_RULE
## @brief One of the possible SBML component type codes.
##
## LibSBML attaches an identifying code to every kind of SBML object.
## These are known as <em>SBML type codes</em>. In other languages,
## the set of type codes is stored in an enumeration; in the Java
## language interface for libSBML, the type codes are defined as static
## integer constants in the interface class {@link libsbml}.
## The names of the type codes all begin with the characters
## <code>SBML_</code>.
SBML_TRIGGER = _libsbml.SBML_TRIGGER
## @var long SBML_TRIGGER
## @brief One of the possible SBML component type codes.
##
## LibSBML attaches an identifying code to every kind of SBML object.
## These are known as <em>SBML type codes</em>. In other languages,
## the set of type codes is stored in an enumeration; in the Java
## language interface for libSBML, the type codes are defined as static
## integer constants in the interface class {@link libsbml}.
## The names of the type codes all begin with the characters
## <code>SBML_</code>.
SBML_DELAY = _libsbml.SBML_DELAY
## @var long SBML_DELAY
## @brief One of the possible SBML component type codes.
##
## LibSBML attaches an identifying code to every kind of SBML object.
## These are known as <em>SBML type codes</em>. In other languages,
## the set of type codes is stored in an enumeration; in the Java
## language interface for libSBML, the type codes are defined as static
## integer constants in the interface class {@link libsbml}.
## The names of the type codes all begin with the characters
## <code>SBML_</code>.
SBML_STOICHIOMETRY_MATH = _libsbml.SBML_STOICHIOMETRY_MATH
## @var long SBML_STOICHIOMETRY_MATH
## @brief One of the possible SBML component type codes.
##
## LibSBML attaches an identifying code to every kind of SBML object.
## These are known as <em>SBML type codes</em>. In other languages,
## the set of type codes is stored in an enumeration; in the Java
## language interface for libSBML, the type codes are defined as static
## integer constants in the interface class {@link libsbml}.
## The names of the type codes all begin with the characters
## <code>SBML_</code>.
SBML_LOCAL_PARAMETER = _libsbml.SBML_LOCAL_PARAMETER
## @var long SBML_LOCAL_PARAMETER
## @brief One of the possible SBML component type codes.
##
## LibSBML attaches an identifying code to every kind of SBML object.
## These are known as <em>SBML type codes</em>. In other languages,
## the set of type codes is stored in an enumeration; in the Java
## language interface for libSBML, the type codes are defined as static
## integer constants in the interface class {@link libsbml}.
## The names of the type codes all begin with the characters
## <code>SBML_</code>.
SBML_PRIORITY = _libsbml.SBML_PRIORITY
## @var long SBML_PRIORITY
## @brief One of the possible SBML component type codes.
##
## LibSBML attaches an identifying code to every kind of SBML object.
## These are known as <em>SBML type codes</em>. In other languages,
## the set of type codes is stored in an enumeration; in the Java
## language interface for libSBML, the type codes are defined as static
## integer constants in the interface class {@link libsbml}.
## The names of the type codes all begin with the characters
## <code>SBML_</code>.
UNIT_KIND_AMPERE = _libsbml.UNIT_KIND_AMPERE
## @var long UNIT_KIND_AMPERE
## @brief One of the possible predefined SBML units.
UNIT_KIND_AVOGADRO = _libsbml.UNIT_KIND_AVOGADRO
## @var long UNIT_KIND_AVOGADRO
## @brief One of the possible predefined SBML units.
UNIT_KIND_BECQUEREL = _libsbml.UNIT_KIND_BECQUEREL
## @var long UNIT_KIND_BECQUEREL
## @brief One of the possible predefined SBML units.
UNIT_KIND_CANDELA = _libsbml.UNIT_KIND_CANDELA
## @var long UNIT_KIND_CANDELA
## @brief One of the possible predefined SBML units.
UNIT_KIND_CELSIUS = _libsbml.UNIT_KIND_CELSIUS
## @var long UNIT_KIND_CELSIUS
## @brief One of the possible predefined SBML units.
UNIT_KIND_COULOMB = _libsbml.UNIT_KIND_COULOMB
## @var long UNIT_KIND_COULOMB
## @brief One of the possible predefined SBML units.
UNIT_KIND_DIMENSIONLESS = _libsbml.UNIT_KIND_DIMENSIONLESS
## @var long UNIT_KIND_DIMENSIONLESS
## @brief One of the possible predefined SBML units.
UNIT_KIND_FARAD = _libsbml.UNIT_KIND_FARAD
## @var long UNIT_KIND_FARAD
## @brief One of the possible predefined SBML units.
UNIT_KIND_GRAM = _libsbml.UNIT_KIND_GRAM
## @var long UNIT_KIND_GRAM
## @brief One of the possible predefined SBML units.
UNIT_KIND_GRAY = _libsbml.UNIT_KIND_GRAY
## @var long UNIT_KIND_GRAY
## @brief One of the possible predefined SBML units.
UNIT_KIND_HENRY = _libsbml.UNIT_KIND_HENRY
## @var long UNIT_KIND_HENRY
## @brief One of the possible predefined SBML units.
UNIT_KIND_HERTZ = _libsbml.UNIT_KIND_HERTZ
## @var long UNIT_KIND_HERTZ
## @brief One of the possible predefined SBML units.
UNIT_KIND_ITEM = _libsbml.UNIT_KIND_ITEM
## @var long UNIT_KIND_ITEM
## @brief One of the possible predefined SBML units.
UNIT_KIND_JOULE = _libsbml.UNIT_KIND_JOULE
## @var long UNIT_KIND_JOULE
## @brief One of the possible predefined SBML units.
UNIT_KIND_KATAL = _libsbml.UNIT_KIND_KATAL
## @var long UNIT_KIND_KATAL
## @brief One of the possible predefined SBML units.
UNIT_KIND_KELVIN = _libsbml.UNIT_KIND_KELVIN
## @var long UNIT_KIND_KELVIN
## @brief One of the possible predefined SBML units.
UNIT_KIND_KILOGRAM = _libsbml.UNIT_KIND_KILOGRAM
## @var long UNIT_KIND_KILOGRAM
## @brief One of the possible predefined SBML units.
UNIT_KIND_LITER = _libsbml.UNIT_KIND_LITER
## @var long UNIT_KIND_LITER
## @brief One of the possible predefined SBML units.
UNIT_KIND_LITRE = _libsbml.UNIT_KIND_LITRE
## @var long UNIT_KIND_LITRE
## @brief One of the possible predefined SBML units.
UNIT_KIND_LUMEN = _libsbml.UNIT_KIND_LUMEN
## @var long UNIT_KIND_LUMEN
## @brief One of the possible predefined SBML units.
UNIT_KIND_LUX = _libsbml.UNIT_KIND_LUX
## @var long UNIT_KIND_LUX
## @brief One of the possible predefined SBML units.
UNIT_KIND_METER = _libsbml.UNIT_KIND_METER
## @var long UNIT_KIND_METER
## @brief One of the possible predefined SBML units.
UNIT_KIND_METRE = _libsbml.UNIT_KIND_METRE
## @var long UNIT_KIND_METRE
## @brief One of the possible predefined SBML units.
UNIT_KIND_MOLE = _libsbml.UNIT_KIND_MOLE
## @var long UNIT_KIND_MOLE
## @brief One of the possible predefined SBML units.
UNIT_KIND_NEWTON = _libsbml.UNIT_KIND_NEWTON
## @var long UNIT_KIND_NEWTON
## @brief One of the possible predefined SBML units.
UNIT_KIND_OHM = _libsbml.UNIT_KIND_OHM
## @var long UNIT_KIND_OHM
## @brief One of the possible predefined SBML units.
UNIT_KIND_PASCAL = _libsbml.UNIT_KIND_PASCAL
## @var long UNIT_KIND_PASCAL
## @brief One of the possible predefined SBML units.
UNIT_KIND_RADIAN = _libsbml.UNIT_KIND_RADIAN
## @var long UNIT_KIND_RADIAN
## @brief One of the possible predefined SBML units.
UNIT_KIND_SECOND = _libsbml.UNIT_KIND_SECOND
## @var long UNIT_KIND_SECOND
## @brief One of the possible predefined SBML units.
UNIT_KIND_SIEMENS = _libsbml.UNIT_KIND_SIEMENS
## @var long UNIT_KIND_SIEMENS
## @brief One of the possible predefined SBML units.
UNIT_KIND_SIEVERT = _libsbml.UNIT_KIND_SIEVERT
## @var long UNIT_KIND_SIEVERT
## @brief One of the possible predefined SBML units.
UNIT_KIND_STERADIAN = _libsbml.UNIT_KIND_STERADIAN
## @var long UNIT_KIND_STERADIAN
## @brief One of the possible predefined SBML units.
UNIT_KIND_TESLA = _libsbml.UNIT_KIND_TESLA
## @var long UNIT_KIND_TESLA
## @brief One of the possible predefined SBML units.
UNIT_KIND_VOLT = _libsbml.UNIT_KIND_VOLT
## @var long UNIT_KIND_VOLT
## @brief One of the possible predefined SBML units.
UNIT_KIND_WATT = _libsbml.UNIT_KIND_WATT
## @var long UNIT_KIND_WATT
## @brief One of the possible predefined SBML units.
UNIT_KIND_WEBER = _libsbml.UNIT_KIND_WEBER
## @var long UNIT_KIND_WEBER
## @brief One of the possible predefined SBML units.
UNIT_KIND_INVALID = _libsbml.UNIT_KIND_INVALID
## @var long UNIT_KIND_INVALID
## @brief One of the possible predefined SBML units.
RULE_TYPE_RATE = _libsbml.RULE_TYPE_RATE
## @var long RULE_TYPE_RATE
## @brief One of the possible SBML Rule object types.
RULE_TYPE_SCALAR = _libsbml.RULE_TYPE_SCALAR
## @var long RULE_TYPE_SCALAR
## @brief One of the possible SBML Rule object types.
RULE_TYPE_INVALID = _libsbml.RULE_TYPE_INVALID
## @var long RULE_TYPE_INVALID
## @brief One of the possible SBML Rule object types.
AST_UNKNOWN = _libsbml.AST_UNKNOWN
## @var long AST_UNKNOWN
## @brief One of the possible ASTNode types. Each ASTNode has a type
## whose value is one of the elements of this enumeration.
AST_ORIGINATES_IN_PACKAGE = _libsbml.AST_ORIGINATES_IN_PACKAGE
## @var long AST_ORIGINATES_IN_PACKAGE
## @brief This node uses math that is only available in an
## SBML Level 3 package.
AST_PLUS = _libsbml.AST_PLUS
## @var long AST_PLUS
##
## One of the possible ASTNode types. Each ASTNode has
## a type whose value is one of the elements of this enumeration.
AST_MINUS = _libsbml.AST_MINUS
## @var long AST_MINUS
##
## One of the possible ASTNode types. Each ASTNode has
## a type whose value is one of the elements of this enumeration.
AST_TIMES = _libsbml.AST_TIMES
## @var long AST_TIMES
##
## One of the possible ASTNode types. Each ASTNode has
## a type whose value is one of the elements of this enumeration.
AST_DIVIDE = _libsbml.AST_DIVIDE
## @var long AST_DIVIDE
##
## One of the possible ASTNode types. Each ASTNode has
## a type whose value is one of the elements of this enumeration.
AST_POWER = _libsbml.AST_POWER
## @var long AST_POWER
##
## One of the possible ASTNode types. Each ASTNode has
## a type whose value is one of the elements of this enumeration.
AST_INTEGER = _libsbml.AST_INTEGER
## @var long AST_INTEGER
##
## One of the possible ASTNode types. Each ASTNode has
## a type whose value is one of the elements of this enumeration.
AST_REAL = _libsbml.AST_REAL
## @var long AST_REAL
##
## One of the possible ASTNode types. Each ASTNode has
## a type whose value is one of the elements of this enumeration.
AST_REAL_E = _libsbml.AST_REAL_E
## @var long AST_REAL_E
##
## One of the possible ASTNode types. Each ASTNode has
## a type whose value is one of the elements of this enumeration.
AST_RATIONAL = _libsbml.AST_RATIONAL
## @var long AST_RATIONAL
##
## One of the possible ASTNode types. Each ASTNode has
## a type whose value is one of the elements of this enumeration.
AST_NAME = _libsbml.AST_NAME
## @var long AST_NAME
##
## One of the possible ASTNode types. Each ASTNode has
## a type whose value is one of the elements of this enumeration.
AST_NAME_AVOGADRO = _libsbml.AST_NAME_AVOGADRO
## @var long AST_NAME_AVOGADRO
##
## One of the possible ASTNode types. Each ASTNode has
## a type whose value is one of the elements of this enumeration.
AST_NAME_TIME = _libsbml.AST_NAME_TIME
## @var long AST_NAME_TIME
##
## One of the possible ASTNode types. Each ASTNode has
## a type whose value is one of the elements of this enumeration.
AST_CONSTANT_E = _libsbml.AST_CONSTANT_E
## @var long AST_CONSTANT_E
##
## One of the possible ASTNode types. Each ASTNode has
## a type whose value is one of the elements of this enumeration.
AST_CONSTANT_FALSE = _libsbml.AST_CONSTANT_FALSE
## @var long AST_CONSTANT_FALSE
##
## One of the possible ASTNode types. Each ASTNode has
## a type whose value is one of the elements of this enumeration.
AST_CONSTANT_PI = _libsbml.AST_CONSTANT_PI
## @var long AST_CONSTANT_PI
##
## One of the possible ASTNode types. Each ASTNode has
## a type whose value is one of the elements of this enumeration.
AST_CONSTANT_TRUE = _libsbml.AST_CONSTANT_TRUE
## @var long AST_CONSTANT_TRUE
##
## One of the possible ASTNode types. Each ASTNode has
## a type whose value is one of the elements of this enumeration.
AST_LAMBDA = _libsbml.AST_LAMBDA
## @var long AST_LAMBDA
##
## One of the possible ASTNode types. Each ASTNode has
## a type whose value is one of the elements of this enumeration.
AST_FUNCTION = _libsbml.AST_FUNCTION
## @var long AST_FUNCTION
##
## One of the possible ASTNode types. Each ASTNode has
## a type whose value is one of the elements of this enumeration.
AST_FUNCTION_ABS = _libsbml.AST_FUNCTION_ABS
## @var long AST_FUNCTION_ABS
##
## One of the possible ASTNode types. Each ASTNode has
## a type whose value is one of the elements of this enumeration.
AST_FUNCTION_ARCCOS = _libsbml.AST_FUNCTION_ARCCOS
## @var long AST_FUNCTION_ARCCOS
##
## One of the possible ASTNode types. Each ASTNode has
## a type whose value is one of the elements of this enumeration.
AST_FUNCTION_ARCCOSH = _libsbml.AST_FUNCTION_ARCCOSH
## @var long AST_FUNCTION_ARCCOSH
##
## One of the possible ASTNode types. Each ASTNode has
## a type whose value is one of the elements of this enumeration.
AST_FUNCTION_ARCCOT = _libsbml.AST_FUNCTION_ARCCOT
## @var long AST_FUNCTION_ARCCOT
##
## One of the possible ASTNode types. Each ASTNode has
## a type whose value is one of the elements of this enumeration.
AST_FUNCTION_ARCCOTH = _libsbml.AST_FUNCTION_ARCCOTH
## @var long AST_FUNCTION_ARCCOTH
##
## One of the possible ASTNode types. Each ASTNode has
## a type whose value is one of the elements of this enumeration.
AST_FUNCTION_ARCCSC = _libsbml.AST_FUNCTION_ARCCSC
## @var long AST_FUNCTION_ARCCSC
##
## One of the possible ASTNode types. Each ASTNode has
## a type whose value is one of the elements of this enumeration.
AST_FUNCTION_ARCCSCH = _libsbml.AST_FUNCTION_ARCCSCH
## @var long AST_FUNCTION_ARCCSCH
##
## One of the possible ASTNode types. Each ASTNode has
## a type whose value is one of the elements of this enumeration.
AST_FUNCTION_ARCSEC = _libsbml.AST_FUNCTION_ARCSEC
## @var long AST_FUNCTION_ARCSEC
##
## One of the possible ASTNode types. Each ASTNode has
## a type whose value is one of the elements of this enumeration.
AST_FUNCTION_ARCSECH = _libsbml.AST_FUNCTION_ARCSECH
## @var long AST_FUNCTION_ARCSECH
##
## One of the possible ASTNode types. Each ASTNode has
## a type whose value is one of the elements of this enumeration.
AST_FUNCTION_ARCSIN = _libsbml.AST_FUNCTION_ARCSIN
## @var long AST_FUNCTION_ARCSIN
##
## One of the possible ASTNode types. Each ASTNode has
## a type whose value is one of the elements of this enumeration.
AST_FUNCTION_ARCSINH = _libsbml.AST_FUNCTION_ARCSINH
## @var long AST_FUNCTION_ARCSINH
##
## One of the possible ASTNode types. Each ASTNode has
## a type whose value is one of the elements of this enumeration.
AST_FUNCTION_ARCTAN = _libsbml.AST_FUNCTION_ARCTAN
## @var long AST_FUNCTION_ARCTAN
##
## One of the possible ASTNode types. Each ASTNode has
## a type whose value is one of the elements of this enumeration.
AST_FUNCTION_ARCTANH = _libsbml.AST_FUNCTION_ARCTANH
## @var long AST_FUNCTION_ARCTANH
##
## One of the possible ASTNode types. Each ASTNode has
## a type whose value is one of the elements of this enumeration.
AST_FUNCTION_CEILING = _libsbml.AST_FUNCTION_CEILING
## @var long AST_FUNCTION_CEILING
##
## One of the possible ASTNode types. Each ASTNode has
## a type whose value is one of the elements of this enumeration.
AST_FUNCTION_COS = _libsbml.AST_FUNCTION_COS
## @var long AST_FUNCTION_COS
##
## One of the possible ASTNode types. Each ASTNode has
## a type whose value is one of the elements of this enumeration.
AST_FUNCTION_COSH = _libsbml.AST_FUNCTION_COSH
## @var long AST_FUNCTION_COSH
##
## One of the possible ASTNode types. Each ASTNode has
## a type whose value is one of the elements of this enumeration.
AST_FUNCTION_COT = _libsbml.AST_FUNCTION_COT
## @var long AST_FUNCTION_COT
##
## One of the possible ASTNode types. Each ASTNode has
## a type whose value is one of the elements of this enumeration.
AST_FUNCTION_COTH = _libsbml.AST_FUNCTION_COTH
## @var long AST_FUNCTION_COTH
##
## One of the possible ASTNode types. Each ASTNode has
## a type whose value is one of the elements of this enumeration.
AST_FUNCTION_CSC = _libsbml.AST_FUNCTION_CSC
## @var long AST_FUNCTION_CSC
##
## One of the possible ASTNode types. Each ASTNode has
## a type whose value is one of the elements of this enumeration.
AST_FUNCTION_CSCH = _libsbml.AST_FUNCTION_CSCH
## @var long AST_FUNCTION_CSCH
##
## One of the possible ASTNode types. Each ASTNode has
## a type whose value is one of the elements of this enumeration.
AST_FUNCTION_DELAY = _libsbml.AST_FUNCTION_DELAY
## @var long AST_FUNCTION_DELAY
##
## One of the possible ASTNode types. Each ASTNode has
## a type whose value is one of the elements of this enumeration.
AST_FUNCTION_EXP = _libsbml.AST_FUNCTION_EXP
## @var long AST_FUNCTION_EXP
##
## One of the possible ASTNode types. Each ASTNode has
## a type whose value is one of the elements of this enumeration.
AST_FUNCTION_FACTORIAL = _libsbml.AST_FUNCTION_FACTORIAL
## @var long AST_FUNCTION_FACTORIAL
##
## One of the possible ASTNode types. Each ASTNode has
## a type whose value is one of the elements of this enumeration.
AST_FUNCTION_FLOOR = _libsbml.AST_FUNCTION_FLOOR
## @var long AST_FUNCTION_FLOOR
##
## One of the possible ASTNode types. Each ASTNode has
## a type whose value is one of the elements of this enumeration.
AST_FUNCTION_LN = _libsbml.AST_FUNCTION_LN
## @var long AST_FUNCTION_LN
##
## One of the possible ASTNode types. Each ASTNode has
## a type whose value is one of the elements of this enumeration.
AST_FUNCTION_LOG = _libsbml.AST_FUNCTION_LOG
## @var long AST_FUNCTION_LOG
##
## One of the possible ASTNode types. Each ASTNode has
## a type whose value is one of the elements of this enumeration.
AST_FUNCTION_PIECEWISE = _libsbml.AST_FUNCTION_PIECEWISE
## @var long AST_FUNCTION_PIECEWISE
##
## One of the possible ASTNode types. Each ASTNode has
## a type whose value is one of the elements of this enumeration.
AST_FUNCTION_POWER = _libsbml.AST_FUNCTION_POWER
## @var long AST_FUNCTION_POWER
##
## One of the possible ASTNode types. Each ASTNode has
## a type whose value is one of the elements of this enumeration.
AST_FUNCTION_ROOT = _libsbml.AST_FUNCTION_ROOT
## @var long AST_FUNCTION_ROOT
##
## One of the possible ASTNode types. Each ASTNode has
## a type whose value is one of the elements of this enumeration.
AST_FUNCTION_SEC = _libsbml.AST_FUNCTION_SEC
## @var long AST_FUNCTION_SEC
##
## One of the possible ASTNode types. Each ASTNode has
## a type whose value is one of the elements of this enumeration.
AST_FUNCTION_SECH = _libsbml.AST_FUNCTION_SECH
## @var long AST_FUNCTION_SECH
##
## One of the possible ASTNode types. Each ASTNode has
## a type whose value is one of the elements of this enumeration.
AST_FUNCTION_SIN = _libsbml.AST_FUNCTION_SIN
## @var long AST_FUNCTION_SIN
##
## One of the possible ASTNode types. Each ASTNode has
## a type whose value is one of the elements of this enumeration.
AST_FUNCTION_SINH = _libsbml.AST_FUNCTION_SINH
## @var long AST_FUNCTION_SINH
##
## One of the possible ASTNode types. Each ASTNode has
## a type whose value is one of the elements of this enumeration.
AST_FUNCTION_TAN = _libsbml.AST_FUNCTION_TAN
## @var long AST_FUNCTION_TAN
##
## One of the possible ASTNode types. Each ASTNode has
## a type whose value is one of the elements of this enumeration.
AST_FUNCTION_TANH = _libsbml.AST_FUNCTION_TANH
## @var long AST_FUNCTION_TANH
##
## One of the possible ASTNode types. Each ASTNode has
## a type whose value is one of the elements of this enumeration.
AST_LOGICAL_AND = _libsbml.AST_LOGICAL_AND
## @var long AST_LOGICAL_AND
##
## One of the possible ASTNode types. Each ASTNode has
## a type whose value is one of the elements of this enumeration.
AST_LOGICAL_NOT = _libsbml.AST_LOGICAL_NOT
## @var long AST_LOGICAL_NOT
##
## One of the possible ASTNode types. Each ASTNode has
## a type whose value is one of the elements of this enumeration.
AST_LOGICAL_OR = _libsbml.AST_LOGICAL_OR
## @var long AST_LOGICAL_OR
##
## One of the possible ASTNode types. Each ASTNode has
## a type whose value is one of the elements of this enumeration.
AST_LOGICAL_XOR = _libsbml.AST_LOGICAL_XOR
## @var long AST_LOGICAL_XOR
##
## One of the possible ASTNode types. Each ASTNode has
## a type whose value is one of the elements of this enumeration.
AST_RELATIONAL_EQ = _libsbml.AST_RELATIONAL_EQ
## @var long AST_RELATIONAL_EQ
##
## One of the possible ASTNode types. Each ASTNode has
## a type whose value is one of the elements of this enumeration.
AST_RELATIONAL_GEQ = _libsbml.AST_RELATIONAL_GEQ
## @var long AST_RELATIONAL_GEQ
##
## One of the possible ASTNode types. Each ASTNode has
## a type whose value is one of the elements of this enumeration.
AST_RELATIONAL_GT = _libsbml.AST_RELATIONAL_GT
## @var long AST_RELATIONAL_GT
##
## One of the possible ASTNode types. Each ASTNode has
## a type whose value is one of the elements of this enumeration.
AST_RELATIONAL_LEQ = _libsbml.AST_RELATIONAL_LEQ
## @var long AST_RELATIONAL_LEQ
##
## One of the possible ASTNode types. Each ASTNode has
## a type whose value is one of the elements of this enumeration.
AST_RELATIONAL_LT = _libsbml.AST_RELATIONAL_LT
## @var long AST_RELATIONAL_LT
##
## One of the possible ASTNode types. Each ASTNode has
## a type whose value is one of the elements of this enumeration.
AST_RELATIONAL_NEQ = _libsbml.AST_RELATIONAL_NEQ
## @var long AST_RELATIONAL_NEQ
##
## One of the possible ASTNode types. Each ASTNode has
## a type whose value is one of the elements of this enumeration.
AST_CONSTRUCTOR_OTHERWISE = _libsbml.AST_CONSTRUCTOR_OTHERWISE
## @var long AST_CONSTRUCTOR_OTHERWISE
##
## One of the possible ASTNode types. Each ASTNode has
## a type whose value is one of the elements of this enumeration.
AST_CONSTRUCTOR_PIECE = _libsbml.AST_CONSTRUCTOR_PIECE
## @var long AST_CONSTRUCTOR_PIECE
##
## One of the possible ASTNode types. Each ASTNode has
## a type whose value is one of the elements of this enumeration.
AST_QUALIFIER_BVAR = _libsbml.AST_QUALIFIER_BVAR
## @var long AST_QUALIFIER_BVAR
##
## One of the possible ASTNode types. Each ASTNode has
## a type whose value is one of the elements of this enumeration.
AST_QUALIFIER_DEGREE = _libsbml.AST_QUALIFIER_DEGREE
## @var long AST_QUALIFIER_DEGREE
##
## One of the possible ASTNode types. Each ASTNode has
## a type whose value is one of the elements of this enumeration.
AST_QUALIFIER_LOGBASE = _libsbml.AST_QUALIFIER_LOGBASE
## @var long AST_QUALIFIER_LOGBASE
##
## One of the possible ASTNode types. Each ASTNode has
## a type whose value is one of the elements of this enumeration.
AST_SEMANTICS = _libsbml.AST_SEMANTICS
## @var long AST_SEMANTICS
##
## One of the possible ASTNode types. Each ASTNode has
## a type whose value is one of the elements of this enumeration.
L3P_PARSE_LOG_AS_LOG10 = _libsbml.L3P_PARSE_LOG_AS_LOG10
## @var long L3P_PARSE_LOG_AS_LOG10
##
## Parse <code>log(x)</code> as the base-10 logarithm of <code>x</code>.
L3P_PARSE_LOG_AS_LN = _libsbml.L3P_PARSE_LOG_AS_LN
## @var long L3P_PARSE_LOG_AS_LN
##
## Parse <code>log(x)</code> as the natural logarithm of <code>x</code>.
L3P_PARSE_LOG_AS_ERROR = _libsbml.L3P_PARSE_LOG_AS_ERROR
## @var long L3P_PARSE_LOG_AS_ERROR
##
## Refuse to parse <code>log(x)</code> at all, and set an error message
## telling the user to use <code>log10(x)</code>, <code>ln(x)</code>,
## or <code>log(base, x)</code> instead.
L3P_COLLAPSE_UNARY_MINUS = _libsbml.L3P_COLLAPSE_UNARY_MINUS
## @var long L3P_COLLAPSE_UNARY_MINUS
##
## Collapse unary minuses where possible when parsing text-string
## formulas.
L3P_EXPAND_UNARY_MINUS = _libsbml.L3P_EXPAND_UNARY_MINUS
## @var long L3P_EXPAND_UNARY_MINUS
##
## Retain unary minuses in the AST representation when parsing
## text-string formulas.
L3P_PARSE_UNITS = _libsbml.L3P_PARSE_UNITS
## @var long L3P_PARSE_UNITS
##
## Parse units in text-string formulas.
L3P_NO_UNITS = _libsbml.L3P_NO_UNITS
## @var long L3P_NO_UNITS
##
## Do not recognize units in text-string formulas; treat them as
## errors.
L3P_AVOGADRO_IS_CSYMBOL = _libsbml.L3P_AVOGADRO_IS_CSYMBOL
## @var long L3P_AVOGADRO_IS_CSYMBOL
##
## Recognize 'avogadro' as an SBML Level 3 symbol when parsing
## text-string formulas.
L3P_AVOGADRO_IS_NAME = _libsbml.L3P_AVOGADRO_IS_NAME
## @var long L3P_AVOGADRO_IS_NAME
##
## Do not treat 'avogadro' specially; consider it a plain symbol
## name when parsing text-string formulas.
MODEL_QUALIFIER = _libsbml.MODEL_QUALIFIER
## @var long MODEL_QUALIFIER
##
## One of the possible MIRIAM annotation types used by CVTerm.
BIOLOGICAL_QUALIFIER = _libsbml.BIOLOGICAL_QUALIFIER
## @var long BIOLOGICAL_QUALIFIER
##
## One of the possible MIRIAM annotation types used by CVTerm.
UNKNOWN_QUALIFIER = _libsbml.UNKNOWN_QUALIFIER
## @var long UNKNOWN_QUALIFIER
##
## One of the possible MIRIAM annotation types used by CVTerm.
BQM_IS = _libsbml.BQM_IS
## @var long BQM_IS
##
## One of the possible MIRIAM annotation types used by CVTerm.
BQM_IS_DESCRIBED_BY = _libsbml.BQM_IS_DESCRIBED_BY
## @var long BQM_IS_DESCRIBED_BY
##
## One of the possible MIRIAM annotation types used by CVTerm.
BQM_IS_DERIVED_FROM = _libsbml.BQM_IS_DERIVED_FROM
## @var long BQM_IS_DERIVED_FROM
##
## One of the possible MIRIAM annotation types used by CVTerm.
BQM_UNKNOWN = _libsbml.BQM_UNKNOWN
## @var long BQM_UNKNOWN
##
## One of the possible MIRIAM annotation types used by CVTerm.
BQB_IS = _libsbml.BQB_IS
## @var long BQB_IS
##
## One of the possible MIRIAM annotation types used by CVTerm.
BQB_HAS_PART = _libsbml.BQB_HAS_PART
## @var long BQB_HAS_PART
##
## One of the possible MIRIAM annotation types used by CVTerm.
BQB_IS_PART_OF = _libsbml.BQB_IS_PART_OF
## @var long BQB_IS_PART_OF
##
## One of the possible MIRIAM annotation types used by CVTerm.
BQB_IS_VERSION_OF = _libsbml.BQB_IS_VERSION_OF
## @var long BQB_IS_VERSION_OF
##
## One of the possible MIRIAM annotation types used by CVTerm.
BQB_HAS_VERSION = _libsbml.BQB_HAS_VERSION
## @var long BQB_HAS_VERSION
##
## One of the possible MIRIAM annotation types used by CVTerm.
BQB_IS_HOMOLOG_TO = _libsbml.BQB_IS_HOMOLOG_TO
## @var long BQB_IS_HOMOLOG_TO
##
## One of the possible MIRIAM annotation types used by CVTerm.
BQB_IS_DESCRIBED_BY = _libsbml.BQB_IS_DESCRIBED_BY
## @var long BQB_IS_DESCRIBED_BY
##
## One of the possible MIRIAM annotation types used by CVTerm.
BQB_IS_ENCODED_BY = _libsbml.BQB_IS_ENCODED_BY
## @var long BQB_IS_ENCODED_BY
##
## One of the possible MIRIAM annotation types used by CVTerm.
BQB_ENCODES = _libsbml.BQB_ENCODES
## @var long BQB_ENCODES
##
## One of the possible MIRIAM annotation types used by CVTerm.
BQB_OCCURS_IN = _libsbml.BQB_OCCURS_IN
## @var long BQB_OCCURS_IN
##
## One of the possible MIRIAM annotation types used by CVTerm.
BQB_HAS_PROPERTY = _libsbml.BQB_HAS_PROPERTY
## @var long BQB_HAS_PROPERTY
##
## One of the possible MIRIAM annotation types used by CVTerm.
BQB_IS_PROPERTY_OF = _libsbml.BQB_IS_PROPERTY_OF
## @var long BQB_IS_PROPERTY_OF
##
## One of the possible MIRIAM annotation types used by CVTerm.
BQB_UNKNOWN = _libsbml.BQB_UNKNOWN
## @var long BQB_UNKNOWN
##
## One of the possible MIRIAM annotation types used by CVTerm.
LIBSBML_OPERATION_SUCCESS = _libsbml.LIBSBML_OPERATION_SUCCESS
## @var long LIBSBML_OPERATION_SUCCESS
## @brief One of the possible libSBML operation return codes.
##
## This code has the following meaning: The operation was successful.
LIBSBML_INDEX_EXCEEDS_SIZE = _libsbml.LIBSBML_INDEX_EXCEEDS_SIZE
## @var long LIBSBML_INDEX_EXCEEDS_SIZE
## @brief One of the possible libSBML operation return codes.
##
## This code has the following meaning: An index parameter exceeded the
## bounds of a data array or other collection used in the operation.
## This return value is typically returned by methods that take index
## numbers to refer to lists of objects, when the caller has provided
## an index that exceeds the bounds of the list. LibSBML provides
## methods for checking the size of list/sequence/collection
## structures, and callers should verify the sizes before calling
## methods that take index numbers.
LIBSBML_UNEXPECTED_ATTRIBUTE = _libsbml.LIBSBML_UNEXPECTED_ATTRIBUTE
## @var long LIBSBML_UNEXPECTED_ATTRIBUTE
## @brief One of the possible libSBML operation return codes.
##
## This code has the following meaning: The attribute that is the
## subject of this operation is not valid for the combination of SBML
## Level and Version for the underlying object. This can happen
## because libSBML strives to offer a uniform API for all SBML Levels
## and Versions, but some object attributes and elements are not
## defined for all SBML Levels and Versions. Calling programs are
## expected to be aware of which object structures they are working
## with, but when errors of this kind occur, they are reported using
## this return value.
LIBSBML_OPERATION_FAILED = _libsbml.LIBSBML_OPERATION_FAILED
## @var long LIBSBML_OPERATION_FAILED
## @brief One of the possible libSBML operation return codes.
##
## This code has the following meaning: The requested action could not
## be performed. This can occur in a variety of contexts, such as
## passing a null object as a parameter in a situation where it does
## not make sense to permit a null object.
LIBSBML_INVALID_ATTRIBUTE_VALUE = _libsbml.LIBSBML_INVALID_ATTRIBUTE_VALUE
## @var long LIBSBML_INVALID_ATTRIBUTE_VALUE
## @brief One of the possible libSBML operation return codes.
##
## This code has the following meaning: A value passed as an argument
## to the method is not of a type that is valid for the operation or
## kind of object involved. For example, this return code is used when
## a calling program attempts to set an SBML object identifier to a
## string whose syntax does not conform to the SBML identifier syntax.
LIBSBML_INVALID_OBJECT = _libsbml.LIBSBML_INVALID_OBJECT
## @var long LIBSBML_INVALID_OBJECT
## @brief One of the possible libSBML operation return codes.
##
## This code has the following meaning: The object passed as an
## argument to the method is not of a type that is valid for the
## operation or kind of object involved. For example, handing an
## invalidly-constructed {@link ASTNode} to a method expecting an
## {@link ASTNode} will result in this error.
LIBSBML_DUPLICATE_OBJECT_ID = _libsbml.LIBSBML_DUPLICATE_OBJECT_ID
## @var long LIBSBML_DUPLICATE_OBJECT_ID
## @brief One of the possible libSBML operation return codes.
##
## This code has the following meaning: There already exists an object
## with this identifier in the context where this operation is being
## attempted. This error is typically returned in situations where
## SBML object identifiers must be unique, such as attempting to add
## two species with the same identifier to a model.
LIBSBML_LEVEL_MISMATCH = _libsbml.LIBSBML_LEVEL_MISMATCH
## @var long LIBSBML_LEVEL_MISMATCH
## @brief One of the possible libSBML operation return codes.
##
## This code has the following meaning: The SBML Level associated with
## the object does not match the Level of the parent object. This
## error can happen when an SBML component such as a species or
## compartment object is created outside of a model and a calling
## program then attempts to add the object to a model that has a
## different SBML Level defined.
LIBSBML_VERSION_MISMATCH = _libsbml.LIBSBML_VERSION_MISMATCH
## @var long LIBSBML_VERSION_MISMATCH
## @brief One of the possible libSBML operation return codes.
##
## This code has the following meaning: The SBML Version within the
## SBML Level associated with the object does not match the Version of
## the parent object. This error can happen when an SBML component
## such as a species or compartment object is created outside of a
## model and a calling program then attempts to add the object to a
## model that has a different SBML Level+Version combination.
LIBSBML_INVALID_XML_OPERATION = _libsbml.LIBSBML_INVALID_XML_OPERATION
## @var long LIBSBML_INVALID_XML_OPERATION
## @brief One of the possible libSBML operation return codes.
##
## This code has the following meaning: The XML operation attempted is
## not valid for the object or context involved. This error is
## typically returned by the XML interface layer of libSBML, when a
## calling program attempts to construct or manipulate XML in an
## invalid way.
LIBSBML_NAMESPACES_MISMATCH = _libsbml.LIBSBML_NAMESPACES_MISMATCH
## @var long LIBSBML_NAMESPACES_MISMATCH
## @brief One of the possible libSBML operation return codes.
##
## This code has the following meaning: The operation attempt could not
## be performed because the object(s) involved have mismatched XML
## namespaces for SBML Level/Versions. This typically means the
## properties of the {@link SBMLNamespaces} objects possessed by the
## SBML objects do not correspond in some way.
LIBSBML_PKG_VERSION_MISMATCH = _libsbml.LIBSBML_PKG_VERSION_MISMATCH
## @var long LIBSBML_PKG_VERSION_MISMATCH
## @brief One of the possible libSBML package operation return codes.
##
## This code has the following meaning: the Version of the package
## extension within the SBML Level and version associated with the
## object does not match the Version of the parent object. This error
## can happen when an SBML component object is created outside of a
## model, and a calling program then attempts to add the object to a
## model that has a different SBML Level+Version+Package Version
## combination.
LIBSBML_DUPLICATE_ANNOTATION_NS = _libsbml.LIBSBML_DUPLICATE_ANNOTATION_NS
## @var long LIBSBML_DUPLICATE_ANNOTATION_NS
## @brief There already exists a top-level annotation with the same
## namespace as the annotation being appended.
##
## This error is typically returned in situations where the
## appendAnnotation function is being used to add an annotation that has
## a namespace that is already present in the existing annotation.
LIBSBML_ANNOTATION_NAME_NOT_FOUND = _libsbml.LIBSBML_ANNOTATION_NAME_NOT_FOUND
## @var long LIBSBML_ANNOTATION_NAME_NOT_FOUND
## @brief The existing annotation does not have a top-level element with
## the given name.
##
## This error is typically returned in situations where the
## replaceTopLevelAnnotationElement function or the
## removeTopLevelAnnotationElement function is being used to replace or
## remove an annotation with a name that does not match the name of any
## top-level element that is already present in the existing annotation.
LIBSBML_ANNOTATION_NS_NOT_FOUND = _libsbml.LIBSBML_ANNOTATION_NS_NOT_FOUND
## @var long LIBSBML_ANNOTATION_NS_NOT_FOUND
## @brief The existing annotation does not have a top-level element with
## the given namespace.
##
## This error is typically returned in situations where the
## replaceTopLevelAnnotationElement function or the
## removeTopLevelAnnotationElement function is being used to replace or
## remove an annotation with a namespace that does not match the
## namespace of any top-level element that is already present in the
## existing annotation.
LIBSBML_PKG_UNKNOWN = _libsbml.LIBSBML_PKG_UNKNOWN
## @var long LIBSBML_PKG_UNKNOWN
## @brief One of the possible libSBML package operation return codes.
##
## This code has the following meaning: the required package extension
## is unknown. This error is typically returned when creating an object
## of {@link SBase} derived class with the required package, creating
## an object of {@link SBMLNamespaces} or its derived class with the
## required package, or invoking functions depending on the required
## package. To avoid this error, the library of the required package
## needs to be linked.
LIBSBML_PKG_UNKNOWN_VERSION = _libsbml.LIBSBML_PKG_UNKNOWN_VERSION
## @var long LIBSBML_PKG_UNKNOWN_VERSION
## @brief One of the possible libSBML package operation return codes.
##
## This code has the following meaning: The required version of the
## package extension is unknown. This error is typically returned when
## creating an object of {@link SBase} derived class with the required
## package, creating an object of {@link SBMLNamespaces} or its derived
## class with the required package, or invoking functions depending on
## the required package. This error may be avoided by updating the
## library of the required package to be linked.
LIBSBML_PKG_DISABLED = _libsbml.LIBSBML_PKG_DISABLED
## @var long LIBSBML_PKG_DISABLED
## @brief One of the possible libSBML package operation return codes.
##
## This code has the following meaning: The required package extension
## is disabled. This error is typically returned when creating an
## object of {@link SBase} derived class with the required package,
## creating an object of {@link SBMLNamespaces} or its derived class
## with the required package, or invoking functions depending on the
## required package. To avoid this error, the library of the required
## package needs to be enabled.
LIBSBML_PKG_CONFLICTED_VERSION = _libsbml.LIBSBML_PKG_CONFLICTED_VERSION
## @var long LIBSBML_PKG_CONFLICTED_VERSION
## @brief One of the possible libSBML package operation return codes.
##
## This code has the following meaning: another version of the required
## package extension has already been enabled in the target SBase
## object, or enabled in the model to/in which the target object to be
## added/contained. This error is typically returned when adding an
## object of some {@link SBase} derived class with the required package
## to other {@link SBase} derived object, or when enabling the required
## package in the target object. To avoid this error, the conflict of
## versions need to be avoided.
LIBSBML_PKG_CONFLICT = _libsbml.LIBSBML_PKG_CONFLICT
## @var long LIBSBML_PKG_CONFLICT
## @brief One of the possible libSBML package operation return codes.
##
## This code has the following meaning: another SBML package extension
## for the same URI has already been registered. This error is
## typically returned when adding a SBML package extension to the
## {@link SBMLExtensionRegistry}. To avoid this error, ensure that SBML
## package extensions are only registered once.
LIBSBML_CONV_INVALID_TARGET_NAMESPACE = _libsbml.LIBSBML_CONV_INVALID_TARGET_NAMESPACE
## @var long LIBSBML_CONV_INVALID_TARGET_NAMESPACE
## @brief One of the possible libSBML package operation return codes.
##
## This code has the following meaning: while attempting to convert the
## SBML document using {@link SBMLLevelVersionConverter#convert()} or
## related methods, the target namespace has been found to be invalid
## or unset. (The function {@link SBMLNamespaces#isValidCombination()}
## may be useful in detecting this situation and preventing the error.)
LIBSBML_CONV_PKG_CONVERSION_NOT_AVAILABLE = _libsbml.LIBSBML_CONV_PKG_CONVERSION_NOT_AVAILABLE
## @var long LIBSBML_CONV_PKG_CONVERSION_NOT_AVAILABLE
## @brief One of the possible libSBML package operation return codes.
##
## This code has the following meaning: conversions involving SBML
## Level 3 packages are not available in the given libSBML
## method. This error is typically returned when calling a converter
## that does not have the functionality to deal with SBML packages. To
## avoid this error, ensure that the requested {@link
## ConversionProperties} specifies packages.
LIBSBML_CONV_INVALID_SRC_DOCUMENT = _libsbml.LIBSBML_CONV_INVALID_SRC_DOCUMENT
## @var long LIBSBML_CONV_INVALID_SRC_DOCUMENT
## @brief One of the possible libSBML package operation return codes.
##
## This code has the following meaning: The document on which
## conversion is being requested is invalid and the requested
## conversion cannot be performed. This error is typically returned
## when a conversion routine has been given an invalid target document
## or the conversion requires a certain degree of validity that is not
## present in the document. To avoid this error use the {@link
## SBMLDocument#checkConsistency()} function to find and resolve errors
## before passing the document to a conversion method.
LIBSBML_CONV_CONVERSION_NOT_AVAILABLE = _libsbml.LIBSBML_CONV_CONVERSION_NOT_AVAILABLE
## @var long LIBSBML_CONV_CONVERSION_NOT_AVAILABLE
## @brief One of the possible libSBML package operation return codes.
##
## This code has the following meaning: conversion with the given
## properties is not yet available.
LIBSBML_CONV_PKG_CONSIDERED_UNKNOWN = _libsbml.LIBSBML_CONV_PKG_CONSIDERED_UNKNOWN
## @var long LIBSBML_CONV_PKG_CONSIDERED_UNKNOWN
## @brief The package that is being stripped is not an enabled package
## but considered by libSBML to be an unrecognized package.
##
## This error is typically returned when calling the StripPackage
## converter requesting that a package for which code is not available be
## stripped. Thus the containing document will not be altered as the
## elements and attributes for this package are stored as unknown package
## information and will be written out as such.
CNV_TYPE_BOOL = _libsbml.CNV_TYPE_BOOL
## @var long CNV_TYPE_BOOL
## @brief One of the possible ConversionOption data type indicators.
##
## Indicates the value type is a Boolean.
CNV_TYPE_DOUBLE = _libsbml.CNV_TYPE_DOUBLE
## @var long CNV_TYPE_DOUBLE
## @brief One of the possible ConversionOption data type indicators.
##
## Indicates the value type is a double-sized float.
CNV_TYPE_INT = _libsbml.CNV_TYPE_INT
## @var long CNV_TYPE_INT
## @brief One of the possible ConversionOption data type indicators.
##
## Indicates the value type is an integer.
CNV_TYPE_SINGLE = _libsbml.CNV_TYPE_SINGLE
## @var long CNV_TYPE_SINGLE
## @brief One of the possible ConversionOption data type indicators.
##
## Indicates the value type is a float.
CNV_TYPE_STRING = _libsbml.CNV_TYPE_STRING
## @var long CNV_TYPE_STRING
## @brief One of the possible ConversionOption data type indicators.
##
## Indicates the value type is a string.
LIBSBML_DOTTED_VERSION = _libsbml.LIBSBML_DOTTED_VERSION
## @var long LIBSBML_DOTTED_VERSION
## @brief A version string of the form "1.2.3".
LIBSBML_VERSION = _libsbml.LIBSBML_VERSION
## @var long LIBSBML_VERSION
##
## The version as an integer: version 1.2.3 becomes 10203. Since the major
## number comes first, the overall number will always increase when a new
## libSBML is released, making it easy to use less-than and greater-than
## comparisons when testing version numbers.
LIBSBML_VERSION_STRING = _libsbml.LIBSBML_VERSION_STRING
## @var long LIBSBML_VERSION_STRING
## @brief The numeric version as a string: version 1.2.3 becomes "10203".
## SBMLCompTypeCode_t
SBML_COMP_SUBMODEL = _libsbml.SBML_COMP_SUBMODEL
## @var long SBML_COMP_SUBMODEL
##
## <span class="pkg-marker pkg-color-comp">comp</span> One of the
## possible SBML component type codes.
##
## LibSBML attaches an identifying code to every kind of SBML object.
## These are known as <em>SBML type codes</em>. In other languages,
## the set of type codes is stored in an enumeration; in the Java
## language interface for libSBML, the type codes are defined as static
## integer constants in the interface class @link libsbml libsbml@endlink.
## The names of the type codes all begin with the characters
## <code>SBML_</code>.
##
## Each libSBML extension for SBML Level 3 packages adds its own
## type codes to objects. The present type code belongs to libSBML's
## extension to support the SBML Level 3 Hierarchical Model
## Composition (“comp”) package. It is used to identify
## the type of SBML component to which a given object corresponds.
SBML_COMP_MODELDEFINITION = _libsbml.SBML_COMP_MODELDEFINITION
## @var long SBML_COMP_MODELDEFINITION
##
## <span class="pkg-marker pkg-color-comp">comp</span> One of the
## possible SBML component type codes.
##
## LibSBML attaches an identifying code to every kind of SBML object.
## These are known as <em>SBML type codes</em>. In other languages,
## the set of type codes is stored in an enumeration; in the Java
## language interface for libSBML, the type codes are defined as static
## integer constants in the interface class @link libsbml libsbml@endlink.
## The names of the type codes all begin with the characters
## <code>SBML_</code>.
##
## Each libSBML extension for SBML Level 3 packages adds its own
## type codes to objects. The present type code belongs to libSBML's
## extension to support the SBML Level 3 Hierarchical Model
## Composition (“comp”) package. It is used to identify
## the type of SBML component to which a given object corresponds.
SBML_COMP_EXTERNALMODELDEFINITION = _libsbml.SBML_COMP_EXTERNALMODELDEFINITION
## @var long SBML_COMP_EXTERNALMODELDEFINITION
##
## <span class="pkg-marker pkg-color-comp">comp</span> One of the
## possible SBML component type codes.
##
## LibSBML attaches an identifying code to every kind of SBML object.
## These are known as <em>SBML type codes</em>. In other languages,
## the set of type codes is stored in an enumeration; in the Java
## language interface for libSBML, the type codes are defined as static
## integer constants in the interface class @link libsbml libsbml@endlink.
## The names of the type codes all begin with the characters
## <code>SBML_</code>.
##
## Each libSBML extension for SBML Level 3 packages adds its own
## type codes to objects. The present type code belongs to libSBML's
## extension to support the SBML Level 3 Hierarchical Model
## Composition (“comp”) package. It is used to identify
## the type of SBML component to which a given object corresponds.
SBML_COMP_SBASEREF = _libsbml.SBML_COMP_SBASEREF
## @var long SBML_COMP_SBASEREF
##
## <span class="pkg-marker pkg-color-comp">comp</span> One of the
## possible SBML component type codes.
##
## LibSBML attaches an identifying code to every kind of SBML object.
## These are known as <em>SBML type codes</em>. In other languages,
## the set of type codes is stored in an enumeration; in the Java
## language interface for libSBML, the type codes are defined as static
## integer constants in the interface class @link libsbml libsbml@endlink.
## The names of the type codes all begin with the characters
## <code>SBML_</code>.
##
## Each libSBML extension for SBML Level 3 packages adds its own
## type codes to objects. The present type code belongs to libSBML's
## extension to support the SBML Level 3 Hierarchical Model
## Composition (“comp”) package. It is used to identify
## the type of SBML component to which a given object corresponds.
SBML_COMP_DELETION = _libsbml.SBML_COMP_DELETION
## @var long SBML_COMP_DELETION
##
## <span class="pkg-marker pkg-color-comp">comp</span> One of the
## possible SBML component type codes.
##
## LibSBML attaches an identifying code to every kind of SBML object.
## These are known as <em>SBML type codes</em>. In other languages,
## the set of type codes is stored in an enumeration; in the Java
## language interface for libSBML, the type codes are defined as static
## integer constants in the interface class @link libsbml libsbml@endlink.
## The names of the type codes all begin with the characters
## <code>SBML_</code>.
##
## Each libSBML extension for SBML Level 3 packages adds its own
## type codes to objects. The present type code belongs to libSBML's
## extension to support the SBML Level 3 Hierarchical Model
## Composition (“comp”) package. It is used to identify
## the type of SBML component to which a given object corresponds.
SBML_COMP_REPLACEDELEMENT = _libsbml.SBML_COMP_REPLACEDELEMENT
## @var long SBML_COMP_REPLACEDELEMENT
##
## <span class="pkg-marker pkg-color-comp">comp</span> One of the
## possible SBML component type codes.
##
## LibSBML attaches an identifying code to every kind of SBML object.
## These are known as <em>SBML type codes</em>. In other languages,
## the set of type codes is stored in an enumeration; in the Java
## language interface for libSBML, the type codes are defined as static
## integer constants in the interface class @link libsbml libsbml@endlink.
## The names of the type codes all begin with the characters
## <code>SBML_</code>.
##
## Each libSBML extension for SBML Level 3 packages adds its own
## type codes to objects. The present type code belongs to libSBML's
## extension to support the SBML Level 3 Hierarchical Model
## Composition (“comp”) package. It is used to identify
## the type of SBML component to which a given object corresponds.
SBML_COMP_REPLACEDBY = _libsbml.SBML_COMP_REPLACEDBY
## @var long SBML_COMP_REPLACEDBY
##
## <span class="pkg-marker pkg-color-comp">comp</span> One of the
## possible SBML component type codes.
##
## LibSBML attaches an identifying code to every kind of SBML object.
## These are known as <em>SBML type codes</em>. In other languages,
## the set of type codes is stored in an enumeration; in the Java
## language interface for libSBML, the type codes are defined as static
## integer constants in the interface class @link libsbml libsbml@endlink.
## The names of the type codes all begin with the characters
## <code>SBML_</code>.
##
## Each libSBML extension for SBML Level 3 packages adds its own
## type codes to objects. The present type code belongs to libSBML's
## extension to support the SBML Level 3 Hierarchical Model
## Composition (“comp”) package. It is used to identify
## the type of SBML component to which a given object corresponds.
SBML_COMP_PORT = _libsbml.SBML_COMP_PORT
## @var long SBML_COMP_PORT
##
## <span class="pkg-marker pkg-color-comp">comp</span> One of the
## possible SBML component type codes.
##
## LibSBML attaches an identifying code to every kind of SBML object.
## These are known as <em>SBML type codes</em>. In other languages,
## the set of type codes is stored in an enumeration; in the Java
## language interface for libSBML, the type codes are defined as static
## integer constants in the interface class @link libsbml libsbml@endlink.
## The names of the type codes all begin with the characters
## <code>SBML_</code>.
##
## Each libSBML extension for SBML Level 3 packages adds its own
## type codes to objects. The present type code belongs to libSBML's
## extension to support the SBML Level 3 Hierarchical Model
## Composition (“comp”) package. It is used to identify
## the type of SBML component to which a given object corresponds.
## SBMLFbcTypeCode_t
SBML_FBC_ASSOCIATION = _libsbml.SBML_FBC_ASSOCIATION
## @var long SBML_FBC_ASSOCIATION
##
## <span class="pkg-marker pkg-color-fbc">fbc</span> One of the
## possible SBML component type codes.
##
## LibSBML attaches an identifying code to every kind of SBML object.
## These are known as <em>SBML type codes</em>. In other languages,
## the set of type codes is stored in an enumeration; in the Java
## language interface for libSBML, the type codes are defined as static
## integer constants in the interface class @link libsbml libsbml@endlink.
## The names of the type codes all begin with the characters
## <code>SBML_</code>.
##
## Each libSBML extension for SBML Level 3 packages adds its own
## type codes to objects. The present type code belongs to libSBML's
## extension to support the SBML Level 3 Flux Balance Constraints
## (“fbc”) package. It is used to identify the type of SBML
## component to which a given object corresponds.
SBML_FBC_FLUXBOUND = _libsbml.SBML_FBC_FLUXBOUND
## @var long SBML_FBC_FLUXBOUND
##
## <span class="pkg-marker pkg-color-fbc">fbc</span> One of the
## possible SBML component type codes.
##
## LibSBML attaches an identifying code to every kind of SBML object.
## These are known as <em>SBML type codes</em>. In other languages,
## the set of type codes is stored in an enumeration; in the Java
## language interface for libSBML, the type codes are defined as static
## integer constants in the interface class @link libsbml libsbml@endlink.
## The names of the type codes all begin with the characters
## <code>SBML_</code>.
##
## Each libSBML extension for SBML Level 3 packages adds its own
## type codes to objects. The present type code belongs to libSBML's
## extension to support the SBML Level 3 Flux Balance Constraints
## (“fbc”) package. It is used to identify the type of SBML
## component to which a given object corresponds.
SBML_FBC_FLUXOBJECTIVE = _libsbml.SBML_FBC_FLUXOBJECTIVE
## @var long SBML_FBC_FLUXOBJECTIVE
##
## <span class="pkg-marker pkg-color-fbc">fbc</span> One of the
## possible SBML component type codes.
##
## LibSBML attaches an identifying code to every kind of SBML object.
## These are known as <em>SBML type codes</em>. In other languages,
## the set of type codes is stored in an enumeration; in the Java
## language interface for libSBML, the type codes are defined as static
## integer constants in the interface class @link libsbml libsbml@endlink.
## The names of the type codes all begin with the characters
## <code>SBML_</code>.
##
## Each libSBML extension for SBML Level 3 packages adds its own
## type codes to objects. The present type code belongs to libSBML's
## extension to support the SBML Level 3 Flux Balance Constraints
## (“fbc”) package. It is used to identify the type of SBML
## component to which a given object corresponds.
SBML_FBC_GENEASSOCIATION = _libsbml.SBML_FBC_GENEASSOCIATION
## @var long SBML_FBC_GENEASSOCIATION
##
## <span class="pkg-marker pkg-color-fbc">fbc</span> One of the
## possible SBML component type codes.
##
## LibSBML attaches an identifying code to every kind of SBML object.
## These are known as <em>SBML type codes</em>. In other languages,
## the set of type codes is stored in an enumeration; in the Java
## language interface for libSBML, the type codes are defined as static
## integer constants in the interface class @link libsbml libsbml@endlink.
## The names of the type codes all begin with the characters
## <code>SBML_</code>.
##
## Each libSBML extension for SBML Level 3 packages adds its own
## type codes to objects. The present type code belongs to libSBML's
## extension to support the SBML Level 3 Flux Balance Constraints
## (“fbc”) package. It is used to identify the type of SBML
## component to which a given object corresponds.
SBML_FBC_OBJECTIVE = _libsbml.SBML_FBC_OBJECTIVE
## @var long SBML_FBC_OBJECTIVE
##
## <span class="pkg-marker pkg-color-fbc">fbc</span> One of the
## possible SBML component type codes.
##
## LibSBML attaches an identifying code to every kind of SBML object.
## These are known as <em>SBML type codes</em>. In other languages,
## the set of type codes is stored in an enumeration; in the Java
## language interface for libSBML, the type codes are defined as static
## integer constants in the interface class @link libsbml libsbml@endlink.
## The names of the type codes all begin with the characters
## <code>SBML_</code>.
##
## Each libSBML extension for SBML Level 3 packages adds its own
## type codes to objects. The present type code belongs to libSBML's
## extension to support the SBML Level 3 Flux Balance Constraints
## (“fbc”) package. It is used to identify the type of SBML
## component to which a given object corresponds.
## AssociationTypeCode_t
GENE_ASSOCIATION = _libsbml.GENE_ASSOCIATION
## @var long GENE_ASSOCIATION
##
## <span class="pkg-marker pkg-color-fbc">fbc</span> One of the
## possible Association types.
##
## The Association class is not part of the official SBML Level 3
## Flux Balance Constraints specification, but is instead a proposed
## future development of the package. If adopted, it would be a child of
## a GeneAssociation that would describe a single 'and' or 'or'
## relationship between two or more genes or other associations.
##
## The present code is one of the possible Association types for this
## proposed SBML Level 3 Flux Balance Constraints
## (“fbc”) package future development.
AND_ASSOCIATION = _libsbml.AND_ASSOCIATION
## @var long AND_ASSOCIATION
##
## <span class="pkg-marker pkg-color-fbc">fbc</span> One of the
## possible Association types.
##
## The Association class is not part of the official SBML Level 3
## Flux Balance Constraints specification, but is instead a proposed
## future development of the package. If adopted, it would be a child of
## a GeneAssociation that would describe a single 'and' or 'or'
## relationship between two or more genes or other associations.
##
## The present code is one of the possible Association types for this
## proposed SBML Level 3 Flux Balance Constraints
## (“fbc”) package future development.
OR_ASSOCIATION = _libsbml.OR_ASSOCIATION
## @var long OR_ASSOCIATION
##
## <span class="pkg-marker pkg-color-fbc">fbc</span> One of the
## possible Association types.
##
## The Association class is not part of the official SBML Level 3
## Flux Balance Constraints specification, but is instead a proposed
## future development of the package. If adopted, it would be a child of
## a GeneAssociation that would describe a single 'and' or 'or'
## relationship between two or more genes or other associations.
##
## The present code is one of the possible Association types for this
## proposed SBML Level 3 Flux Balance Constraints
## (“fbc”) package future development.
UNKNOWN_ASSOCIATION = _libsbml.UNKNOWN_ASSOCIATION
## @var long UNKNOWN_ASSOCIATION
##
## <span class="pkg-marker pkg-color-fbc">fbc</span> One of the
## possible Association types.
##
## The Association class is not part of the official SBML Level 3
## Flux Balance Constraints specification, but is instead a proposed
## future development of the package. If adopted, it would be a child of
## a GeneAssociation that would describe a single 'and' or 'or'
## relationship between two or more genes or other associations.
##
## The present code is one of the possible Association types for this
## proposed SBML Level 3 Flux Balance Constraints
## (“fbc”) package future development.
## FluxBoundOperation_t
FLUXBOUND_OPERATION_LESS_EQUAL = _libsbml.FLUXBOUND_OPERATION_LESS_EQUAL
## @var long FLUXBOUND_OPERATION_LESS_EQUAL
##
## <span class="pkg-marker pkg-color-fbc">fbc</span> One of the
## possible FluxBound operation types.
##
## The FluxBound class is part of the SBML Level 3 Flux Balanced
## Constraints package. Its purpose is to hold a single equality or
## inequality that represents the maximum or minimum value a reaction
## flux can obtain at steady state. One of the attributes of FluxBound
## is "operation". This code is one of the possible values of the
## "operation" attribute. The possible legal values are less than or
## equal to, greater than or equal to, or equal to. The additional two
## options "less than" and "greater than" are not legal values for the
## FluxBound "operation" attribute, but are provided to allow backwards
## compatibility with an earlier version of the draft specification.
FLUXBOUND_OPERATION_GREATER_EQUAL = _libsbml.FLUXBOUND_OPERATION_GREATER_EQUAL
## @var long FLUXBOUND_OPERATION_GREATER_EQUAL
##
## <span class="pkg-marker pkg-color-fbc">fbc</span> One of the
## possible FluxBound operation types.
##
## The FluxBound class is part of the SBML Level 3 Flux Balanced
## Constraints package. Its purpose is to hold a single equality or
## inequality that represents the maximum or minimum value a reaction
## flux can obtain at steady state. One of the attributes of FluxBound
## is "operation". This code is one of the possible values of the
## "operation" attribute. The possible legal values are less than or
## equal to, greater than or equal to, or equal to. The additional two
## options "less than" and "greater than" are not legal values for the
## FluxBound "operation" attribute, but are provided to allow backwards
## compatibility with an earlier version of the draft specification.
FLUXBOUND_OPERATION_LESS = _libsbml.FLUXBOUND_OPERATION_LESS
## @var long FLUXBOUND_OPERATION_LESS
##
## <span class="pkg-marker pkg-color-fbc">fbc</span> One of the
## possible FluxBound operation types.
##
## The FluxBound class is part of the SBML Level 3 Flux Balanced
## Constraints package. Its purpose is to hold a single equality or
## inequality that represents the maximum or minimum value a reaction
## flux can obtain at steady state. One of the attributes of FluxBound
## is "operation". This code is one of the possible values of the
## "operation" attribute. The possible legal values are less than or
## equal to, greater than or equal to, or equal to. The additional two
## options "less than" and "greater than" are not legal values for the
## FluxBound "operation" attribute, but are provided to allow backwards
## compatibility with an earlier version of the draft specification.
FLUXBOUND_OPERATION_GREATER = _libsbml.FLUXBOUND_OPERATION_GREATER
## @var long FLUXBOUND_OPERATION_GREATER
##
## <span class="pkg-marker pkg-color-fbc">fbc</span> One of the
## possible FluxBound operation types.
##
## The FluxBound class is part of the SBML Level 3 Flux Balanced
## Constraints package. Its purpose is to hold a single equality or
## inequality that represents the maximum or minimum value a reaction
## flux can obtain at steady state. One of the attributes of FluxBound
## is "operation". This code is one of the possible values of the
## "operation" attribute. The possible legal values are less than or
## equal to, greater than or equal to, or equal to. The additional two
## options "less than" and "greater than" are not legal values for the
## FluxBound "operation" attribute, but are provided to allow backwards
## compatibility with an earlier version of the draft specification.
FLUXBOUND_OPERATION_EQUAL = _libsbml.FLUXBOUND_OPERATION_EQUAL
## @var long FLUXBOUND_OPERATION_EQUAL
##
## <span class="pkg-marker pkg-color-fbc">fbc</span> One of the
## possible FluxBound operation types.
##
## The FluxBound class is part of the SBML Level 3 Flux Balanced
## Constraints package. Its purpose is to hold a single equality or
## inequality that represents the maximum or minimum value a reaction
## flux can obtain at steady state. One of the attributes of FluxBound
## is "operation". This code is one of the possible values of the
## "operation" attribute. The possible legal values are less than or
## equal to, greater than or equal to, or equal to. The additional two
## options "less than" and "greater than" are not legal values for the
## FluxBound "operation" attribute, but are provided to allow backwards
## compatibility with an earlier version of the draft specification.
FLUXBOUND_OPERATION_UNKNOWN = _libsbml.FLUXBOUND_OPERATION_UNKNOWN
## @var long FLUXBOUND_OPERATION_UNKNOWN
##
## <span class="pkg-marker pkg-color-fbc">fbc</span> One of the
## possible FluxBound operation types.
##
## The FluxBound class is part of the SBML Level 3 Flux Balanced
## Constraints package. Its purpose is to hold a single equality or
## inequality that represents the maximum or minimum value a reaction
## flux can obtain at steady state. One of the attributes of FluxBound
## is "operation". This code is one of the possible values of the
## "operation" attribute. The possible legal values are less than or
## equal to, greater than or equal to, or equal to. The additional two
## options "less than" and "greater than" are not legal values for the
## FluxBound "operation" attribute, but are provided to allow backwards
## compatibility with an earlier version of the draft specification.
## ObjectiveType_t
OBJECTIVE_TYPE_MAXIMIZE = _libsbml.OBJECTIVE_TYPE_MAXIMIZE
## @var long OBJECTIVE_TYPE_MAXIMIZE
##
## <span class="pkg-marker pkg-color-fbc">fbc</span> One of the
## possible Objective types.
##
## The Objective class is part of the SBML Level 3 Flux Balanced
## Constraints package. Its purpose is to represent the so-called
## <em>objective function</em>, which generally consist of a linear
## combination of model variables (fluxes) and a sense (direction). The
## Objective class has a "type" attribute, and the present code is one of
## possible type values.
##
OBJECTIVE_TYPE_MINIMIZE = _libsbml.OBJECTIVE_TYPE_MINIMIZE
## @var long OBJECTIVE_TYPE_MINIMIZE
##
## <span class="pkg-marker pkg-color-fbc">fbc</span> One of the
## possible Objective types.
##
## The Objective class is part of the SBML Level 3 Flux Balanced
## Constraints package. Its purpose is to represent the so-called
## <em>objective function</em>, which generally consist of a linear
## combination of model variables (fluxes) and a sense (direction). The
## Objective class has a "type" attribute, and the present code is one of
## possible type values.
##
OBJECTIVE_TYPE_UNKNOWN = _libsbml.OBJECTIVE_TYPE_UNKNOWN
## @var long OBJECTIVE_TYPE_UNKNOWN
##
## <span class="pkg-marker pkg-color-fbc">fbc</span> One of the
## possible Objective types.
##
## The Objective class is part of the SBML Level 3 Flux Balanced
## Constraints package. Its purpose is to represent the so-called
## <em>objective function</em>, which generally consist of a linear
## combination of model variables (fluxes) and a sense (direction). The
## Objective class has a "type" attribute, and the present code is one of
## possible type values.
##
## SBMLLayoutTypeCode_t
SBML_LAYOUT_BOUNDINGBOX = _libsbml.SBML_LAYOUT_BOUNDINGBOX
## @var long SBML_LAYOUT_BOUNDINGBOX
##
## <span class="pkg-marker pkg-color-layout">layout</span> One of the
## possible SBML component type codes.
##
## LibSBML attaches an identifying code to every kind of SBML object.
## These are known as <em>SBML type codes</em>. In other languages,
## the set of type codes is stored in an enumeration; in the Java
## language interface for libSBML, the type codes are defined as static
## integer constants in the interface class @link libsbml libsbml@endlink.
## The names of the type codes all begin with the characters
## <code>SBML_</code>.
##
## Each libSBML extension for SBML Level 3 packages adds its own
## type codes to objects. The present type code belongs to libSBML's
## extension to support the SBML Level 3 Layout
## (“layout”) package. It is used to identify the type of
## SBML component to which a given object corresponds.
##
SBML_LAYOUT_COMPARTMENTGLYPH = _libsbml.SBML_LAYOUT_COMPARTMENTGLYPH
## @var long SBML_LAYOUT_COMPARTMENTGLYPH
##
## <span class="pkg-marker pkg-color-layout">layout</span> One of the
## possible SBML component type codes.
##
## LibSBML attaches an identifying code to every kind of SBML object.
## These are known as <em>SBML type codes</em>. In other languages,
## the set of type codes is stored in an enumeration; in the Java
## language interface for libSBML, the type codes are defined as static
## integer constants in the interface class @link libsbml libsbml@endlink.
## The names of the type codes all begin with the characters
## <code>SBML_</code>.
##
## Each libSBML extension for SBML Level 3 packages adds its own
## type codes to objects. The present type code belongs to libSBML's
## extension to support the SBML Level 3 Layout
## (“layout”) package. It is used to identify the type of
## SBML component to which a given object corresponds.
##
SBML_LAYOUT_CUBICBEZIER = _libsbml.SBML_LAYOUT_CUBICBEZIER
## @var long SBML_LAYOUT_CUBICBEZIER
##
## <span class="pkg-marker pkg-color-layout">layout</span> One of the
## possible SBML component type codes.
##
## LibSBML attaches an identifying code to every kind of SBML object.
## These are known as <em>SBML type codes</em>. In other languages,
## the set of type codes is stored in an enumeration; in the Java
## language interface for libSBML, the type codes are defined as static
## integer constants in the interface class @link libsbml libsbml@endlink.
## The names of the type codes all begin with the characters
## <code>SBML_</code>.
##
## Each libSBML extension for SBML Level 3 packages adds its own
## type codes to objects. The present type code belongs to libSBML's
## extension to support the SBML Level 3 Layout
## (“layout”) package. It is used to identify the type of
## SBML component to which a given object corresponds.
##
SBML_LAYOUT_CURVE = _libsbml.SBML_LAYOUT_CURVE
## @var long SBML_LAYOUT_CURVE
##
## <span class="pkg-marker pkg-color-layout">layout</span> One of the
## possible SBML component type codes.
##
## LibSBML attaches an identifying code to every kind of SBML object.
## These are known as <em>SBML type codes</em>. In other languages,
## the set of type codes is stored in an enumeration; in the Java
## language interface for libSBML, the type codes are defined as static
## integer constants in the interface class @link libsbml libsbml@endlink.
## The names of the type codes all begin with the characters
## <code>SBML_</code>.
##
## Each libSBML extension for SBML Level 3 packages adds its own
## type codes to objects. The present type code belongs to libSBML's
## extension to support the SBML Level 3 Layout
## (“layout”) package. It is used to identify the type of
## SBML component to which a given object corresponds.
##
SBML_LAYOUT_DIMENSIONS = _libsbml.SBML_LAYOUT_DIMENSIONS
## @var long SBML_LAYOUT_DIMENSIONS
##
## <span class="pkg-marker pkg-color-layout">layout</span> One of the
## possible SBML component type codes.
##
## LibSBML attaches an identifying code to every kind of SBML object.
## These are known as <em>SBML type codes</em>. In other languages,
## the set of type codes is stored in an enumeration; in the Java
## language interface for libSBML, the type codes are defined as static
## integer constants in the interface class @link libsbml libsbml@endlink.
## The names of the type codes all begin with the characters
## <code>SBML_</code>.
##
## Each libSBML extension for SBML Level 3 packages adds its own
## type codes to objects. The present type code belongs to libSBML's
## extension to support the SBML Level 3 Layout
## (“layout”) package. It is used to identify the type of
## SBML component to which a given object corresponds.
##
SBML_LAYOUT_GRAPHICALOBJECT = _libsbml.SBML_LAYOUT_GRAPHICALOBJECT
## @var long SBML_LAYOUT_GRAPHICALOBJECT
##
## <span class="pkg-marker pkg-color-layout">layout</span> One of the
## possible SBML component type codes.
##
## LibSBML attaches an identifying code to every kind of SBML object.
## These are known as <em>SBML type codes</em>. In other languages,
## the set of type codes is stored in an enumeration; in the Java
## language interface for libSBML, the type codes are defined as static
## integer constants in the interface class @link libsbml libsbml@endlink.
## The names of the type codes all begin with the characters
## <code>SBML_</code>.
##
## Each libSBML extension for SBML Level 3 packages adds its own
## type codes to objects. The present type code belongs to libSBML's
## extension to support the SBML Level 3 Layout
## (“layout”) package. It is used to identify the type of
## SBML component to which a given object corresponds.
##
SBML_LAYOUT_LAYOUT = _libsbml.SBML_LAYOUT_LAYOUT
## @var long SBML_LAYOUT_LAYOUT
##
## <span class="pkg-marker pkg-color-layout">layout</span> One of the
## possible SBML component type codes.
##
## LibSBML attaches an identifying code to every kind of SBML object.
## These are known as <em>SBML type codes</em>. In other languages,
## the set of type codes is stored in an enumeration; in the Java
## language interface for libSBML, the type codes are defined as static
## integer constants in the interface class @link libsbml libsbml@endlink.
## The names of the type codes all begin with the characters
## <code>SBML_</code>.
##
## Each libSBML extension for SBML Level 3 packages adds its own
## type codes to objects. The present type code belongs to libSBML's
## extension to support the SBML Level 3 Layout
## (“layout”) package. It is used to identify the type of
## SBML component to which a given object corresponds.
##
SBML_LAYOUT_LINESEGMENT = _libsbml.SBML_LAYOUT_LINESEGMENT
## @var long SBML_LAYOUT_LINESEGMENT
##
## <span class="pkg-marker pkg-color-layout">layout</span> One of the
## possible SBML component type codes.
##
## LibSBML attaches an identifying code to every kind of SBML object.
## These are known as <em>SBML type codes</em>. In other languages,
## the set of type codes is stored in an enumeration; in the Java
## language interface for libSBML, the type codes are defined as static
## integer constants in the interface class @link libsbml libsbml@endlink.
## The names of the type codes all begin with the characters
## <code>SBML_</code>.
##
## Each libSBML extension for SBML Level 3 packages adds its own
## type codes to objects. The present type code belongs to libSBML's
## extension to support the SBML Level 3 Layout
## (“layout”) package. It is used to identify the type of
## SBML component to which a given object corresponds.
##
SBML_LAYOUT_POINT = _libsbml.SBML_LAYOUT_POINT
## @var long SBML_LAYOUT_POINT
##
## <span class="pkg-marker pkg-color-layout">layout</span> One of the
## possible SBML component type codes.
##
## LibSBML attaches an identifying code to every kind of SBML object.
## These are known as <em>SBML type codes</em>. In other languages,
## the set of type codes is stored in an enumeration; in the Java
## language interface for libSBML, the type codes are defined as static
## integer constants in the interface class @link libsbml libsbml@endlink.
## The names of the type codes all begin with the characters
## <code>SBML_</code>.
##
## Each libSBML extension for SBML Level 3 packages adds its own
## type codes to objects. The present type code belongs to libSBML's
## extension to support the SBML Level 3 Layout
## (“layout”) package. It is used to identify the type of
## SBML component to which a given object corresponds.
##
SBML_LAYOUT_REACTIONGLYPH = _libsbml.SBML_LAYOUT_REACTIONGLYPH
## @var long SBML_LAYOUT_REACTIONGLYPH
##
## <span class="pkg-marker pkg-color-layout">layout</span> One of the
## possible SBML component type codes.
##
## LibSBML attaches an identifying code to every kind of SBML object.
## These are known as <em>SBML type codes</em>. In other languages,
## the set of type codes is stored in an enumeration; in the Java
## language interface for libSBML, the type codes are defined as static
## integer constants in the interface class @link libsbml libsbml@endlink.
## The names of the type codes all begin with the characters
## <code>SBML_</code>.
##
## Each libSBML extension for SBML Level 3 packages adds its own
## type codes to objects. The present type code belongs to libSBML's
## extension to support the SBML Level 3 Layout
## (“layout”) package. It is used to identify the type of
## SBML component to which a given object corresponds.
##
SBML_LAYOUT_SPECIESGLYPH = _libsbml.SBML_LAYOUT_SPECIESGLYPH
## @var long SBML_LAYOUT_SPECIESGLYPH
##
## <span class="pkg-marker pkg-color-layout">layout</span> One of the
## possible SBML component type codes.
##
## LibSBML attaches an identifying code to every kind of SBML object.
## These are known as <em>SBML type codes</em>. In other languages,
## the set of type codes is stored in an enumeration; in the Java
## language interface for libSBML, the type codes are defined as static
## integer constants in the interface class @link libsbml libsbml@endlink.
## The names of the type codes all begin with the characters
## <code>SBML_</code>.
##
## Each libSBML extension for SBML Level 3 packages adds its own
## type codes to objects. The present type code belongs to libSBML's
## extension to support the SBML Level 3 Layout
## (“layout”) package. It is used to identify the type of
## SBML component to which a given object corresponds.
##
SBML_LAYOUT_SPECIESREFERENCEGLYPH = _libsbml.SBML_LAYOUT_SPECIESREFERENCEGLYPH
## @var long SBML_LAYOUT_SPECIESREFERENCEGLYPH
##
## <span class="pkg-marker pkg-color-layout">layout</span> One of the
## possible SBML component type codes.
##
## LibSBML attaches an identifying code to every kind of SBML object.
## These are known as <em>SBML type codes</em>. In other languages,
## the set of type codes is stored in an enumeration; in the Java
## language interface for libSBML, the type codes are defined as static
## integer constants in the interface class @link libsbml libsbml@endlink.
## The names of the type codes all begin with the characters
## <code>SBML_</code>.
##
## Each libSBML extension for SBML Level 3 packages adds its own
## type codes to objects. The present type code belongs to libSBML's
## extension to support the SBML Level 3 Layout
## (“layout”) package. It is used to identify the type of
## SBML component to which a given object corresponds.
##
SBML_LAYOUT_TEXTGLYPH = _libsbml.SBML_LAYOUT_TEXTGLYPH
## @var long SBML_LAYOUT_TEXTGLYPH
##
## <span class="pkg-marker pkg-color-layout">layout</span> One of the
## possible SBML component type codes.
##
## LibSBML attaches an identifying code to every kind of SBML object.
## These are known as <em>SBML type codes</em>. In other languages,
## the set of type codes is stored in an enumeration; in the Java
## language interface for libSBML, the type codes are defined as static
## integer constants in the interface class @link libsbml libsbml@endlink.
## The names of the type codes all begin with the characters
## <code>SBML_</code>.
##
## Each libSBML extension for SBML Level 3 packages adds its own
## type codes to objects. The present type code belongs to libSBML's
## extension to support the SBML Level 3 Layout
## (“layout”) package. It is used to identify the type of
## SBML component to which a given object corresponds.
##
SBML_LAYOUT_REFERENCEGLYPH = _libsbml.SBML_LAYOUT_REFERENCEGLYPH
## @var long SBML_LAYOUT_REFERENCEGLYPH
##
## <span class="pkg-marker pkg-color-layout">layout</span> One of the
## possible SBML component type codes.
##
## LibSBML attaches an identifying code to every kind of SBML object.
## These are known as <em>SBML type codes</em>. In other languages,
## the set of type codes is stored in an enumeration; in the Java
## language interface for libSBML, the type codes are defined as static
## integer constants in the interface class @link libsbml libsbml@endlink.
## The names of the type codes all begin with the characters
## <code>SBML_</code>.
##
## Each libSBML extension for SBML Level 3 packages adds its own
## type codes to objects. The present type code belongs to libSBML's
## extension to support the SBML Level 3 Layout
## (“layout”) package. It is used to identify the type of
## SBML component to which a given object corresponds.
##
SBML_LAYOUT_GENERALGLYPH = _libsbml.SBML_LAYOUT_GENERALGLYPH
## @var long SBML_LAYOUT_GENERALGLYPH
##
## <span class="pkg-marker pkg-color-layout">layout</span> One of the
## possible SBML component type codes.
##
## LibSBML attaches an identifying code to every kind of SBML object.
## These are known as <em>SBML type codes</em>. In other languages,
## the set of type codes is stored in an enumeration; in the Java
## language interface for libSBML, the type codes are defined as static
## integer constants in the interface class @link libsbml libsbml@endlink.
## The names of the type codes all begin with the characters
## <code>SBML_</code>.
##
## Each libSBML extension for SBML Level 3 packages adds its own
## type codes to objects. The present type code belongs to libSBML's
## extension to support the SBML Level 3 Layout
## (“layout”) package. It is used to identify the type of
## SBML component to which a given object corresponds.
##
## SpeciesReferenceRole_t
SPECIES_ROLE_UNDEFINED = _libsbml.SPECIES_ROLE_UNDEFINED
## @var long SPECIES_ROLE_UNDEFINED
##
## <span class="pkg-marker pkg-color-layout">layout</span> One of the
## possible roles of a SpeciesReferenceGlyph.
##
## SpeciesReferenceGlyphs include an attribute to describe the role of a
## given SpeciesReference in a model diagram. The present code is one of
## the possible values for this role attribute.
##
SPECIES_ROLE_SUBSTRATE = _libsbml.SPECIES_ROLE_SUBSTRATE
## @var long SPECIES_ROLE_SUBSTRATE
##
## <span class="pkg-marker pkg-color-layout">layout</span> One of the
## possible roles of a SpeciesReferenceGlyph.
##
## SpeciesReferenceGlyphs include an attribute to describe the role of a
## given SpeciesReference in a model diagram. The present code is one of
## the possible values for this role attribute.
##
SPECIES_ROLE_PRODUCT = _libsbml.SPECIES_ROLE_PRODUCT
## @var long SPECIES_ROLE_PRODUCT
##
## <span class="pkg-marker pkg-color-layout">layout</span> One of the
## possible roles of a SpeciesReferenceGlyph.
##
## SpeciesReferenceGlyphs include an attribute to describe the role of a
## given SpeciesReference in a model diagram. The present code is one of
## the possible values for this role attribute.
##
SPECIES_ROLE_SIDESUBSTRATE = _libsbml.SPECIES_ROLE_SIDESUBSTRATE
## @var long SPECIES_ROLE_SIDESUBSTRATE
##
## <span class="pkg-marker pkg-color-layout">layout</span> One of the
## possible roles of a SpeciesReferenceGlyph.
##
## SpeciesReferenceGlyphs include an attribute to describe the role of a
## given SpeciesReference in a model diagram. The present code is one of
## the possible values for this role attribute.
##
SPECIES_ROLE_SIDEPRODUCT = _libsbml.SPECIES_ROLE_SIDEPRODUCT
## @var long SPECIES_ROLE_SIDEPRODUCT
##
## <span class="pkg-marker pkg-color-layout">layout</span> One of the
## possible roles of a SpeciesReferenceGlyph.
##
## SpeciesReferenceGlyphs include an attribute to describe the role of a
## given SpeciesReference in a model diagram. The present code is one of
## the possible values for this role attribute.
##
SPECIES_ROLE_MODIFIER = _libsbml.SPECIES_ROLE_MODIFIER
## @var long SPECIES_ROLE_MODIFIER
##
## <span class="pkg-marker pkg-color-layout">layout</span> One of the
## possible roles of a SpeciesReferenceGlyph.
##
## SpeciesReferenceGlyphs include an attribute to describe the role of a
## given SpeciesReference in a model diagram. The present code is one of
## the possible values for this role attribute.
##
SPECIES_ROLE_ACTIVATOR = _libsbml.SPECIES_ROLE_ACTIVATOR
## @var long SPECIES_ROLE_ACTIVATOR
##
## <span class="pkg-marker pkg-color-layout">layout</span> One of the
## possible roles of a SpeciesReferenceGlyph.
##
## SpeciesReferenceGlyphs include an attribute to describe the role of a
## given SpeciesReference in a model diagram. The present code is one of
## the possible values for this role attribute.
##
SPECIES_ROLE_INHIBITOR = _libsbml.SPECIES_ROLE_INHIBITOR
## @var long SPECIES_ROLE_INHIBITOR
##
## <span class="pkg-marker pkg-color-layout">layout</span> One of the
## possible roles of a SpeciesReferenceGlyph.
##
## SpeciesReferenceGlyphs include an attribute to describe the role of a
## given SpeciesReference in a model diagram. The present code is one of
## the possible values for this role attribute.
##
## SBMLQualTypeCode_t
SBML_QUAL_QUALITATIVE_SPECIES = _libsbml.SBML_QUAL_QUALITATIVE_SPECIES
## @var long SBML_QUAL_QUALITATIVE_SPECIES
##
## <span class="pkg-marker pkg-color-qual">qual</span> One of the
## possible SBML component type codes.
##
## LibSBML attaches an identifying code to every kind of SBML object.
## These are known as <em>SBML type codes</em>. In other languages,
## the set of type codes is stored in an enumeration; in the Java
## language interface for libSBML, the type codes are defined as static
## integer constants in the interface class @link libsbml libsbml@endlink.
## The names of the type codes all begin with the characters
## <code>SBML_</code>.
##
## Each libSBML extension for SBML Level 3 packages adds its own
## type codes to objects. The present type code belongs to libSBML's
## extension to support the SBML Level 3 Qualitative Models
## (“qual”) package. It is used to identify the type of
## SBML component to which a given object corresponds.
##
SBML_QUAL_TRANSITION = _libsbml.SBML_QUAL_TRANSITION
## @var long SBML_QUAL_TRANSITION
##
## <span class="pkg-marker pkg-color-qual">qual</span> One of the
## possible SBML component type codes.
##
## LibSBML attaches an identifying code to every kind of SBML object.
## These are known as <em>SBML type codes</em>. In other languages,
## the set of type codes is stored in an enumeration; in the Java
## language interface for libSBML, the type codes are defined as static
## integer constants in the interface class @link libsbml libsbml@endlink.
## The names of the type codes all begin with the characters
## <code>SBML_</code>.
##
## Each libSBML extension for SBML Level 3 packages adds its own
## type codes to objects. The present type code belongs to libSBML's
## extension to support the SBML Level 3 Qualitative Models
## (“qual”) package. It is used to identify the type of
## SBML component to which a given object corresponds.
##
SBML_QUAL_INPUT = _libsbml.SBML_QUAL_INPUT
## @var long SBML_QUAL_INPUT
##
## <span class="pkg-marker pkg-color-qual">qual</span> One of the
## possible SBML component type codes.
##
## LibSBML attaches an identifying code to every kind of SBML object.
## These are known as <em>SBML type codes</em>. In other languages,
## the set of type codes is stored in an enumeration; in the Java
## language interface for libSBML, the type codes are defined as static
## integer constants in the interface class @link libsbml libsbml@endlink.
## The names of the type codes all begin with the characters
## <code>SBML_</code>.
##
## Each libSBML extension for SBML Level 3 packages adds its own
## type codes to objects. The present type code belongs to libSBML's
## extension to support the SBML Level 3 Qualitative Models
## (“qual”) package. It is used to identify the type of
## SBML component to which a given object corresponds.
##
SBML_QUAL_OUTPUT = _libsbml.SBML_QUAL_OUTPUT
## @var long SBML_QUAL_OUTPUT
##
## <span class="pkg-marker pkg-color-qual">qual</span> One of the
## possible SBML component type codes.
##
## LibSBML attaches an identifying code to every kind of SBML object.
## These are known as <em>SBML type codes</em>. In other languages,
## the set of type codes is stored in an enumeration; in the Java
## language interface for libSBML, the type codes are defined as static
## integer constants in the interface class @link libsbml libsbml@endlink.
## The names of the type codes all begin with the characters
## <code>SBML_</code>.
##
## Each libSBML extension for SBML Level 3 packages adds its own
## type codes to objects. The present type code belongs to libSBML's
## extension to support the SBML Level 3 Qualitative Models
## (“qual”) package. It is used to identify the type of
## SBML component to which a given object corresponds.
##
SBML_QUAL_FUNCTION_TERM = _libsbml.SBML_QUAL_FUNCTION_TERM
## @var long SBML_QUAL_FUNCTION_TERM
##
## <span class="pkg-marker pkg-color-qual">qual</span> One of the
## possible SBML component type codes.
##
## LibSBML attaches an identifying code to every kind of SBML object.
## These are known as <em>SBML type codes</em>. In other languages,
## the set of type codes is stored in an enumeration; in the Java
## language interface for libSBML, the type codes are defined as static
## integer constants in the interface class @link libsbml libsbml@endlink.
## The names of the type codes all begin with the characters
## <code>SBML_</code>.
##
## Each libSBML extension for SBML Level 3 packages adds its own
## type codes to objects. The present type code belongs to libSBML's
## extension to support the SBML Level 3 Qualitative Models
## (“qual”) package. It is used to identify the type of
## SBML component to which a given object corresponds.
##
SBML_QUAL_DEFAULT_TERM = _libsbml.SBML_QUAL_DEFAULT_TERM
## @var long SBML_QUAL_DEFAULT_TERM
##
## <span class="pkg-marker pkg-color-qual">qual</span> One of the
## possible SBML component type codes.
##
## LibSBML attaches an identifying code to every kind of SBML object.
## These are known as <em>SBML type codes</em>. In other languages,
## the set of type codes is stored in an enumeration; in the Java
## language interface for libSBML, the type codes are defined as static
## integer constants in the interface class @link libsbml libsbml@endlink.
## The names of the type codes all begin with the characters
## <code>SBML_</code>.
##
## Each libSBML extension for SBML Level 3 packages adds its own
## type codes to objects. The present type code belongs to libSBML's
## extension to support the SBML Level 3 Qualitative Models
## (“qual”) package. It is used to identify the type of
## SBML component to which a given object corresponds.
##
## InputTransitionEffect_t
INPUT_TRANSITION_EFFECT_NONE = _libsbml.INPUT_TRANSITION_EFFECT_NONE
## @var long INPUT_TRANSITION_EFFECT_NONE
##
## <span class="pkg-marker pkg-color-qual">qual</span> One of the
## possible Input transition effects.
##
## The Input class is part of the SBML Level 3 Qualitative Models
## package. Its purpose is to represent a qualitative species that
## participates in a Transition; specifically, in Petri nets, these are
## the input places of the transition, and in logical models, they are
## the regulators of the species whose behaviour is defined by the
## transition. Input has an attribute named "transitionEffect" that
## is used to describe how the QualitativeSpecies referenced by the
## Input is affected by the Transition.
##
## The present code is one of the possible values of the
## "transitionEffect" attribute of an Input object.
##
INPUT_TRANSITION_EFFECT_CONSUMPTION = _libsbml.INPUT_TRANSITION_EFFECT_CONSUMPTION
## @var long INPUT_TRANSITION_EFFECT_CONSUMPTION
##
## <span class="pkg-marker pkg-color-qual">qual</span> One of the
## possible Input transition effects.
##
## The Input class is part of the SBML Level 3 Qualitative Models
## package. Its purpose is to represent a qualitative species that
## participates in a Transition; specifically, in Petri nets, these are
## the input places of the transition, and in logical models, they are
## the regulators of the species whose behaviour is defined by the
## transition. Input has an attribute named "transitionEffect" that
## is used to describe how the QualitativeSpecies referenced by the
## Input is affected by the Transition.
##
## The present code is one of the possible values of the
## "transitionEffect" attribute of an Input object.
##
INPUT_TRANSITION_EFFECT_UNKNOWN = _libsbml.INPUT_TRANSITION_EFFECT_UNKNOWN
## @var long INPUT_TRANSITION_EFFECT_UNKNOWN
##
## <span class="pkg-marker pkg-color-qual">qual</span> One of the
## possible Input transition effects.
##
## The Input class is part of the SBML Level 3 Qualitative Models
## package. Its purpose is to represent a qualitative species that
## participates in a Transition; specifically, in Petri nets, these are
## the input places of the transition, and in logical models, they are
## the regulators of the species whose behaviour is defined by the
## transition. Input has an attribute named "transitionEffect" that
## is used to describe how the QualitativeSpecies referenced by the
## Input is affected by the Transition.
##
## The present code is one of the possible values of the
## "transitionEffect" attribute of an Input object.
##
## InputSign_t
INPUT_SIGN_POSITIVE = _libsbml.INPUT_SIGN_POSITIVE
## @var long INPUT_SIGN_POSITIVE
##
## <span class="pkg-marker pkg-color-qual">qual</span> One of the
## possible Input "sign" attribute values.
##
## The Input class is part of the SBML Level 3 Qualitative Models
## package. Its purpose is to represent a qualitative species that
## participates in a Transition; specifically, in Petri nets, these are
## the input places of the transition, and in logical models, they are
## the regulators of the species whose behaviour is defined by the
## transition. Input has an attribute named "sign" that is used to
## indicate whether the contribution of this input is positive, negative,
## both (dual) or unknown. This enables a model to distinguish between
## stimulation and inhibition and can facilitate interpretation of
## the model without the mathematics. The sign is particularly used for
## visualization purposes and has no impact on the mathematical
## interpretation.
##
## The present code is one of the possible values of the "sign" attribute
## of an Input object.
##
INPUT_SIGN_NEGATIVE = _libsbml.INPUT_SIGN_NEGATIVE
## @var long INPUT_SIGN_NEGATIVE
##
## <span class="pkg-marker pkg-color-qual">qual</span> One of the
## possible Input "sign" attribute values.
##
## The Input class is part of the SBML Level 3 Qualitative Models
## package. Its purpose is to represent a qualitative species that
## participates in a Transition; specifically, in Petri nets, these are
## the input places of the transition, and in logical models, they are
## the regulators of the species whose behaviour is defined by the
## transition. Input has an attribute named "sign" that is used to
## indicate whether the contribution of this input is positive, negative,
## both (dual) or unknown. This enables a model to distinguish between
## stimulation and inhibition and can facilitate interpretation of
## the model without the mathematics. The sign is particularly used for
## visualization purposes and has no impact on the mathematical
## interpretation.
##
## The present code is one of the possible values of the "sign" attribute
## of an Input object.
##
INPUT_SIGN_DUAL = _libsbml.INPUT_SIGN_DUAL
## @var long INPUT_SIGN_DUAL
##
## <span class="pkg-marker pkg-color-qual">qual</span> One of the
## possible Input "sign" attribute values.
##
## The Input class is part of the SBML Level 3 Qualitative Models
## package. Its purpose is to represent a qualitative species that
## participates in a Transition; specifically, in Petri nets, these are
## the input places of the transition, and in logical models, they are
## the regulators of the species whose behaviour is defined by the
## transition. Input has an attribute named "sign" that is used to
## indicate whether the contribution of this input is positive, negative,
## both (dual) or unknown. This enables a model to distinguish between
## stimulation and inhibition and can facilitate interpretation of
## the model without the mathematics. The sign is particularly used for
## visualization purposes and has no impact on the mathematical
## interpretation.
##
## The present code is one of the possible values of the "sign" attribute
## of an Input object.
##
INPUT_SIGN_UNKNOWN = _libsbml.INPUT_SIGN_UNKNOWN
## @var long INPUT_SIGN_UNKNOWN
##
## <span class="pkg-marker pkg-color-qual">qual</span> One of the
## possible Input "sign" attribute values.
##
## The Input class is part of the SBML Level 3 Qualitative Models
## package. Its purpose is to represent a qualitative species that
## participates in a Transition; specifically, in Petri nets, these are
## the input places of the transition, and in logical models, they are
## the regulators of the species whose behaviour is defined by the
## transition. Input has an attribute named "sign" that is used to
## indicate whether the contribution of this input is positive, negative,
## both (dual) or unknown. This enables a model to distinguish between
## stimulation and inhibition and can facilitate interpretation of
## the model without the mathematics. The sign is particularly used for
## visualization purposes and has no impact on the mathematical
## interpretation.
##
## The present code is one of the possible values of the "sign" attribute
## of an Input object.
##
INPUT_SIGN_VALUE_NOTSET = _libsbml.INPUT_SIGN_VALUE_NOTSET
## @var long INPUT_SIGN_VALUE_NOTSET
##
## <span class="pkg-marker pkg-color-qual">qual</span> One of the
## possible Input "sign" attribute values.
##
## The Input class is part of the SBML Level 3 Qualitative Models
## package. Its purpose is to represent a qualitative species that
## participates in a Transition; specifically, in Petri nets, these are
## the input places of the transition, and in logical models, they are
## the regulators of the species whose behaviour is defined by the
## transition. Input has an attribute named "sign" that is used to
## indicate whether the contribution of this input is positive, negative,
## both (dual) or unknown. This enables a model to distinguish between
## stimulation and inhibition and can facilitate interpretation of
## the model without the mathematics. The sign is particularly used for
## visualization purposes and has no impact on the mathematical
## interpretation.
##
## The present code is one of the possible values of the "sign" attribute
## of an Input object.
##
## OutputTransitionEffect_t
OUTPUT_TRANSITION_EFFECT_PRODUCTION = _libsbml.OUTPUT_TRANSITION_EFFECT_PRODUCTION
## @var long OUTPUT_TRANSITION_EFFECT_PRODUCTION
##
## <span class="pkg-marker pkg-color-qual">qual</span> One of the
## possible OutputTransition "transitionEffect" attribute values.
##
## The OutputTransition class is part of the SBML Level 3
## Qualitative Models package. Its purpose is to represent a qualitative
## species that is affected by a Transition. (In Petri net models, these
## are the output places of the transition.) OutputTransition has an
## attribute named "transitionEffect" that is used to describe how the
## QualitativeSpecies referenced by the Output is affected by the
## Transition.
##
## The present code is one of the possible values of the
## "transitionEffect" attribute of an OutputTransition object.
##
OUTPUT_TRANSITION_EFFECT_ASSIGNMENT_LEVEL = _libsbml.OUTPUT_TRANSITION_EFFECT_ASSIGNMENT_LEVEL
## @var long OUTPUT_TRANSITION_EFFECT_ASSIGNMENT_LEVEL
##
## <span class="pkg-marker pkg-color-qual">qual</span> One of the
## possible OutputTransition "transitionEffect" attribute values.
##
## The OutputTransition class is part of the SBML Level 3
## Qualitative Models package. Its purpose is to represent a qualitative
## species that is affected by a Transition. (In Petri net models, these
## are the output places of the transition.) OutputTransition has an
## attribute named "transitionEffect" that is used to describe how the
## QualitativeSpecies referenced by the Output is affected by the
## Transition.
##
## The present code is one of the possible values of the
## "transitionEffect" attribute of an OutputTransition object.
##
OUTPUT_TRANSITION_EFFECT_UNKNOWN = _libsbml.OUTPUT_TRANSITION_EFFECT_UNKNOWN
## @var long OUTPUT_TRANSITION_EFFECT_UNKNOWN
##
## <span class="pkg-marker pkg-color-qual">qual</span> One of the
## possible OutputTransition "transitionEffect" attribute values.
##
## The OutputTransition class is part of the SBML Level 3
## Qualitative Models package. Its purpose is to represent a qualitative
## species that is affected by a Transition. (In Petri net models, these
## are the output places of the transition.) OutputTransition has an
## attribute named "transitionEffect" that is used to describe how the
## QualitativeSpecies referenced by the Output is affected by the
## Transition.
##
## The present code is one of the possible values of the
## "transitionEffect" attribute of an OutputTransition object.
##
## CompSBMLErrorCode_t
CompUnknown = _libsbml.CompUnknown
## @var long CompUnknown
##
## <span class="pkg-marker pkg-color-comp">comp</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “comp” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
CompNSUndeclared = _libsbml.CompNSUndeclared
## @var long CompNSUndeclared
##
## <span class="pkg-marker pkg-color-comp">comp</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “comp” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
CompElementNotInNs = _libsbml.CompElementNotInNs
## @var long CompElementNotInNs
##
## <span class="pkg-marker pkg-color-comp">comp</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “comp” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
CompDuplicateComponentId = _libsbml.CompDuplicateComponentId
## @var long CompDuplicateComponentId
##
## <span class="pkg-marker pkg-color-comp">comp</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “comp” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
CompUniqueModelIds = _libsbml.CompUniqueModelIds
## @var long CompUniqueModelIds
##
## <span class="pkg-marker pkg-color-comp">comp</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “comp” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
CompUniquePortIds = _libsbml.CompUniquePortIds
## @var long CompUniquePortIds
##
## <span class="pkg-marker pkg-color-comp">comp</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “comp” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
CompInvalidSIdSyntax = _libsbml.CompInvalidSIdSyntax
## @var long CompInvalidSIdSyntax
##
## <span class="pkg-marker pkg-color-comp">comp</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “comp” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
CompInvalidSubmodelRefSyntax = _libsbml.CompInvalidSubmodelRefSyntax
## @var long CompInvalidSubmodelRefSyntax
##
## <span class="pkg-marker pkg-color-comp">comp</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “comp” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
CompInvalidDeletionSyntax = _libsbml.CompInvalidDeletionSyntax
## @var long CompInvalidDeletionSyntax
##
## <span class="pkg-marker pkg-color-comp">comp</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “comp” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
CompInvalidConversionFactorSyntax = _libsbml.CompInvalidConversionFactorSyntax
## @var long CompInvalidConversionFactorSyntax
##
## <span class="pkg-marker pkg-color-comp">comp</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “comp” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
CompInvalidNameSyntax = _libsbml.CompInvalidNameSyntax
## @var long CompInvalidNameSyntax
##
## <span class="pkg-marker pkg-color-comp">comp</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “comp” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
CompReplacedUnitsShouldMatch = _libsbml.CompReplacedUnitsShouldMatch
## @var long CompReplacedUnitsShouldMatch
##
## <span class="pkg-marker pkg-color-comp">comp</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “comp” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
CompOneListOfReplacedElements = _libsbml.CompOneListOfReplacedElements
## @var long CompOneListOfReplacedElements
##
## <span class="pkg-marker pkg-color-comp">comp</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “comp” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
CompLOReplaceElementsAllowedElements = _libsbml.CompLOReplaceElementsAllowedElements
## @var long CompLOReplaceElementsAllowedElements
##
## <span class="pkg-marker pkg-color-comp">comp</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “comp” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
CompLOReplacedElementsAllowedAttribs = _libsbml.CompLOReplacedElementsAllowedAttribs
## @var long CompLOReplacedElementsAllowedAttribs
##
## <span class="pkg-marker pkg-color-comp">comp</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “comp” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
CompEmptyLOReplacedElements = _libsbml.CompEmptyLOReplacedElements
## @var long CompEmptyLOReplacedElements
##
## <span class="pkg-marker pkg-color-comp">comp</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “comp” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
CompOneReplacedByElement = _libsbml.CompOneReplacedByElement
## @var long CompOneReplacedByElement
##
## <span class="pkg-marker pkg-color-comp">comp</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “comp” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
CompAttributeRequiredMissing = _libsbml.CompAttributeRequiredMissing
## @var long CompAttributeRequiredMissing
##
## <span class="pkg-marker pkg-color-comp">comp</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “comp” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
CompAttributeRequiredMustBeBoolean = _libsbml.CompAttributeRequiredMustBeBoolean
## @var long CompAttributeRequiredMustBeBoolean
##
## <span class="pkg-marker pkg-color-comp">comp</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “comp” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
CompRequiredTrueIfElementsRemain = _libsbml.CompRequiredTrueIfElementsRemain
## @var long CompRequiredTrueIfElementsRemain
##
## <span class="pkg-marker pkg-color-comp">comp</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “comp” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
CompRequiredFalseIfAllElementsReplaced = _libsbml.CompRequiredFalseIfAllElementsReplaced
## @var long CompRequiredFalseIfAllElementsReplaced
##
## <span class="pkg-marker pkg-color-comp">comp</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “comp” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
CompOneListOfModelDefinitions = _libsbml.CompOneListOfModelDefinitions
## @var long CompOneListOfModelDefinitions
##
## <span class="pkg-marker pkg-color-comp">comp</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “comp” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
CompEmptyLOModelDefs = _libsbml.CompEmptyLOModelDefs
## @var long CompEmptyLOModelDefs
##
## <span class="pkg-marker pkg-color-comp">comp</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “comp” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
CompLOModelDefsAllowedElements = _libsbml.CompLOModelDefsAllowedElements
## @var long CompLOModelDefsAllowedElements
##
## <span class="pkg-marker pkg-color-comp">comp</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “comp” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
CompLOExtModelDefsAllowedElements = _libsbml.CompLOExtModelDefsAllowedElements
## @var long CompLOExtModelDefsAllowedElements
##
## <span class="pkg-marker pkg-color-comp">comp</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “comp” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
CompLOModelDefsAllowedAttributes = _libsbml.CompLOModelDefsAllowedAttributes
## @var long CompLOModelDefsAllowedAttributes
##
## <span class="pkg-marker pkg-color-comp">comp</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “comp” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
CompLOExtModDefsAllowedAttributes = _libsbml.CompLOExtModDefsAllowedAttributes
## @var long CompLOExtModDefsAllowedAttributes
##
## <span class="pkg-marker pkg-color-comp">comp</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “comp” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
CompOneListOfExtModelDefinitions = _libsbml.CompOneListOfExtModelDefinitions
## @var long CompOneListOfExtModelDefinitions
##
## <span class="pkg-marker pkg-color-comp">comp</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “comp” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
CompAttributeRequiredMustBeTrue = _libsbml.CompAttributeRequiredMustBeTrue
## @var long CompAttributeRequiredMustBeTrue
##
## <span class="pkg-marker pkg-color-comp">comp</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “comp” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
CompExtModDefAllowedCoreAttributes = _libsbml.CompExtModDefAllowedCoreAttributes
## @var long CompExtModDefAllowedCoreAttributes
##
## <span class="pkg-marker pkg-color-comp">comp</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “comp” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
CompExtModDefAllowedElements = _libsbml.CompExtModDefAllowedElements
## @var long CompExtModDefAllowedElements
##
## <span class="pkg-marker pkg-color-comp">comp</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “comp” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
CompExtModDefAllowedAttributes = _libsbml.CompExtModDefAllowedAttributes
## @var long CompExtModDefAllowedAttributes
##
## <span class="pkg-marker pkg-color-comp">comp</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “comp” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
CompReferenceMustBeL3 = _libsbml.CompReferenceMustBeL3
## @var long CompReferenceMustBeL3
##
## <span class="pkg-marker pkg-color-comp">comp</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “comp” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
CompModReferenceMustIdOfModel = _libsbml.CompModReferenceMustIdOfModel
## @var long CompModReferenceMustIdOfModel
##
## <span class="pkg-marker pkg-color-comp">comp</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “comp” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
CompExtModMd5DoesNotMatch = _libsbml.CompExtModMd5DoesNotMatch
## @var long CompExtModMd5DoesNotMatch
##
## <span class="pkg-marker pkg-color-comp">comp</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “comp” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
CompInvalidSourceSyntax = _libsbml.CompInvalidSourceSyntax
## @var long CompInvalidSourceSyntax
##
## <span class="pkg-marker pkg-color-comp">comp</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “comp” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
CompInvalidModelRefSyntax = _libsbml.CompInvalidModelRefSyntax
## @var long CompInvalidModelRefSyntax
##
## <span class="pkg-marker pkg-color-comp">comp</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “comp” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
CompInvalidMD5Syntax = _libsbml.CompInvalidMD5Syntax
## @var long CompInvalidMD5Syntax
##
## <span class="pkg-marker pkg-color-comp">comp</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “comp” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
CompCircularExternalModelReference = _libsbml.CompCircularExternalModelReference
## @var long CompCircularExternalModelReference
##
## <span class="pkg-marker pkg-color-comp">comp</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “comp” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
CompOneListOfOnModel = _libsbml.CompOneListOfOnModel
## @var long CompOneListOfOnModel
##
## <span class="pkg-marker pkg-color-comp">comp</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “comp” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
CompNoEmptyListOfOnModel = _libsbml.CompNoEmptyListOfOnModel
## @var long CompNoEmptyListOfOnModel
##
## <span class="pkg-marker pkg-color-comp">comp</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “comp” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
CompLOSubmodelsAllowedElements = _libsbml.CompLOSubmodelsAllowedElements
## @var long CompLOSubmodelsAllowedElements
##
## <span class="pkg-marker pkg-color-comp">comp</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “comp” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
CompLOPortsAllowedElements = _libsbml.CompLOPortsAllowedElements
## @var long CompLOPortsAllowedElements
##
## <span class="pkg-marker pkg-color-comp">comp</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “comp” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
CompLOSubmodelsAllowedAttributes = _libsbml.CompLOSubmodelsAllowedAttributes
## @var long CompLOSubmodelsAllowedAttributes
##
## <span class="pkg-marker pkg-color-comp">comp</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “comp” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
CompLOPortsAllowedAttributes = _libsbml.CompLOPortsAllowedAttributes
## @var long CompLOPortsAllowedAttributes
##
## <span class="pkg-marker pkg-color-comp">comp</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “comp” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
CompSubmodelAllowedCoreAttributes = _libsbml.CompSubmodelAllowedCoreAttributes
## @var long CompSubmodelAllowedCoreAttributes
##
## <span class="pkg-marker pkg-color-comp">comp</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “comp” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
CompSubmodelAllowedElements = _libsbml.CompSubmodelAllowedElements
## @var long CompSubmodelAllowedElements
##
## <span class="pkg-marker pkg-color-comp">comp</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “comp” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
CompOneListOfDeletionOnSubmodel = _libsbml.CompOneListOfDeletionOnSubmodel
## @var long CompOneListOfDeletionOnSubmodel
##
## <span class="pkg-marker pkg-color-comp">comp</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “comp” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
CompSubmodelNoEmptyLODeletions = _libsbml.CompSubmodelNoEmptyLODeletions
## @var long CompSubmodelNoEmptyLODeletions
##
## <span class="pkg-marker pkg-color-comp">comp</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “comp” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
CompLODeletionsAllowedElements = _libsbml.CompLODeletionsAllowedElements
## @var long CompLODeletionsAllowedElements
##
## <span class="pkg-marker pkg-color-comp">comp</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “comp” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
CompLODeletionAllowedAttributes = _libsbml.CompLODeletionAllowedAttributes
## @var long CompLODeletionAllowedAttributes
##
## <span class="pkg-marker pkg-color-comp">comp</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “comp” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
CompSubmodelAllowedAttributes = _libsbml.CompSubmodelAllowedAttributes
## @var long CompSubmodelAllowedAttributes
##
## <span class="pkg-marker pkg-color-comp">comp</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “comp” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
CompModReferenceSyntax = _libsbml.CompModReferenceSyntax
## @var long CompModReferenceSyntax
##
## <span class="pkg-marker pkg-color-comp">comp</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “comp” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
CompInvalidTimeConvFactorSyntax = _libsbml.CompInvalidTimeConvFactorSyntax
## @var long CompInvalidTimeConvFactorSyntax
##
## <span class="pkg-marker pkg-color-comp">comp</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “comp” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
CompInvalidExtentConvFactorSyntax = _libsbml.CompInvalidExtentConvFactorSyntax
## @var long CompInvalidExtentConvFactorSyntax
##
## <span class="pkg-marker pkg-color-comp">comp</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “comp” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
CompSubmodelMustReferenceModel = _libsbml.CompSubmodelMustReferenceModel
## @var long CompSubmodelMustReferenceModel
##
## <span class="pkg-marker pkg-color-comp">comp</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “comp” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
CompSubmodelCannotReferenceSelf = _libsbml.CompSubmodelCannotReferenceSelf
## @var long CompSubmodelCannotReferenceSelf
##
## <span class="pkg-marker pkg-color-comp">comp</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “comp” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
CompModCannotCircularlyReferenceSelf = _libsbml.CompModCannotCircularlyReferenceSelf
## @var long CompModCannotCircularlyReferenceSelf
##
## <span class="pkg-marker pkg-color-comp">comp</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “comp” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
CompTimeConversionMustBeParameter = _libsbml.CompTimeConversionMustBeParameter
## @var long CompTimeConversionMustBeParameter
##
## <span class="pkg-marker pkg-color-comp">comp</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “comp” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
CompExtentConversionMustBeParameter = _libsbml.CompExtentConversionMustBeParameter
## @var long CompExtentConversionMustBeParameter
##
## <span class="pkg-marker pkg-color-comp">comp</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “comp” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
CompPortRefMustReferencePort = _libsbml.CompPortRefMustReferencePort
## @var long CompPortRefMustReferencePort
##
## <span class="pkg-marker pkg-color-comp">comp</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “comp” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
CompIdRefMustReferenceObject = _libsbml.CompIdRefMustReferenceObject
## @var long CompIdRefMustReferenceObject
##
## <span class="pkg-marker pkg-color-comp">comp</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “comp” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
CompUnitRefMustReferenceUnitDef = _libsbml.CompUnitRefMustReferenceUnitDef
## @var long CompUnitRefMustReferenceUnitDef
##
## <span class="pkg-marker pkg-color-comp">comp</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “comp” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
CompMetaIdRefMustReferenceObject = _libsbml.CompMetaIdRefMustReferenceObject
## @var long CompMetaIdRefMustReferenceObject
##
## <span class="pkg-marker pkg-color-comp">comp</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “comp” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
CompParentOfSBRefChildMustBeSubmodel = _libsbml.CompParentOfSBRefChildMustBeSubmodel
## @var long CompParentOfSBRefChildMustBeSubmodel
##
## <span class="pkg-marker pkg-color-comp">comp</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “comp” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
CompInvalidPortRefSyntax = _libsbml.CompInvalidPortRefSyntax
## @var long CompInvalidPortRefSyntax
##
## <span class="pkg-marker pkg-color-comp">comp</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “comp” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
CompInvalidIdRefSyntax = _libsbml.CompInvalidIdRefSyntax
## @var long CompInvalidIdRefSyntax
##
## <span class="pkg-marker pkg-color-comp">comp</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “comp” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
CompInvalidUnitRefSyntax = _libsbml.CompInvalidUnitRefSyntax
## @var long CompInvalidUnitRefSyntax
##
## <span class="pkg-marker pkg-color-comp">comp</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “comp” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
CompInvalidMetaIdRefSyntax = _libsbml.CompInvalidMetaIdRefSyntax
## @var long CompInvalidMetaIdRefSyntax
##
## <span class="pkg-marker pkg-color-comp">comp</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “comp” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
CompOneSBaseRefOnly = _libsbml.CompOneSBaseRefOnly
## @var long CompOneSBaseRefOnly
##
## <span class="pkg-marker pkg-color-comp">comp</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “comp” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
# NOTE(review): auto-generated SWIG wrapper code. Each name below re-exports
# the matching integer error/warning code from the native _libsbml module so
# it is reachable at this module's top level; the `## @var` blocks are Doxygen
# documentation comments and must not be edited by hand.
CompDeprecatedSBaseRefSpelling = _libsbml.CompDeprecatedSBaseRefSpelling
## @var long CompDeprecatedSBaseRefSpelling
##
## <span class="pkg-marker pkg-color-comp">comp</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “comp” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
CompSBaseRefMustReferenceObject = _libsbml.CompSBaseRefMustReferenceObject
## @var long CompSBaseRefMustReferenceObject
##
## <span class="pkg-marker pkg-color-comp">comp</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “comp” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
CompSBaseRefMustReferenceOnlyOneObject = _libsbml.CompSBaseRefMustReferenceOnlyOneObject
## @var long CompSBaseRefMustReferenceOnlyOneObject
##
## <span class="pkg-marker pkg-color-comp">comp</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “comp” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
CompNoMultipleReferences = _libsbml.CompNoMultipleReferences
## @var long CompNoMultipleReferences
##
## <span class="pkg-marker pkg-color-comp">comp</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “comp” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
CompPortMustReferenceObject = _libsbml.CompPortMustReferenceObject
## @var long CompPortMustReferenceObject
##
## <span class="pkg-marker pkg-color-comp">comp</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “comp” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
CompPortMustReferenceOnlyOneObject = _libsbml.CompPortMustReferenceOnlyOneObject
## @var long CompPortMustReferenceOnlyOneObject
##
## <span class="pkg-marker pkg-color-comp">comp</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “comp” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
CompPortAllowedAttributes = _libsbml.CompPortAllowedAttributes
## @var long CompPortAllowedAttributes
##
## <span class="pkg-marker pkg-color-comp">comp</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “comp” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
CompPortReferencesUnique = _libsbml.CompPortReferencesUnique
## @var long CompPortReferencesUnique
##
## <span class="pkg-marker pkg-color-comp">comp</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “comp” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
CompDeletionMustReferenceObject = _libsbml.CompDeletionMustReferenceObject
## @var long CompDeletionMustReferenceObject
##
## <span class="pkg-marker pkg-color-comp">comp</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “comp” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
CompDeletionMustReferOnlyOneObject = _libsbml.CompDeletionMustReferOnlyOneObject
## @var long CompDeletionMustReferOnlyOneObject
##
## <span class="pkg-marker pkg-color-comp">comp</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “comp” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
CompDeletionAllowedAttributes = _libsbml.CompDeletionAllowedAttributes
## @var long CompDeletionAllowedAttributes
##
## <span class="pkg-marker pkg-color-comp">comp</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “comp” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
CompReplacedElementMustRefObject = _libsbml.CompReplacedElementMustRefObject
## @var long CompReplacedElementMustRefObject
##
## <span class="pkg-marker pkg-color-comp">comp</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “comp” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
CompReplacedElementMustRefOnlyOne = _libsbml.CompReplacedElementMustRefOnlyOne
## @var long CompReplacedElementMustRefOnlyOne
##
## <span class="pkg-marker pkg-color-comp">comp</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “comp” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
CompReplacedElementAllowedAttributes = _libsbml.CompReplacedElementAllowedAttributes
## @var long CompReplacedElementAllowedAttributes
##
## <span class="pkg-marker pkg-color-comp">comp</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “comp” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
CompReplacedElementSubModelRef = _libsbml.CompReplacedElementSubModelRef
## @var long CompReplacedElementSubModelRef
##
## <span class="pkg-marker pkg-color-comp">comp</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “comp” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
CompReplacedElementDeletionRef = _libsbml.CompReplacedElementDeletionRef
## @var long CompReplacedElementDeletionRef
##
## <span class="pkg-marker pkg-color-comp">comp</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “comp” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
CompReplacedElementConvFactorRef = _libsbml.CompReplacedElementConvFactorRef
## @var long CompReplacedElementConvFactorRef
##
## <span class="pkg-marker pkg-color-comp">comp</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “comp” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
CompReplacedElementSameReference = _libsbml.CompReplacedElementSameReference
## @var long CompReplacedElementSameReference
##
## <span class="pkg-marker pkg-color-comp">comp</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “comp” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
CompReplacedElementNoDelAndConvFact = _libsbml.CompReplacedElementNoDelAndConvFact
## @var long CompReplacedElementNoDelAndConvFact
##
## <span class="pkg-marker pkg-color-comp">comp</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “comp” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
CompReplacedByMustRefObject = _libsbml.CompReplacedByMustRefObject
## @var long CompReplacedByMustRefObject
##
## <span class="pkg-marker pkg-color-comp">comp</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “comp” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
CompReplacedByMustRefOnlyOne = _libsbml.CompReplacedByMustRefOnlyOne
## @var long CompReplacedByMustRefOnlyOne
##
## <span class="pkg-marker pkg-color-comp">comp</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “comp” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
CompReplacedByAllowedAttributes = _libsbml.CompReplacedByAllowedAttributes
## @var long CompReplacedByAllowedAttributes
##
## <span class="pkg-marker pkg-color-comp">comp</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “comp” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
CompReplacedBySubModelRef = _libsbml.CompReplacedBySubModelRef
## @var long CompReplacedBySubModelRef
##
## <span class="pkg-marker pkg-color-comp">comp</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “comp” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
CompMustReplaceSameClass = _libsbml.CompMustReplaceSameClass
## @var long CompMustReplaceSameClass
##
## <span class="pkg-marker pkg-color-comp">comp</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “comp” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
CompMustReplaceIDs = _libsbml.CompMustReplaceIDs
## @var long CompMustReplaceIDs
##
## <span class="pkg-marker pkg-color-comp">comp</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “comp” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
CompMustReplaceMetaIDs = _libsbml.CompMustReplaceMetaIDs
## @var long CompMustReplaceMetaIDs
##
## <span class="pkg-marker pkg-color-comp">comp</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “comp” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
CompMustReplacePackageIDs = _libsbml.CompMustReplacePackageIDs
## @var long CompMustReplacePackageIDs
##
## <span class="pkg-marker pkg-color-comp">comp</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “comp” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
CompUnresolvedReference = _libsbml.CompUnresolvedReference
## @var long CompUnresolvedReference
##
## <span class="pkg-marker pkg-color-comp">comp</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “comp” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
CompNoModelInReference = _libsbml.CompNoModelInReference
## @var long CompNoModelInReference
##
## <span class="pkg-marker pkg-color-comp">comp</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “comp” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
CompExtModDefBad = _libsbml.CompExtModDefBad
## @var long CompExtModDefBad
##
## <span class="pkg-marker pkg-color-comp">comp</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “comp” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
CompModelFlatteningFailed = _libsbml.CompModelFlatteningFailed
## @var long CompModelFlatteningFailed
##
## <span class="pkg-marker pkg-color-comp">comp</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “comp” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
CompFlatModelNotValid = _libsbml.CompFlatModelNotValid
## @var long CompFlatModelNotValid
##
## <span class="pkg-marker pkg-color-comp">comp</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “comp” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
CompLineNumbersUnreliable = _libsbml.CompLineNumbersUnreliable
## @var long CompLineNumbersUnreliable
##
## <span class="pkg-marker pkg-color-comp">comp</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “comp” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
CompFlatteningNotRecognisedReqd = _libsbml.CompFlatteningNotRecognisedReqd
## @var long CompFlatteningNotRecognisedReqd
##
## <span class="pkg-marker pkg-color-comp">comp</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “comp” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
CompFlatteningNotRecognisedNotReqd = _libsbml.CompFlatteningNotRecognisedNotReqd
## @var long CompFlatteningNotRecognisedNotReqd
##
## <span class="pkg-marker pkg-color-comp">comp</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “comp” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
CompFlatteningNotImplementedNotReqd = _libsbml.CompFlatteningNotImplementedNotReqd
## @var long CompFlatteningNotImplementedNotReqd
##
## <span class="pkg-marker pkg-color-comp">comp</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “comp” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
CompFlatteningNotImplementedReqd = _libsbml.CompFlatteningNotImplementedReqd
## @var long CompFlatteningNotImplementedReqd
##
## <span class="pkg-marker pkg-color-comp">comp</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “comp” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
CompFlatteningWarning = _libsbml.CompFlatteningWarning
## @var long CompFlatteningWarning
##
## <span class="pkg-marker pkg-color-comp">comp</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “comp” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
CompDeprecatedDeleteFunction = _libsbml.CompDeprecatedDeleteFunction
## @var long CompDeprecatedDeleteFunction
##
## <span class="pkg-marker pkg-color-comp">comp</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “comp” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
CompDeprecatedReplaceFunction = _libsbml.CompDeprecatedReplaceFunction
## @var long CompDeprecatedReplaceFunction
##
## <span class="pkg-marker pkg-color-comp">comp</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “comp” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
CompDeletedReplacement = _libsbml.CompDeletedReplacement
## @var long CompDeletedReplacement
##
## <span class="pkg-marker pkg-color-comp">comp</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “comp” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
CompIdRefMayReferenceUnknownPackage = _libsbml.CompIdRefMayReferenceUnknownPackage
## @var long CompIdRefMayReferenceUnknownPackage
##
## <span class="pkg-marker pkg-color-comp">comp</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “comp” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
CompMetaIdRefMayReferenceUnknownPkg = _libsbml.CompMetaIdRefMayReferenceUnknownPkg
## @var long CompMetaIdRefMayReferenceUnknownPkg
##
## <span class="pkg-marker pkg-color-comp">comp</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “comp” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
# NOTE(review): auto-generated SWIG wrapper code. The names below re-export
# the FbcSBMLErrorCode_t integer error/warning codes from the native
# _libsbml module; the `## @var` blocks are Doxygen documentation comments
# and must not be edited by hand.
## FbcSBMLErrorCode_t
FbcUnknown = _libsbml.FbcUnknown
## @var long FbcUnknown
##
## <span class="pkg-marker pkg-color-fbc">fbc</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “fbc” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
FbcNSUndeclared = _libsbml.FbcNSUndeclared
## @var long FbcNSUndeclared
##
## <span class="pkg-marker pkg-color-fbc">fbc</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “fbc” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
FbcElementNotInNs = _libsbml.FbcElementNotInNs
## @var long FbcElementNotInNs
##
## <span class="pkg-marker pkg-color-fbc">fbc</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “fbc” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
FbcDuplicateComponentId = _libsbml.FbcDuplicateComponentId
## @var long FbcDuplicateComponentId
##
## <span class="pkg-marker pkg-color-fbc">fbc</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “fbc” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
FbcSBMLSIdSyntax = _libsbml.FbcSBMLSIdSyntax
## @var long FbcSBMLSIdSyntax
##
## <span class="pkg-marker pkg-color-fbc">fbc</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “fbc” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
FbcAttributeRequiredMissing = _libsbml.FbcAttributeRequiredMissing
## @var long FbcAttributeRequiredMissing
##
## <span class="pkg-marker pkg-color-fbc">fbc</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “fbc” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
FbcAttributeRequiredMustBeBoolean = _libsbml.FbcAttributeRequiredMustBeBoolean
## @var long FbcAttributeRequiredMustBeBoolean
##
## <span class="pkg-marker pkg-color-fbc">fbc</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “fbc” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
FbcRequiredFalse = _libsbml.FbcRequiredFalse
## @var long FbcRequiredFalse
##
## <span class="pkg-marker pkg-color-fbc">fbc</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “fbc” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
FbcOnlyOneEachListOf = _libsbml.FbcOnlyOneEachListOf
## @var long FbcOnlyOneEachListOf
##
## <span class="pkg-marker pkg-color-fbc">fbc</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “fbc” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
FbcNoEmptyListOfs = _libsbml.FbcNoEmptyListOfs
## @var long FbcNoEmptyListOfs
##
## <span class="pkg-marker pkg-color-fbc">fbc</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “fbc” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
FbcLOFluxBoundsAllowedElements = _libsbml.FbcLOFluxBoundsAllowedElements
## @var long FbcLOFluxBoundsAllowedElements
##
## <span class="pkg-marker pkg-color-fbc">fbc</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “fbc” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
FbcLOObjectivesAllowedElements = _libsbml.FbcLOObjectivesAllowedElements
## @var long FbcLOObjectivesAllowedElements
##
## <span class="pkg-marker pkg-color-fbc">fbc</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “fbc” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
FbcLOFluxBoundsAllowedAttributes = _libsbml.FbcLOFluxBoundsAllowedAttributes
## @var long FbcLOFluxBoundsAllowedAttributes
##
## <span class="pkg-marker pkg-color-fbc">fbc</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “fbc” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
FbcLOObjectivesAllowedAttributes = _libsbml.FbcLOObjectivesAllowedAttributes
## @var long FbcLOObjectivesAllowedAttributes
##
## <span class="pkg-marker pkg-color-fbc">fbc</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “fbc” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
FbcActiveObjectiveSyntax = _libsbml.FbcActiveObjectiveSyntax
## @var long FbcActiveObjectiveSyntax
##
## <span class="pkg-marker pkg-color-fbc">fbc</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “fbc” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
FbcActiveObjectiveRefersObjective = _libsbml.FbcActiveObjectiveRefersObjective
## @var long FbcActiveObjectiveRefersObjective
##
## <span class="pkg-marker pkg-color-fbc">fbc</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “fbc” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
FbcSpeciesAllowedL3Attributes = _libsbml.FbcSpeciesAllowedL3Attributes
## @var long FbcSpeciesAllowedL3Attributes
##
## <span class="pkg-marker pkg-color-fbc">fbc</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “fbc” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
FbcSpeciesChargeMustBeInteger = _libsbml.FbcSpeciesChargeMustBeInteger
## @var long FbcSpeciesChargeMustBeInteger
##
## <span class="pkg-marker pkg-color-fbc">fbc</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “fbc” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
FbcSpeciesFormulaMustBeString = _libsbml.FbcSpeciesFormulaMustBeString
## @var long FbcSpeciesFormulaMustBeString
##
## <span class="pkg-marker pkg-color-fbc">fbc</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “fbc” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
FbcFluxBoundAllowedL3Attributes = _libsbml.FbcFluxBoundAllowedL3Attributes
## @var long FbcFluxBoundAllowedL3Attributes
##
## <span class="pkg-marker pkg-color-fbc">fbc</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “fbc” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
FbcFluxBoundAllowedElements = _libsbml.FbcFluxBoundAllowedElements
## @var long FbcFluxBoundAllowedElements
##
## <span class="pkg-marker pkg-color-fbc">fbc</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “fbc” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
FbcFluxBoundRequiredAttributes = _libsbml.FbcFluxBoundRequiredAttributes
## @var long FbcFluxBoundRequiredAttributes
##
## <span class="pkg-marker pkg-color-fbc">fbc</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “fbc” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
FbcFluxBoundRectionMustBeSIdRef = _libsbml.FbcFluxBoundRectionMustBeSIdRef
## @var long FbcFluxBoundRectionMustBeSIdRef
##
## <span class="pkg-marker pkg-color-fbc">fbc</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “fbc” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
FbcFluxBoundNameMustBeString = _libsbml.FbcFluxBoundNameMustBeString
## @var long FbcFluxBoundNameMustBeString
##
## <span class="pkg-marker pkg-color-fbc">fbc</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “fbc” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
FbcFluxBoundOperationMustBeEnum = _libsbml.FbcFluxBoundOperationMustBeEnum
## @var long FbcFluxBoundOperationMustBeEnum
##
## <span class="pkg-marker pkg-color-fbc">fbc</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “fbc” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
FbcFluxBoundValueMustBeDouble = _libsbml.FbcFluxBoundValueMustBeDouble
## @var long FbcFluxBoundValueMustBeDouble
##
## <span class="pkg-marker pkg-color-fbc">fbc</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “fbc” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
FbcFluxBoundReactionMustExist = _libsbml.FbcFluxBoundReactionMustExist
## @var long FbcFluxBoundReactionMustExist
##
## <span class="pkg-marker pkg-color-fbc">fbc</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “fbc” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
FbcFluxBoundsForReactionConflict = _libsbml.FbcFluxBoundsForReactionConflict
## @var long FbcFluxBoundsForReactionConflict
##
## <span class="pkg-marker pkg-color-fbc">fbc</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “fbc” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
FbcObjectiveAllowedL3Attributes = _libsbml.FbcObjectiveAllowedL3Attributes
## @var long FbcObjectiveAllowedL3Attributes
##
## <span class="pkg-marker pkg-color-fbc">fbc</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “fbc” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
FbcObjectiveAllowedElements = _libsbml.FbcObjectiveAllowedElements
## @var long FbcObjectiveAllowedElements
##
## <span class="pkg-marker pkg-color-fbc">fbc</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “fbc” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
FbcObjectiveRequiredAttributes = _libsbml.FbcObjectiveRequiredAttributes
## @var long FbcObjectiveRequiredAttributes
##
## <span class="pkg-marker pkg-color-fbc">fbc</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “fbc” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
FbcObjectiveNameMustBeString = _libsbml.FbcObjectiveNameMustBeString
## @var long FbcObjectiveNameMustBeString
##
## <span class="pkg-marker pkg-color-fbc">fbc</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “fbc” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
FbcObjectiveTypeMustBeEnum = _libsbml.FbcObjectiveTypeMustBeEnum
## @var long FbcObjectiveTypeMustBeEnum
##
## <span class="pkg-marker pkg-color-fbc">fbc</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “fbc” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
FbcObjectiveOneListOfObjectives = _libsbml.FbcObjectiveOneListOfObjectives
## @var long FbcObjectiveOneListOfObjectives
##
## <span class="pkg-marker pkg-color-fbc">fbc</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “fbc” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
FbcObjectiveLOFluxObjMustNotBeEmpty = _libsbml.FbcObjectiveLOFluxObjMustNotBeEmpty
## @var long FbcObjectiveLOFluxObjMustNotBeEmpty
##
## <span class="pkg-marker pkg-color-fbc">fbc</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “fbc” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
FbcObjectiveLOFluxObjOnlyFluxObj = _libsbml.FbcObjectiveLOFluxObjOnlyFluxObj
## @var long FbcObjectiveLOFluxObjOnlyFluxObj
##
## <span class="pkg-marker pkg-color-fbc">fbc</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “fbc” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
FbcObjectiveLOFluxObjAllowedAttribs = _libsbml.FbcObjectiveLOFluxObjAllowedAttribs
## @var long FbcObjectiveLOFluxObjAllowedAttribs
##
## <span class="pkg-marker pkg-color-fbc">fbc</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “fbc” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
FbcFluxObjectAllowedL3Attributes = _libsbml.FbcFluxObjectAllowedL3Attributes
## @var long FbcFluxObjectAllowedL3Attributes
##
## <span class="pkg-marker pkg-color-fbc">fbc</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “fbc” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
FbcFluxObjectAllowedElements = _libsbml.FbcFluxObjectAllowedElements
## @var long FbcFluxObjectAllowedElements
##
## <span class="pkg-marker pkg-color-fbc">fbc</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “fbc” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
FbcFluxObjectRequiredAttributes = _libsbml.FbcFluxObjectRequiredAttributes
## @var long FbcFluxObjectRequiredAttributes
##
## <span class="pkg-marker pkg-color-fbc">fbc</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “fbc” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
FbcFluxObjectNameMustBeString = _libsbml.FbcFluxObjectNameMustBeString
## @var long FbcFluxObjectNameMustBeString
##
## <span class="pkg-marker pkg-color-fbc">fbc</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “fbc” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
FbcFluxObjectReactionMustBeSIdRef = _libsbml.FbcFluxObjectReactionMustBeSIdRef
## @var long FbcFluxObjectReactionMustBeSIdRef
##
## <span class="pkg-marker pkg-color-fbc">fbc</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “fbc” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
FbcFluxObjectReactionMustExist = _libsbml.FbcFluxObjectReactionMustExist
## @var long FbcFluxObjectReactionMustExist
##
## <span class="pkg-marker pkg-color-fbc">fbc</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “fbc” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
FbcFluxObjectCoefficientMustBeDouble = _libsbml.FbcFluxObjectCoefficientMustBeDouble
## @var long FbcFluxObjectCoefficientMustBeDouble
##
## <span class="pkg-marker pkg-color-fbc">fbc</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “fbc” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
## LayoutSBMLErrorCode_t
LayoutUnknownError = _libsbml.LayoutUnknownError
## @var long LayoutUnknownError
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutNSUndeclared = _libsbml.LayoutNSUndeclared
## @var long LayoutNSUndeclared
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutElementNotInNs = _libsbml.LayoutElementNotInNs
## @var long LayoutElementNotInNs
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutDuplicateComponentId = _libsbml.LayoutDuplicateComponentId
## @var long LayoutDuplicateComponentId
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutSIdSyntax = _libsbml.LayoutSIdSyntax
## @var long LayoutSIdSyntax
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutXsiTypeAllowedLocations = _libsbml.LayoutXsiTypeAllowedLocations
## @var long LayoutXsiTypeAllowedLocations
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutXsiTypeSyntax = _libsbml.LayoutXsiTypeSyntax
## @var long LayoutXsiTypeSyntax
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutAttributeRequiredMissing = _libsbml.LayoutAttributeRequiredMissing
## @var long LayoutAttributeRequiredMissing
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutAttributeRequiredMustBeBoolean = _libsbml.LayoutAttributeRequiredMustBeBoolean
## @var long LayoutAttributeRequiredMustBeBoolean
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutRequiredFalse = _libsbml.LayoutRequiredFalse
## @var long LayoutRequiredFalse
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutOnlyOneLOLayouts = _libsbml.LayoutOnlyOneLOLayouts
## @var long LayoutOnlyOneLOLayouts
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutLOLayoutsNotEmpty = _libsbml.LayoutLOLayoutsNotEmpty
## @var long LayoutLOLayoutsNotEmpty
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutLOLayoutsAllowedElements = _libsbml.LayoutLOLayoutsAllowedElements
## @var long LayoutLOLayoutsAllowedElements
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutLOLayoutsAllowedAttributes = _libsbml.LayoutLOLayoutsAllowedAttributes
## @var long LayoutLOLayoutsAllowedAttributes
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutLayoutAllowedElements = _libsbml.LayoutLayoutAllowedElements
## @var long LayoutLayoutAllowedElements
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutLayoutAllowedCoreAttributes = _libsbml.LayoutLayoutAllowedCoreAttributes
## @var long LayoutLayoutAllowedCoreAttributes
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutOnlyOneEachListOf = _libsbml.LayoutOnlyOneEachListOf
## @var long LayoutOnlyOneEachListOf
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutNoEmptyListOfs = _libsbml.LayoutNoEmptyListOfs
## @var long LayoutNoEmptyListOfs
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutLayoutAllowedAttributes = _libsbml.LayoutLayoutAllowedAttributes
## @var long LayoutLayoutAllowedAttributes
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutLayoutNameMustBeString = _libsbml.LayoutLayoutNameMustBeString
## @var long LayoutLayoutNameMustBeString
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutLOCompGlyphAllowedAttributes = _libsbml.LayoutLOCompGlyphAllowedAttributes
## @var long LayoutLOCompGlyphAllowedAttributes
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutLOCompGlyphAllowedElements = _libsbml.LayoutLOCompGlyphAllowedElements
## @var long LayoutLOCompGlyphAllowedElements
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutLOSpeciesGlyphAllowedAttributes = _libsbml.LayoutLOSpeciesGlyphAllowedAttributes
## @var long LayoutLOSpeciesGlyphAllowedAttributes
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutLOSpeciesGlyphAllowedElements = _libsbml.LayoutLOSpeciesGlyphAllowedElements
## @var long LayoutLOSpeciesGlyphAllowedElements
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutLORnGlyphAllowedAttributes = _libsbml.LayoutLORnGlyphAllowedAttributes
## @var long LayoutLORnGlyphAllowedAttributes
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutLORnGlyphAllowedElements = _libsbml.LayoutLORnGlyphAllowedElements
## @var long LayoutLORnGlyphAllowedElements
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutLOAddGOAllowedAttribut = _libsbml.LayoutLOAddGOAllowedAttribut
## @var long LayoutLOAddGOAllowedAttribut
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutLOAddGOAllowedElements = _libsbml.LayoutLOAddGOAllowedElements
## @var long LayoutLOAddGOAllowedElements
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutLayoutMustHaveDimensions = _libsbml.LayoutLayoutMustHaveDimensions
## @var long LayoutLayoutMustHaveDimensions
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutLOTextGlyphAllowedAttributes = _libsbml.LayoutLOTextGlyphAllowedAttributes
## @var long LayoutLOTextGlyphAllowedAttributes
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutLOTextGlyphAllowedElements = _libsbml.LayoutLOTextGlyphAllowedElements
## @var long LayoutLOTextGlyphAllowedElements
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutGOAllowedCoreElements = _libsbml.LayoutGOAllowedCoreElements
## @var long LayoutGOAllowedCoreElements
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutGOAllowedCoreAttributes = _libsbml.LayoutGOAllowedCoreAttributes
## @var long LayoutGOAllowedCoreAttributes
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutGOAllowedElements = _libsbml.LayoutGOAllowedElements
## @var long LayoutGOAllowedElements
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutGOAllowedAttributes = _libsbml.LayoutGOAllowedAttributes
## @var long LayoutGOAllowedAttributes
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutGOMetaIdRefMustBeIDREF = _libsbml.LayoutGOMetaIdRefMustBeIDREF
## @var long LayoutGOMetaIdRefMustBeIDREF
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutGOMetaIdRefMustReferenceObject = _libsbml.LayoutGOMetaIdRefMustReferenceObject
## @var long LayoutGOMetaIdRefMustReferenceObject
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutGOMustContainBoundingBox = _libsbml.LayoutGOMustContainBoundingBox
## @var long LayoutGOMustContainBoundingBox
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutCGAllowedCoreElements = _libsbml.LayoutCGAllowedCoreElements
## @var long LayoutCGAllowedCoreElements
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutCGAllowedCoreAttributes = _libsbml.LayoutCGAllowedCoreAttributes
## @var long LayoutCGAllowedCoreAttributes
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutCGAllowedElements = _libsbml.LayoutCGAllowedElements
## @var long LayoutCGAllowedElements
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutCGAllowedAttributes = _libsbml.LayoutCGAllowedAttributes
## @var long LayoutCGAllowedAttributes
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutCGMetaIdRefMustBeIDREF = _libsbml.LayoutCGMetaIdRefMustBeIDREF
## @var long LayoutCGMetaIdRefMustBeIDREF
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutCGMetaIdRefMustReferenceObject = _libsbml.LayoutCGMetaIdRefMustReferenceObject
## @var long LayoutCGMetaIdRefMustReferenceObject
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutCGCompartmentSyntax = _libsbml.LayoutCGCompartmentSyntax
## @var long LayoutCGCompartmentSyntax
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutCGCompartmentMustRefComp = _libsbml.LayoutCGCompartmentMustRefComp
## @var long LayoutCGCompartmentMustRefComp
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutCGNoDuplicateReferences = _libsbml.LayoutCGNoDuplicateReferences
## @var long LayoutCGNoDuplicateReferences
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutCGOrderMustBeDouble = _libsbml.LayoutCGOrderMustBeDouble
## @var long LayoutCGOrderMustBeDouble
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutSGAllowedCoreElements = _libsbml.LayoutSGAllowedCoreElements
## @var long LayoutSGAllowedCoreElements
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutSGAllowedCoreAttributes = _libsbml.LayoutSGAllowedCoreAttributes
## @var long LayoutSGAllowedCoreAttributes
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutSGAllowedElements = _libsbml.LayoutSGAllowedElements
## @var long LayoutSGAllowedElements
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutSGAllowedAttributes = _libsbml.LayoutSGAllowedAttributes
## @var long LayoutSGAllowedAttributes
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutSGMetaIdRefMustBeIDREF = _libsbml.LayoutSGMetaIdRefMustBeIDREF
## @var long LayoutSGMetaIdRefMustBeIDREF
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutSGMetaIdRefMustReferenceObject = _libsbml.LayoutSGMetaIdRefMustReferenceObject
## @var long LayoutSGMetaIdRefMustReferenceObject
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutSGSpeciesSyntax = _libsbml.LayoutSGSpeciesSyntax
## @var long LayoutSGSpeciesSyntax
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutSGSpeciesMustRefSpecies = _libsbml.LayoutSGSpeciesMustRefSpecies
## @var long LayoutSGSpeciesMustRefSpecies
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutSGNoDuplicateReferences = _libsbml.LayoutSGNoDuplicateReferences
## @var long LayoutSGNoDuplicateReferences
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutRGAllowedCoreElements = _libsbml.LayoutRGAllowedCoreElements
## @var long LayoutRGAllowedCoreElements
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutRGAllowedCoreAttributes = _libsbml.LayoutRGAllowedCoreAttributes
## @var long LayoutRGAllowedCoreAttributes
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutRGAllowedElements = _libsbml.LayoutRGAllowedElements
## @var long LayoutRGAllowedElements
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutRGAllowedAttributes = _libsbml.LayoutRGAllowedAttributes
## @var long LayoutRGAllowedAttributes
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutRGMetaIdRefMustBeIDREF = _libsbml.LayoutRGMetaIdRefMustBeIDREF
## @var long LayoutRGMetaIdRefMustBeIDREF
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutRGMetaIdRefMustReferenceObject = _libsbml.LayoutRGMetaIdRefMustReferenceObject
## @var long LayoutRGMetaIdRefMustReferenceObject
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutRGReactionSyntax = _libsbml.LayoutRGReactionSyntax
## @var long LayoutRGReactionSyntax
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutRGReactionMustRefReaction = _libsbml.LayoutRGReactionMustRefReaction
## @var long LayoutRGReactionMustRefReaction
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutRGNoDuplicateReferences = _libsbml.LayoutRGNoDuplicateReferences
## @var long LayoutRGNoDuplicateReferences
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutLOSpeciesRefGlyphAllowedElements = _libsbml.LayoutLOSpeciesRefGlyphAllowedElements
## @var long LayoutLOSpeciesRefGlyphAllowedElements
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutLOSpeciesRefGlyphAllowedAttribs = _libsbml.LayoutLOSpeciesRefGlyphAllowedAttribs
## @var long LayoutLOSpeciesRefGlyphAllowedAttribs
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutLOSpeciesRefGlyphNotEmpty = _libsbml.LayoutLOSpeciesRefGlyphNotEmpty
## @var long LayoutLOSpeciesRefGlyphNotEmpty
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutGGAllowedCoreElements = _libsbml.LayoutGGAllowedCoreElements
## @var long LayoutGGAllowedCoreElements
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutGGAllowedCoreAttributes = _libsbml.LayoutGGAllowedCoreAttributes
## @var long LayoutGGAllowedCoreAttributes
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutGGAllowedElements = _libsbml.LayoutGGAllowedElements
## @var long LayoutGGAllowedElements
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutGGAllowedAttributes = _libsbml.LayoutGGAllowedAttributes
## @var long LayoutGGAllowedAttributes
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutGGMetaIdRefMustBeIDREF = _libsbml.LayoutGGMetaIdRefMustBeIDREF
## @var long LayoutGGMetaIdRefMustBeIDREF
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutGGMetaIdRefMustReferenceObject = _libsbml.LayoutGGMetaIdRefMustReferenceObject
## @var long LayoutGGMetaIdRefMustReferenceObject
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutGGReferenceSyntax = _libsbml.LayoutGGReferenceSyntax
## @var long LayoutGGReferenceSyntax
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutGGReferenceMustRefObject = _libsbml.LayoutGGReferenceMustRefObject
## @var long LayoutGGReferenceMustRefObject
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutGGNoDuplicateReferences = _libsbml.LayoutGGNoDuplicateReferences
## @var long LayoutGGNoDuplicateReferences
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutLOReferenceGlyphAllowedElements = _libsbml.LayoutLOReferenceGlyphAllowedElements
## @var long LayoutLOReferenceGlyphAllowedElements
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutLOReferenceGlyphAllowedAttribs = _libsbml.LayoutLOReferenceGlyphAllowedAttribs
## @var long LayoutLOReferenceGlyphAllowedAttribs
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutLOSubGlyphAllowedElements = _libsbml.LayoutLOSubGlyphAllowedElements
## @var long LayoutLOSubGlyphAllowedElements
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutLOSubGlyphAllowedAttribs = _libsbml.LayoutLOSubGlyphAllowedAttribs
## @var long LayoutLOSubGlyphAllowedAttribs
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutTGAllowedCoreElements = _libsbml.LayoutTGAllowedCoreElements
## @var long LayoutTGAllowedCoreElements
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutTGAllowedCoreAttributes = _libsbml.LayoutTGAllowedCoreAttributes
## @var long LayoutTGAllowedCoreAttributes
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutTGAllowedElements = _libsbml.LayoutTGAllowedElements
## @var long LayoutTGAllowedElements
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutTGAllowedAttributes = _libsbml.LayoutTGAllowedAttributes
## @var long LayoutTGAllowedAttributes
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutTGMetaIdRefMustBeIDREF = _libsbml.LayoutTGMetaIdRefMustBeIDREF
## @var long LayoutTGMetaIdRefMustBeIDREF
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutTGMetaIdRefMustReferenceObject = _libsbml.LayoutTGMetaIdRefMustReferenceObject
## @var long LayoutTGMetaIdRefMustReferenceObject
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutTGOriginOfTextSyntax = _libsbml.LayoutTGOriginOfTextSyntax
## @var long LayoutTGOriginOfTextSyntax
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutTGOriginOfTextMustRefObject = _libsbml.LayoutTGOriginOfTextMustRefObject
## @var long LayoutTGOriginOfTextMustRefObject
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutTGNoDuplicateReferences = _libsbml.LayoutTGNoDuplicateReferences
## @var long LayoutTGNoDuplicateReferences
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutTGGraphicalObjectSyntax = _libsbml.LayoutTGGraphicalObjectSyntax
## @var long LayoutTGGraphicalObjectSyntax
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutTGGraphicalObjectMustRefObject = _libsbml.LayoutTGGraphicalObjectMustRefObject
## @var long LayoutTGGraphicalObjectMustRefObject
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutTGTextMustBeString = _libsbml.LayoutTGTextMustBeString
## @var long LayoutTGTextMustBeString
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutSRGAllowedCoreElements = _libsbml.LayoutSRGAllowedCoreElements
## @var long LayoutSRGAllowedCoreElements
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutSRGAllowedCoreAttributes = _libsbml.LayoutSRGAllowedCoreAttributes
## @var long LayoutSRGAllowedCoreAttributes
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutSRGAllowedElements = _libsbml.LayoutSRGAllowedElements
## @var long LayoutSRGAllowedElements
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutSRGAllowedAttributes = _libsbml.LayoutSRGAllowedAttributes
## @var long LayoutSRGAllowedAttributes
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutSRGMetaIdRefMustBeIDREF = _libsbml.LayoutSRGMetaIdRefMustBeIDREF
## @var long LayoutSRGMetaIdRefMustBeIDREF
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutSRGMetaIdRefMustReferenceObject = _libsbml.LayoutSRGMetaIdRefMustReferenceObject
## @var long LayoutSRGMetaIdRefMustReferenceObject
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutSRGSpeciesReferenceSyntax = _libsbml.LayoutSRGSpeciesReferenceSyntax
## @var long LayoutSRGSpeciesReferenceSyntax
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutSRGSpeciesRefMustRefObject = _libsbml.LayoutSRGSpeciesRefMustRefObject
## @var long LayoutSRGSpeciesRefMustRefObject
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutSRGNoDuplicateReferences = _libsbml.LayoutSRGNoDuplicateReferences
## @var long LayoutSRGNoDuplicateReferences
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutSRGSpeciesGlyphSyntax = _libsbml.LayoutSRGSpeciesGlyphSyntax
## @var long LayoutSRGSpeciesGlyphSyntax
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutSRGSpeciesGlyphMustRefObject = _libsbml.LayoutSRGSpeciesGlyphMustRefObject
## @var long LayoutSRGSpeciesGlyphMustRefObject
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutSRGRoleSyntax = _libsbml.LayoutSRGRoleSyntax
## @var long LayoutSRGRoleSyntax
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutREFGAllowedCoreElements = _libsbml.LayoutREFGAllowedCoreElements
## @var long LayoutREFGAllowedCoreElements
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutREFGAllowedCoreAttributes = _libsbml.LayoutREFGAllowedCoreAttributes
## @var long LayoutREFGAllowedCoreAttributes
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutREFGAllowedElements = _libsbml.LayoutREFGAllowedElements
## @var long LayoutREFGAllowedElements
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutREFGAllowedAttributes = _libsbml.LayoutREFGAllowedAttributes
## @var long LayoutREFGAllowedAttributes
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutREFGMetaIdRefMustBeIDREF = _libsbml.LayoutREFGMetaIdRefMustBeIDREF
## @var long LayoutREFGMetaIdRefMustBeIDREF
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutREFGMetaIdRefMustReferenceObject = _libsbml.LayoutREFGMetaIdRefMustReferenceObject
## @var long LayoutREFGMetaIdRefMustReferenceObject
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutREFGReferenceSyntax = _libsbml.LayoutREFGReferenceSyntax
## @var long LayoutREFGReferenceSyntax
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutREFGReferenceMustRefObject = _libsbml.LayoutREFGReferenceMustRefObject
## @var long LayoutREFGReferenceMustRefObject
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutREFGNoDuplicateReferences = _libsbml.LayoutREFGNoDuplicateReferences
## @var long LayoutREFGNoDuplicateReferences
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutREFGGlyphSyntax = _libsbml.LayoutREFGGlyphSyntax
## @var long LayoutREFGGlyphSyntax
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutREFGGlyphMustRefObject = _libsbml.LayoutREFGGlyphMustRefObject
## @var long LayoutREFGGlyphMustRefObject
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutREFGRoleSyntax = _libsbml.LayoutREFGRoleSyntax
## @var long LayoutREFGRoleSyntax
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutPointAllowedCoreElements = _libsbml.LayoutPointAllowedCoreElements
## @var long LayoutPointAllowedCoreElements
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutPointAllowedCoreAttributes = _libsbml.LayoutPointAllowedCoreAttributes
## @var long LayoutPointAllowedCoreAttributes
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutPointAllowedAttributes = _libsbml.LayoutPointAllowedAttributes
## @var long LayoutPointAllowedAttributes
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutPointAttributesMustBeDouble = _libsbml.LayoutPointAttributesMustBeDouble
## @var long LayoutPointAttributesMustBeDouble
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutBBoxAllowedCoreElements = _libsbml.LayoutBBoxAllowedCoreElements
## @var long LayoutBBoxAllowedCoreElements
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutBBoxAllowedCoreAttributes = _libsbml.LayoutBBoxAllowedCoreAttributes
## @var long LayoutBBoxAllowedCoreAttributes
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutBBoxAllowedElements = _libsbml.LayoutBBoxAllowedElements
## @var long LayoutBBoxAllowedElements
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutBBoxAllowedAttributes = _libsbml.LayoutBBoxAllowedAttributes
## @var long LayoutBBoxAllowedAttributes
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutBBoxConsistent3DDefinition = _libsbml.LayoutBBoxConsistent3DDefinition
## @var long LayoutBBoxConsistent3DDefinition
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutCurveAllowedCoreElements = _libsbml.LayoutCurveAllowedCoreElements
## @var long LayoutCurveAllowedCoreElements
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutCurveAllowedCoreAttributes = _libsbml.LayoutCurveAllowedCoreAttributes
## @var long LayoutCurveAllowedCoreAttributes
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutCurveAllowedElements = _libsbml.LayoutCurveAllowedElements
## @var long LayoutCurveAllowedElements
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutCurveAllowedAttributes = _libsbml.LayoutCurveAllowedAttributes
## @var long LayoutCurveAllowedAttributes
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutLOCurveSegsAllowedAttributes = _libsbml.LayoutLOCurveSegsAllowedAttributes
## @var long LayoutLOCurveSegsAllowedAttributes
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutLOCurveSegsAllowedElements = _libsbml.LayoutLOCurveSegsAllowedElements
## @var long LayoutLOCurveSegsAllowedElements
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutLOCurveSegsNotEmpty = _libsbml.LayoutLOCurveSegsNotEmpty
## @var long LayoutLOCurveSegsNotEmpty
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutLSegAllowedCoreElements = _libsbml.LayoutLSegAllowedCoreElements
## @var long LayoutLSegAllowedCoreElements
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutLSegAllowedCoreAttributes = _libsbml.LayoutLSegAllowedCoreAttributes
## @var long LayoutLSegAllowedCoreAttributes
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutLSegAllowedElements = _libsbml.LayoutLSegAllowedElements
## @var long LayoutLSegAllowedElements
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutLSegAllowedAttributes = _libsbml.LayoutLSegAllowedAttributes
## @var long LayoutLSegAllowedAttributes
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutCBezAllowedCoreElements = _libsbml.LayoutCBezAllowedCoreElements
## @var long LayoutCBezAllowedCoreElements
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutCBezAllowedCoreAttributes = _libsbml.LayoutCBezAllowedCoreAttributes
## @var long LayoutCBezAllowedCoreAttributes
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutCBezAllowedElements = _libsbml.LayoutCBezAllowedElements
## @var long LayoutCBezAllowedElements
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutCBezAllowedAttributes = _libsbml.LayoutCBezAllowedAttributes
## @var long LayoutCBezAllowedAttributes
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutDimsAllowedCoreElements = _libsbml.LayoutDimsAllowedCoreElements
## @var long LayoutDimsAllowedCoreElements
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutDimsAllowedCoreAttributes = _libsbml.LayoutDimsAllowedCoreAttributes
## @var long LayoutDimsAllowedCoreAttributes
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutDimsAllowedAttributes = _libsbml.LayoutDimsAllowedAttributes
## @var long LayoutDimsAllowedAttributes
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
LayoutDimsAttributesMustBeDouble = _libsbml.LayoutDimsAttributesMustBeDouble
## @var long LayoutDimsAttributesMustBeDouble
##
## <span class="pkg-marker pkg-color-layout">layout</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “layout” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
## QualSBMLErrorCode_t
QualUnknown = _libsbml.QualUnknown
## @var long QualUnknown
##
## <span class="pkg-marker pkg-color-qual">qual</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “qual” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
QualNSUndeclared = _libsbml.QualNSUndeclared
## @var long QualNSUndeclared
##
## <span class="pkg-marker pkg-color-qual">qual</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “qual” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
QualElementNotInNs = _libsbml.QualElementNotInNs
## @var long QualElementNotInNs
##
## <span class="pkg-marker pkg-color-qual">qual</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “qual” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
QualFunctionTermBool = _libsbml.QualFunctionTermBool
## @var long QualFunctionTermBool
##
## <span class="pkg-marker pkg-color-qual">qual</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “qual” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
QualMathCSymbolDisallowed = _libsbml.QualMathCSymbolDisallowed
## @var long QualMathCSymbolDisallowed
##
## <span class="pkg-marker pkg-color-qual">qual</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “qual” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
QualDuplicateComponentId = _libsbml.QualDuplicateComponentId
## @var long QualDuplicateComponentId
##
## <span class="pkg-marker pkg-color-qual">qual</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “qual” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
QualAttributeRequiredMissing = _libsbml.QualAttributeRequiredMissing
## @var long QualAttributeRequiredMissing
##
## <span class="pkg-marker pkg-color-qual">qual</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “qual” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
QualAttributeRequiredMustBeBoolean = _libsbml.QualAttributeRequiredMustBeBoolean
## @var long QualAttributeRequiredMustBeBoolean
##
## <span class="pkg-marker pkg-color-qual">qual</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “qual” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
QualRequiredTrueIfTransitions = _libsbml.QualRequiredTrueIfTransitions
## @var long QualRequiredTrueIfTransitions
##
## <span class="pkg-marker pkg-color-qual">qual</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “qual” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
QualOneListOfTransOrQS = _libsbml.QualOneListOfTransOrQS
## @var long QualOneListOfTransOrQS
##
## <span class="pkg-marker pkg-color-qual">qual</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “qual” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
QualEmptyLONotAllowed = _libsbml.QualEmptyLONotAllowed
## @var long QualEmptyLONotAllowed
##
## <span class="pkg-marker pkg-color-qual">qual</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “qual” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
QualLOTransitiondAllowedElements = _libsbml.QualLOTransitiondAllowedElements
## @var long QualLOTransitiondAllowedElements
##
## <span class="pkg-marker pkg-color-qual">qual</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “qual” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
QualLOQualSpeciesAllowedElements = _libsbml.QualLOQualSpeciesAllowedElements
## @var long QualLOQualSpeciesAllowedElements
##
## <span class="pkg-marker pkg-color-qual">qual</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “qual” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
QualLOQualSpeciesAllowedAttributes = _libsbml.QualLOQualSpeciesAllowedAttributes
## @var long QualLOQualSpeciesAllowedAttributes
##
## <span class="pkg-marker pkg-color-qual">qual</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “qual” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
QualLOTransitionsAllowedAttributes = _libsbml.QualLOTransitionsAllowedAttributes
## @var long QualLOTransitionsAllowedAttributes
##
## <span class="pkg-marker pkg-color-qual">qual</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “qual” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
QualQualSpeciesAllowedCoreAttributes = _libsbml.QualQualSpeciesAllowedCoreAttributes
## @var long QualQualSpeciesAllowedCoreAttributes
##
## <span class="pkg-marker pkg-color-qual">qual</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “qual” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
QualQualSpeciesAllowedElements = _libsbml.QualQualSpeciesAllowedElements
## @var long QualQualSpeciesAllowedElements
##
## <span class="pkg-marker pkg-color-qual">qual</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “qual” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
QualQualSpeciesAllowedAttributes = _libsbml.QualQualSpeciesAllowedAttributes
## @var long QualQualSpeciesAllowedAttributes
##
## <span class="pkg-marker pkg-color-qual">qual</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “qual” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
QualConstantMustBeBool = _libsbml.QualConstantMustBeBool
## @var long QualConstantMustBeBool
##
## <span class="pkg-marker pkg-color-qual">qual</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “qual” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
QualNameMustBeString = _libsbml.QualNameMustBeString
## @var long QualNameMustBeString
##
## <span class="pkg-marker pkg-color-qual">qual</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “qual” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
QualInitialLevelMustBeInt = _libsbml.QualInitialLevelMustBeInt
## @var long QualInitialLevelMustBeInt
##
## <span class="pkg-marker pkg-color-qual">qual</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “qual” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
QualMaxLevelMustBeInt = _libsbml.QualMaxLevelMustBeInt
## @var long QualMaxLevelMustBeInt
##
## <span class="pkg-marker pkg-color-qual">qual</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “qual” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
QualCompartmentMustReferExisting = _libsbml.QualCompartmentMustReferExisting
## @var long QualCompartmentMustReferExisting
##
## <span class="pkg-marker pkg-color-qual">qual</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “qual” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
QualInitialLevelCannotExceedMax = _libsbml.QualInitialLevelCannotExceedMax
## @var long QualInitialLevelCannotExceedMax
##
## <span class="pkg-marker pkg-color-qual">qual</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “qual” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
QualConstantQSCannotBeOutput = _libsbml.QualConstantQSCannotBeOutput
## @var long QualConstantQSCannotBeOutput
##
## <span class="pkg-marker pkg-color-qual">qual</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “qual” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
QualQSAssignedOnlyOnce = _libsbml.QualQSAssignedOnlyOnce
## @var long QualQSAssignedOnlyOnce
##
## <span class="pkg-marker pkg-color-qual">qual</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “qual” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
QualInitalLevelNotNegative = _libsbml.QualInitalLevelNotNegative
## @var long QualInitalLevelNotNegative
##
## <span class="pkg-marker pkg-color-qual">qual</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “qual” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
QualMaxLevelNotNegative = _libsbml.QualMaxLevelNotNegative
## @var long QualMaxLevelNotNegative
##
## <span class="pkg-marker pkg-color-qual">qual</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “qual” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
QualTransitionAllowedCoreAttributes = _libsbml.QualTransitionAllowedCoreAttributes
## @var long QualTransitionAllowedCoreAttributes
##
## <span class="pkg-marker pkg-color-qual">qual</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “qual” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
QualTransitionAllowedElements = _libsbml.QualTransitionAllowedElements
## @var long QualTransitionAllowedElements
##
## <span class="pkg-marker pkg-color-qual">qual</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “qual” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
QualTransitionAllowedAttributes = _libsbml.QualTransitionAllowedAttributes
## @var long QualTransitionAllowedAttributes
##
## <span class="pkg-marker pkg-color-qual">qual</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “qual” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
QualTransitionNameMustBeString = _libsbml.QualTransitionNameMustBeString
## @var long QualTransitionNameMustBeString
##
## <span class="pkg-marker pkg-color-qual">qual</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “qual” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
QualTransitionLOElements = _libsbml.QualTransitionLOElements
## @var long QualTransitionLOElements
##
## <span class="pkg-marker pkg-color-qual">qual</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “qual” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
QualTransitionEmptyLOElements = _libsbml.QualTransitionEmptyLOElements
## @var long QualTransitionEmptyLOElements
##
## <span class="pkg-marker pkg-color-qual">qual</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “qual” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
QualTransitionLOInputElements = _libsbml.QualTransitionLOInputElements
## @var long QualTransitionLOInputElements
##
## <span class="pkg-marker pkg-color-qual">qual</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “qual” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
QualTransitionLOOutputElements = _libsbml.QualTransitionLOOutputElements
## @var long QualTransitionLOOutputElements
##
## <span class="pkg-marker pkg-color-qual">qual</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “qual” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
QualTransitionLOFuncTermElements = _libsbml.QualTransitionLOFuncTermElements
## @var long QualTransitionLOFuncTermElements
##
## <span class="pkg-marker pkg-color-qual">qual</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “qual” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
QualTransitionLOInputAttributes = _libsbml.QualTransitionLOInputAttributes
## @var long QualTransitionLOInputAttributes
##
## <span class="pkg-marker pkg-color-qual">qual</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “qual” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
QualTransitionLOOutputAttributes = _libsbml.QualTransitionLOOutputAttributes
## @var long QualTransitionLOOutputAttributes
##
## <span class="pkg-marker pkg-color-qual">qual</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “qual” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
QualTransitionLOFuncTermAttributes = _libsbml.QualTransitionLOFuncTermAttributes
## @var long QualTransitionLOFuncTermAttributes
##
## <span class="pkg-marker pkg-color-qual">qual</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “qual” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
QualTransitionLOFuncTermExceedMax = _libsbml.QualTransitionLOFuncTermExceedMax
## @var long QualTransitionLOFuncTermExceedMax
##
## <span class="pkg-marker pkg-color-qual">qual</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “qual” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
QualTransitionLOFuncTermNegative = _libsbml.QualTransitionLOFuncTermNegative
## @var long QualTransitionLOFuncTermNegative
##
## <span class="pkg-marker pkg-color-qual">qual</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “qual” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
QualInputAllowedCoreAttributes = _libsbml.QualInputAllowedCoreAttributes
## @var long QualInputAllowedCoreAttributes
##
## <span class="pkg-marker pkg-color-qual">qual</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “qual” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
QualInputAllowedElements = _libsbml.QualInputAllowedElements
## @var long QualInputAllowedElements
##
## <span class="pkg-marker pkg-color-qual">qual</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “qual” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
QualInputAllowedAttributes = _libsbml.QualInputAllowedAttributes
## @var long QualInputAllowedAttributes
##
## <span class="pkg-marker pkg-color-qual">qual</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “qual” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
QualInputNameMustBeString = _libsbml.QualInputNameMustBeString
## @var long QualInputNameMustBeString
##
## <span class="pkg-marker pkg-color-qual">qual</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “qual” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
QualInputSignMustBeSignEnum = _libsbml.QualInputSignMustBeSignEnum
## @var long QualInputSignMustBeSignEnum
##
## <span class="pkg-marker pkg-color-qual">qual</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “qual” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
QualInputTransEffectMustBeInputEffect = _libsbml.QualInputTransEffectMustBeInputEffect
## @var long QualInputTransEffectMustBeInputEffect
##
## <span class="pkg-marker pkg-color-qual">qual</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “qual” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
QualInputThreshMustBeInteger = _libsbml.QualInputThreshMustBeInteger
## @var long QualInputThreshMustBeInteger
##
## <span class="pkg-marker pkg-color-qual">qual</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “qual” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
QualInputQSMustBeExistingQS = _libsbml.QualInputQSMustBeExistingQS
## @var long QualInputQSMustBeExistingQS
##
## <span class="pkg-marker pkg-color-qual">qual</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “qual” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
QualInputConstantCannotBeConsumed = _libsbml.QualInputConstantCannotBeConsumed
## @var long QualInputConstantCannotBeConsumed
##
## <span class="pkg-marker pkg-color-qual">qual</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “qual” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
QualInputThreshMustBeNonNegative = _libsbml.QualInputThreshMustBeNonNegative
## @var long QualInputThreshMustBeNonNegative
##
## <span class="pkg-marker pkg-color-qual">qual</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “qual” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
QualOutputAllowedCoreAttributes = _libsbml.QualOutputAllowedCoreAttributes
## @var long QualOutputAllowedCoreAttributes
##
## <span class="pkg-marker pkg-color-qual">qual</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “qual” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
QualOutputAllowedElements = _libsbml.QualOutputAllowedElements
## @var long QualOutputAllowedElements
##
## <span class="pkg-marker pkg-color-qual">qual</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “qual” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
QualOutputAllowedAttributes = _libsbml.QualOutputAllowedAttributes
## @var long QualOutputAllowedAttributes
##
## <span class="pkg-marker pkg-color-qual">qual</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “qual” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
QualOutputNameMustBeString = _libsbml.QualOutputNameMustBeString
## @var long QualOutputNameMustBeString
##
## <span class="pkg-marker pkg-color-qual">qual</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “qual” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
QualOutputTransEffectMustBeOutput = _libsbml.QualOutputTransEffectMustBeOutput
## @var long QualOutputTransEffectMustBeOutput
##
## <span class="pkg-marker pkg-color-qual">qual</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “qual” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
QualOutputLevelMustBeInteger = _libsbml.QualOutputLevelMustBeInteger
## @var long QualOutputLevelMustBeInteger
##
## <span class="pkg-marker pkg-color-qual">qual</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “qual” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
QualOutputQSMustBeExistingQS = _libsbml.QualOutputQSMustBeExistingQS
## @var long QualOutputQSMustBeExistingQS
##
## <span class="pkg-marker pkg-color-qual">qual</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “qual” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
QualOutputConstantMustBeFalse = _libsbml.QualOutputConstantMustBeFalse
## @var long QualOutputConstantMustBeFalse
##
## <span class="pkg-marker pkg-color-qual">qual</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “qual” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
QualOutputProductionMustHaveLevel = _libsbml.QualOutputProductionMustHaveLevel
## @var long QualOutputProductionMustHaveLevel
##
## <span class="pkg-marker pkg-color-qual">qual</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “qual” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
QualOutputLevelMustBeNonNegative = _libsbml.QualOutputLevelMustBeNonNegative
## @var long QualOutputLevelMustBeNonNegative
##
## <span class="pkg-marker pkg-color-qual">qual</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “qual” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
QualDefaultTermAllowedCoreAttributes = _libsbml.QualDefaultTermAllowedCoreAttributes
## @var long QualDefaultTermAllowedCoreAttributes
##
## <span class="pkg-marker pkg-color-qual">qual</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “qual” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
QualDefaultTermAllowedElements = _libsbml.QualDefaultTermAllowedElements
## @var long QualDefaultTermAllowedElements
##
## <span class="pkg-marker pkg-color-qual">qual</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “qual” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
QualDefaultTermAllowedAttributes = _libsbml.QualDefaultTermAllowedAttributes
## @var long QualDefaultTermAllowedAttributes
##
## <span class="pkg-marker pkg-color-qual">qual</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “qual” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
QualDefaultTermResultMustBeInteger = _libsbml.QualDefaultTermResultMustBeInteger
## @var long QualDefaultTermResultMustBeInteger
##
## <span class="pkg-marker pkg-color-qual">qual</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “qual” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
QualDefaultTermResultMustBeNonNeg = _libsbml.QualDefaultTermResultMustBeNonNeg
## @var long QualDefaultTermResultMustBeNonNeg
##
## <span class="pkg-marker pkg-color-qual">qual</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “qual” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
QualFuncTermAllowedCoreAttributes = _libsbml.QualFuncTermAllowedCoreAttributes
## @var long QualFuncTermAllowedCoreAttributes
##
## <span class="pkg-marker pkg-color-qual">qual</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “qual” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
##
QualFuncTermAllowedElements = _libsbml.QualFuncTermAllowedElements
## @var long QualFuncTermAllowedElements
##
## <span class="pkg-marker pkg-color-qual">qual</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “qual” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
QualFuncTermAllowedAttributes = _libsbml.QualFuncTermAllowedAttributes
## @var long QualFuncTermAllowedAttributes
##
## <span class="pkg-marker pkg-color-qual">qual</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “qual” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
QualFuncTermOnlyOneMath = _libsbml.QualFuncTermOnlyOneMath
## @var long QualFuncTermOnlyOneMath
##
## <span class="pkg-marker pkg-color-qual">qual</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “qual” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
QualFuncTermResultMustBeInteger = _libsbml.QualFuncTermResultMustBeInteger
## @var long QualFuncTermResultMustBeInteger
##
## <span class="pkg-marker pkg-color-qual">qual</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “qual” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
QualFuncTermResultMustBeNonNeg = _libsbml.QualFuncTermResultMustBeNonNeg
## @var long QualFuncTermResultMustBeNonNeg
##
## <span class="pkg-marker pkg-color-qual">qual</span> A value in the
## enumeration of all the error and warning codes generated by the
## libSBML “qual” extension for objects of class
## SBMLError. Please consult the documentation for SBMLError
## for an explanation of the meaning of this particular error code.
LIBSBML_OVERRIDE_DISABLED = _libsbml.LIBSBML_OVERRIDE_DISABLED
## @var long LIBSBML_OVERRIDE_DISABLED
##
## Severity override code for errors logged in the XML layer.
##
## XMLErrorLog can be configured whether to log errors or not log them.
## This code has the following meaning: log errors in the error log, as
## normal.
LIBSBML_OVERRIDE_DONT_LOG = _libsbml.LIBSBML_OVERRIDE_DONT_LOG
## @var long LIBSBML_OVERRIDE_DONT_LOG
##
## Severity override code for errors logged in the XML layer.
##
## XMLErrorLog can be configured whether to log errors or not log them.
## This code has the following meaning: disable all error logging.
LIBSBML_OVERRIDE_WARNING = _libsbml.LIBSBML_OVERRIDE_WARNING
## @var long LIBSBML_OVERRIDE_WARNING
##
## Severity override code for errors logged in the XML layer.
##
## XMLErrorLog can be configured whether to log errors or not log them.
## This code has the following meaning: log all errors as warnings instead
## of actual errors.
AnnotationNotElement = _libsbml.AnnotationNotElement
## @var long AnnotationNotElement
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
L3PackageOnLowerSBML = _libsbml.L3PackageOnLowerSBML
## @var long L3PackageOnLowerSBML
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
GlobalUnitsNotDeclared = _libsbml.GlobalUnitsNotDeclared
## @var long GlobalUnitsNotDeclared
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
HasOnlySubstanceUnitsNotinL1 = _libsbml.HasOnlySubstanceUnitsNotinL1
## @var long HasOnlySubstanceUnitsNotinL1
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
AvogadroNotSupported = _libsbml.AvogadroNotSupported
## @var long AvogadroNotSupported
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
PackageRequiredShouldBeFalse = _libsbml.PackageRequiredShouldBeFalse
## @var long PackageRequiredShouldBeFalse
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
L3SubstanceUnitsOnModel = _libsbml.L3SubstanceUnitsOnModel
## @var long L3SubstanceUnitsOnModel
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
RDFMissingAboutTag = _libsbml.RDFMissingAboutTag
## @var long RDFMissingAboutTag
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
RDFEmptyAboutTag = _libsbml.RDFEmptyAboutTag
## @var long RDFEmptyAboutTag
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
RDFAboutTagNotMetaid = _libsbml.RDFAboutTagNotMetaid
## @var long RDFAboutTagNotMetaid
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
RDFNotCompleteModelHistory = _libsbml.RDFNotCompleteModelHistory
## @var long RDFNotCompleteModelHistory
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
RDFNotModelHistory = _libsbml.RDFNotModelHistory
## @var long RDFNotModelHistory
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
AnnotationNotElement = _libsbml.AnnotationNotElement
## @var long AnnotationNotElement
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class SBMLError. Please consult the
## documentation for SBMLError for an explanation of the
## meaning of this particular error code.
DanglingUnitSIdRef = _libsbml.DanglingUnitSIdRef
## @var long DanglingUnitSIdRef
##
## A value in the enumeration of all the SBML error and warning codes
## for objects of class {@link SBMLError}. Please consult the
## documentation for {@link SBMLError} for an explanation of the
## meaning of this particular error code.
|
TheCoSMoCompany/biopredyn
|
Prototype/src/libsbml-5.10.0/docs/src/python-substitutions/libsbml.py
|
Python
|
bsd-3-clause
| 479,413
|
[
"Avogadro"
] |
5072b8521607376bc2af252a75fed3610ff7ba53b3208c2281bef4a3f8f51635
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Classes and functions to simulate an Ornstein-Uhlenbeck process for the instantaneous growth rate
See also
--------
:mod:`tunacell.simu.base` for base classes to parameterize numerical simulations
"""
from __future__ import print_function
from builtins import input # future package
import os
import shutil
import logging
import datetime
import uuid
from tqdm import tqdm
import numpy as np
from tunacell.base.experiment import Experiment
from tunacell.base.container import Container
from tunacell.base.colony import Colony
from tunacell.io.metadata import Metadata
from tunacell.simu.base import Ecoli, SimuParams, DivisionParams, SampleInitialSize
def run_ou_simulation(
    simuParams, divParams, bsParams, ouParams, where, label, force=False
):
    """API function to run and export OU numerical simulation

    Parameters
    ----------
    simuParams : :class:`tunacell.simu.base.SimuParams` instance
        sets general simulation parameters
    divParams : :class:`tunacell.simu.base.DivisionParams` instance
        sets cell division process
    bsParams : :class:`tunacell.simu.base.SampleInitialSize` instance
        sets how initial cell size is sampled in simulation
    ouParams : :class:`OUParams`
        sets Ornstein-Uhlenbeck parameters for simulation of growth rate
    where : str
        parent directory where the experiment will be exported
    label : str
        name of the experiment to be exported
    force : bool (default False)
        whether to override a previous same-label experiment folder
    """
    exp = OUSimulation(
        label=label,
        simuParams=simuParams,
        divisionParams=divParams,
        ouParams=ouParams,
        birthsizeParams=bsParams,
    )
    # interactive conflict resolution when the target folder already exists
    ans = "go"
    current_name = label
    exp_path = os.path.join(where, label)
    if force and os.path.exists(exp_path):
        shutil.rmtree(exp_path)
    while os.path.exists(exp_path) and ans != "a":
        print("Experiment {} already exists.".format(current_name))
        ans = input("Override [o], Change experiment name [c], Abort [a]: ")
        if ans == "c":
            new_name = current_name
            while new_name == current_name:
                new_name = input("NEW NAME: ")
            exp.label = new_name
            exp_path = os.path.join(where, new_name)
            # BUGFIX: keep the displayed name in sync with the new label so
            # that a second conflict reports the name actually being checked
            current_name = new_name
        # when overriding, need to erase everything first
        elif ans == "o":
            shutil.rmtree(exp_path)
    # export except if process has been aborted
    if ans != "a":
        exp.raw_text_export(path=where)
class OUSimulation(Experiment):
    """Equivalent of Experiment class for simulation.

    An instance of OUSimulation contains all information to simulate an
    Ornstein-Uhlenbeck process in dividing cells.

    Parameters
    ----------
    label : str or None
        experiment name; None generates a timestamped default
    simuParams : SimuParams instance
        provides simulation parameters (number of containers, number of
        colonies per container, timing parameters)
    divisionParams : DivisionParams instance
        sets the division timing process
    ouParams : OUParams instance
        set the parameters for the Ornstein Uhlenbeck process
    birthsizeParams : SampleInitialSize instance
        sets how the initial cell size is sampled
    filter_set : filter set or None
        stored as ``self.fset`` for Experiment compatibility
        -- NOTE(review): expected type not visible here, confirm against callers
    """
    def __init__(
        self,
        label=None,
        simuParams=None,
        divisionParams=None,
        ouParams=None,
        birthsizeParams=None,
        filter_set=None,
    ):
        # creation date, exported in the metadata
        today = datetime.datetime.today()
        self.date = today
        # fall back to default parameter sets (announced on stdout)
        if simuParams is None:
            simuParams = SimuParams()
            print("Using default SimuParams:\n{}".format(simuParams))
        if divisionParams is None:
            divisionParams = DivisionParams()
            print("Using default DivisionParams:\n{}".format(divisionParams))
        if ouParams is None:
            ouParams = OUParams()
            print("Using default OUParams:\n{}".format(ouParams))
        if birthsizeParams is None:
            birthsizeParams = SampleInitialSize()
        if label is None:
            self._label = "simu_{}".format(today.strftime("%Y-%m-%d_%H-%M-%S"))
        else:
            self._label = label
        self.abspath = "{}".format(hex(id(self)))  # Experiment compatibility
        self.filetype = "simu"
        # dtype of the per-cell structured arrays built by the simulation
        self.datatype = [
            ("time", "f8"),
            ("ou", "f8"),
            ("ou_int", "f8"),
            ("exp_ou_int", "f8"),
            ("cellID", "u2"),
            ("parentID", "u2"),
        ]
        self.containers = []  # no container files: containers are generated on the fly
        self.simuParams = simuParams
        self.divisionParams = divisionParams
        self.ouParams = ouParams
        self.birthsizeParams = birthsizeParams
        self._set_metadata()
        # set filterset
        self.fset = filter_set
        return
    def _set_metadata(self):
        """Build the Metadata object from current label, date, and parameters."""
        dic = {
            "experiment": self.label,  # mandatory
            "period": self.simuParams.period,  # mandatory
            "date": self.date.strftime("%Y-%m-%d"),  # optional
        }
        # optional, but convenient: parameters as sub-dicts
        dic["simu_params"] = {k: v for k, v in self.simuParams.content}
        dic["division_params"] = {k: v for k, v in self.divisionParams.content}
        dic["ornstein_uhlenbeck_params"] = {k: v for k, v in self.ouParams.content}
        dic["birth_size_params"] = {k: v for k, v in self.birthsizeParams.content}
        self.metadata = Metadata([dic])
        return
    @property
    def label(self):
        # experiment name; exposed as a property so renaming refreshes metadata
        return self._label
    @label.setter
    def label(self, lab):
        # coerce non-string labels to their repr
        if not isinstance(lab, str):
            lab = repr(lab)
        self._label = lab
        # reset metadata in case label is changed
        self._set_metadata()
        return
    def info(self):
        """Return a human-readable multi-line summary of the parameter sets."""
        msg = (
            "Ornstein Uhlenbeck simulation on dividing cells" + "\n"
            "-----------------------------------------------" + "\n"
            "{}".format(self.simuParams) + "\n"
            "{}".format(self.divisionParams) + "\n"
            "{}".format(self.ouParams)
        )
        return msg
    def iter_containers(
        self,
        size=None,
        read=True,
        build=True,  # useless options
        prefilt=None,  # only used for compatibility
        extend_observables=True,  # idem
        report_NaNs=True,  # idem
        shuffle=False,
    ):  # idem
        """Yield OUContainer instances, one per simulated container.

        Only ``size`` is used here; the other keyword arguments exist for
        signature compatibility with Experiment.iter_containers.
        """
        if size is None:
            size = self.simuParams.nbr_container
        # width for zero-padded container labels
        ndigits = len("{:d}".format(size))
        # seed the global numpy RNG so simulations are reproducible
        np.random.seed(self.simuParams.seed)
        # start loop over containers
        for index in tqdm(range(size), desc="simulation"):
            label = "container_{nbr:0{digits:d}d}".format(digits=ndigits, nbr=index + 1)
            yield OUContainer(
                self,
                label=label,
                simuParams=self.simuParams,
                divisionParams=self.divisionParams,
                ouParams=self.ouParams,
                birthsizeParams=self.birthsizeParams,
            )
        return
class OUContainer(Container):
    """Container subclass holding one simulated set of colonies.

    Subclassed from :class:`tunacell.base.container.Container`; only
    ``__init__`` differs — instead of reading files, it runs the OU
    simulation for a number of colonies and stores the resulting trees
    and cells.

    Parameters
    ----------
    simu : OUSimulation instance
    label : str or None
        container label; None generates one from a uuid
    simuParams : SimuParams instance
    divisionParams : DivisionParams instance
    ouParams : OUParams instance
    birthsizeParams : SampleInitialSize instance
    """

    def __init__(
        self,
        simu,
        label=None,
        simuParams=None,
        divisionParams=None,
        ouParams=None,
        birthsizeParams=None,
    ):
        """Runs the simulation upon call."""
        # Container-compatibility attributes
        self.exp = simu
        self.abspath = "{}".format(hex(id(self)))
        self.filetype = "simulations"
        self.datatype = simu.datatype
        self.metadata = simu.metadata
        self.label = label if label is not None else str(uuid.uuid1())[:8]
        self.cells = []
        self.trees = []
        # build one colony per requested sample, chaining the cell counter
        colonies = []
        cells = []
        last = 0
        for _ in range(simuParams.nbr_colony_per_container):
            colony, last = ou_tree(
                ouParams,
                divisionParams,
                birthsizeParams,
                count=last + 1,
                tstart=simuParams.start,
                tstop=simuParams.stop,
                dt=simuParams.period,
            )
            colony.container = self
            colonies.append(colony)
            cells.extend(colony.all_nodes())
        self.trees = colonies
        self.cells = cells
        return
class OUParams(object):
    """Parameter set for an Ornstein-Uhlenbeck growth-rate process.

    Parameters
    ----------
    target : float (default np.log(2)/60)
        stationary mean of the growth rate; the default corresponds to
        one doubling per hour, in units of min^-1
    spring : float (default 1/30)
        relaxation (spring) constant, the inverse autocorrelation time,
        default value is 1/30 per minute
    noise : float (default 2.0/30.0 * (np.log(2.) / 600.)**2)
        noise intensity; the default yields a ~10% standard deviation at
        steady state together with the two other default parameters

    See also
    --------
    Gillespie, D.T., Phys Rev E, vol 54, pp 2084-2091 (1996)
    """

    def __init__(
        self,
        target=np.log(2.0) / 60,
        spring=1.0 / 30.0,
        noise=2.0 / 30.0 * (np.log(2.0) / 600.0) ** 2,
    ):
        self.target = target
        self.spring = spring
        self.noise = noise
        # (name, value) pairs consumed by the metadata export machinery
        self.content = [
            ("target", float(target)),
            ("spring", float(spring)),
            ("noise", float(noise)),
        ]
        return

    def __repr__(self):
        lines = [
            "Ornstein-Uhlenbeck parameters:",
            "Equation: dx/dt = -k * (x - x_0) + xi(t)",
            "with <xi(t)xi(s)> = eta^2 delta(t-s)",
            "* x_0, target value: {}".format(self.target),
            "* k, spring constant: {}".format(self.spring),
            "* eta^2, noise intensity: {}".format(self.noise),
        ]
        # trailing newline matches the historical output format
        return "\n".join(lines) + "\n"
class OUsteps(object):
    """Precomputed Gillespie update coefficients for an OU process.

    Given OU parameters and a fixed time step ``dt``, exact joint updates
    of the process X(t) and its integral Y(t) need only four scalar
    coefficients, computed once here.

    Parameters
    ----------
    params : OUParams instance
        set of parameters for the OU process (reads .spring and .noise)
    dt : float
        time interval between two updates

    Attributes
    ----------
    mu : float
        decay factor exp(-k*dt)
    sigma : float
        standard deviation of the X update
    sigma_y : float
        standard deviation of the Y update
    kappa : float
        X/Y update covariance coefficient

    See also
    --------
    Gillespie, D.T., Phys Rev E, vol 54, pp 2084-2091 (1996)
    """

    def __init__(self, params, dt=1.0):
        spring = params.spring
        noise = params.noise
        decay = np.exp(-spring * dt)
        # variances of the exact X and Y increments over one step of dt
        var_x = noise / (2.0 * spring) * (1.0 - np.exp(-2.0 * spring * dt))
        var_y = noise / (spring ** 3) * (
            spring * dt - 2.0 * (1.0 - decay) + 0.5 * (1.0 - decay ** 2)
        )
        self.mu = decay
        self.sigma = np.sqrt(var_x)
        self.sigma_y = np.sqrt(var_y)
        self.kappa = noise * (1.0 - decay) ** 2 / (2.0 * spring ** 2)
        return
def ou_track(params, dt=1.0, steps=10, start=0.0, y_start=1.0):
    r"""Sample a zero-mean OU process X(t) together with its integral Y(t).

    Uses the exact (Gillespie) update scheme: all Gaussian variates are
    drawn up-front, and each step updates X and Y jointly with the
    coefficients precomputed by :class:`OUsteps`.

    Parameters
    ----------
    params : OUParams instance
    dt : float
        time interval between two steps
    steps : int
        number of update steps; output arrays hold ``steps + 1`` values
    start : float
        value of the OU process at initial step (step 0)
    y_start : float
        value of the integrated OU process at initial step (step 0)

    Returns
    -------
    (ndarray, ndarray)
        X(t) and Y(t) sampled at time interval dt

    Notes
    -----
    X(t) is the zero-mean Ornstein-Uhlenbeck process

    ..math::
        \frac{d X(t)}{dt} = - k X(t) + c^{1/2} \Gamma(t)

    with Gaussian white noise :math:`\langle \Gamma(t) \Gamma(s) \rangle
    = \delta(t-s)`, and the integrated process obeys
    :math:`\frac{d Y(t)}{dt} = X(t)`.

    See also
    --------
    Gillespie, D.T., Phys Rev E, vol 54, pp 2084-2091 (1996)
    """
    track_x = np.zeros(steps + 1, dtype="f8")
    track_y = np.zeros(steps + 1, dtype="f8")
    track_x[0] = start
    track_y[0] = y_start
    # draw every Gaussian variate up-front (keeps the RNG stream identical
    # regardless of loop structure)
    gauss = np.random.normal(0.0, 1.0, size=(steps, 2))
    spring = params.spring
    coeffs = OUsteps(params, dt=dt)  # Gillespie-like step coefficients
    mu = coeffs.mu
    sig_x = coeffs.sigma
    sig_y = coeffs.sigma_y
    kappa = coeffs.kappa
    # loop-invariant cross-correlation factor for the Y update
    cross = np.sqrt(sig_y ** 2 - kappa ** 2 / sig_x ** 2)
    for step, (g1, g2) in enumerate(gauss, start=1):
        # exact joint update of (X, Y) over one interval dt
        track_x[step] = track_x[step - 1] * mu + sig_x * g1
        track_y[step] = track_y[step - 1] + (
            track_x[step - 1] * (1.0 - mu) / spring
            + cross * g2
            + kappa / sig_x * g1
        )
    return track_x, track_y
def ou_tree(
    ouparams, divparams, birthsizeparams, count=None, tstart=0.0, tstop=300.0, dt=5.0
):
    """Generate a colony of dividing cells carrying a simulated OU process.

    Parameters
    ----------
    ouparams : OUParams instance
    divparams : DivisionParams instance
    birthsizeparams : SampleInitialSize instance
    count : int or None
        integer label for the root cell; None switches to automatic labels
    tstart : float
        time at which simulation starts
    tstop : float
        time at which simulation stops
    dt : float
        time interval at which values of the OU process are recorded;
        interdivision times smaller than dt are rejected

    Returns
    -------
    tree : Colony instance
        simulated process is stored in each node's Ecoli.data
    count : int
        last cell label used
    """
    # integer labels are exported as strings; None lets Ecoli auto-label
    root_label = str(count) if count is not None else None
    founder = root_cell(
        ouparams, divparams, birthsizeparams, identifier=root_label, tstart=tstart, dt=dt
    )
    colony = Colony()
    colony.add_node(founder)
    count = add_recursive_branch(
        founder,
        colony,
        count=count,
        tstart=tstart,
        tstop=tstop,
        dt=dt,
        ouparams=ouparams,
        divparams=divparams,
    )
    return colony, count
def root_cell(
    ouparams, divparams, birthsizeparams, identifier=None, tstart=0.0, dt=5.0
):
    """Set state for root cell, that initializes a colony sample.

    Parameters
    ----------
    ouparams : OUParams instance
        store OU parameters: target, spring, noise
    divparams : DivisionParams instance
        store information to generate random cell cycle duration
    birthsizeparams : SampleInitialSize instance
        sets sampling of root cell birth size
    identifier : str or int (default None)
        identifier to give to root cell; None lets Ecoli auto-label
    tstart : float (default 0.)
        start of recording simulations
    dt : float (default 5.)
        time between two successive acquisitions; smaller interdivision
        times are rejected

    Returns
    -------
    Ecoli instance
        root cell with ``birth_value`` set to
        (birth growth rate, log(birth size))

    Raises
    ------
    ValueError
        when 1000 consecutive interdivision-time draws fall below dt
    """
    # sample initial growth rate from the OU stationary distribution
    equilibrium_mean = ouparams.target
    equilibrium_std = np.sqrt(ouparams.noise / (2.0 * ouparams.spring))
    birth_growth_rate = -1
    while birth_growth_rate < 0:  # reject negative growth rates
        birth_growth_rate = np.random.normal(
            loc=equilibrium_mean, scale=equilibrium_std
        )
    birth_size = birthsizeparams.rv()
    if divparams.use_growth_rate == "parameter":
        growth_rate = ouparams.target
    elif divparams.use_growth_rate == "birth":
        growth_rate = birth_growth_rate
    # reject lifetime smaller than dt
    counter = 0
    lifetime_root = -1
    while lifetime_root < dt and counter < 1000:
        lifetime_root = divparams.rv(birth_size, growth_rate)
        if lifetime_root < dt:
            logging.info(
                "Rejecting interdivision time {} < period {}".format(lifetime_root, dt)
            )
        counter += 1
    # BUGFIX: test the actual outcome, not the draw counter -- the old
    # `counter == 1000` check raised even when the 1000th draw succeeded.
    # Message wording also fixed ("smaller", not "larger"), matching the
    # analogous check in add_recursive_branch.
    if lifetime_root < dt:
        raise ValueError("Interdivision time always smaller than acquisition period.")
    # root cell is observed at a uniformly random age within its cycle
    age_root = np.random.uniform()
    birth_root = tstart - age_root * lifetime_root
    root = Ecoli(
        identifier=identifier,
        parent=None,
        birth_time=birth_root,
        lifetime=lifetime_root,
    )
    root.birth_value = (birth_growth_rate, np.log(birth_size))
    return root
def add_recursive_branch(
    ecoli,
    tree,
    count=None,
    tstart=0.0,
    tstop=300.0,
    dt=5.0,
    ouparams=None,
    divparams=None,
):
    """Main function for generating the tree and simulated process.

    Simulates the OU process over the cell cycle of `ecoli`, stores the
    sampled values in ``ecoli.data``, then recurses on two daughter cells
    while division happens before `tstop`.

    Parameters
    ----------
    ecoli : Ecoli instance
        cell whose cycle is simulated; its ``birth_value`` must be set
    tree : Colony instance
        tree to which daughter cells are appended
    count : int or None
        last integer cell label used; None switches to uuid-based labels
    tstart, tstop : float
        recording window
    dt : float
        acquisition period; interdivision times below dt are rejected
    ouparams : OUParams instance or None (default None)
        None builds a default OUParams()
    divparams : DivisionParams instance or None (default None)
        None builds a default DivisionParams()

    Returns
    -------
    int or None
        last integer label used (None under uuid labeling)

    Raises
    ------
    ValueError
        when 1000 consecutive interdivision-time draws fall below dt
    """
    # BUGFIX: parameter sets were mutable default arguments, built once at
    # import time; use None sentinels and build them per call instead
    if ouparams is None:
        ouparams = OUParams()
    if divparams is None:
        divparams = DivisionParams()
    x_start, y_start = ecoli.birth_value
    t_birth = ecoli.birth_time
    t_div = ecoli.division_time
    if count is None:  # identifiers are 36-char uuid strings
        ecoli.tag = ecoli.identifier[:8]
        if isinstance(ecoli.identifier, str):
            idtype = "U36"  # unicode string
        else:
            idtype = "S36"  # byte string

        # uuid labels are stored verbatim
        def store_id(identifier):
            return identifier

    else:  # identifiers are strings made from integers
        ecoli.tag = ecoli.identifier
        idtype = "u2"

        # integer labels are stored as unsigned integers
        def store_id(identifier):
            return int(identifier)

    # first acquisition time strictly after birth on the tstart + k*dt grid
    first_rec_time = tstart + (np.floor((t_birth - tstart) / dt) + 1.0) * dt
    first_rec_time = max(tstart, first_rec_time)  # selection for root cell
    rec_times = np.arange(first_rec_time, min(t_div, tstop), dt)
    # (this was a selection for leaves: the ones that cross tstop)
    cid = store_id(ecoli.identifier)
    if ecoli.bpointer is None:
        pid = store_id("0")  # root cell: parent id is "0"
    else:
        pid = store_id(ecoli.bpointer)
    ecoli.rec_times = rec_times
    # Check that there is at least one recording time
    if len(rec_times):
        # first step from birth to first recording time
        (xs, ys) = ou_track(
            ouparams,
            dt=rec_times[0] - t_birth,
            steps=1,
            start=x_start - ouparams.target,
            y_start=y_start,
        )
        first_x = xs[-1]
        first_y = ys[-1]
        # then sample at the regular acquisition period
        (xs, ys) = ou_track(
            ouparams, dt=dt, steps=len(rec_times) - 1, start=first_x, y_start=first_y
        )
        # shift back from the zero-mean process to the target-centered one
        x_rec_values = xs + ouparams.target
        y_rec_values = ys + ouparams.target * (rec_times - t_birth)
        length_like_values = np.exp(y_rec_values)
        last_x = xs[-1]
        last_y = ys[-1]
        rec_ids = len(rec_times) * [cid]
        rec_pids = len(rec_times) * [pid]
        ecoli.data = np.zeros(
            len(rec_times),
            dtype=[
                ("time", "f8"),
                ("ou", "f8"),
                ("ou_int", "f8"),
                ("exp_ou_int", "f8"),
                ("cellID", idtype),
                ("parentID", idtype),
            ],
        )
        ecoli.data["time"] = rec_times
        ecoli.data["ou"] = x_rec_values
        ecoli.data["ou_int"] = y_rec_values
        ecoli.data["exp_ou_int"] = length_like_values
        ecoli.data["cellID"] = rec_ids
        ecoli.data["parentID"] = rec_pids
        last_dt = t_div - rec_times[-1]
    # otherwise, data is set to empty array, but we update cycle bounds
    else:
        last_x = x_start - ouparams.target
        last_y = y_start
        last_dt = t_div - t_birth
        # empty array since no recording time between birth and division
        ecoli.data = np.array(
            [],
            dtype=[
                ("time", "f8"),
                ("ou", "f8"),
                ("ou_int", "f8"),
                ("exp_ou_int", "f8"),
                ("cellID", idtype),
                ("parentID", idtype),
            ],
        )
    # last step, from the last recorded time to division
    (xs, ys) = ou_track(ouparams, dt=last_dt, steps=1, start=last_x, y_start=last_y)
    ecoli.division_value = (
        xs[-1] + ouparams.target,
        ys[-1] + ouparams.target * (t_div - t_birth),
    )
    # create two daughter cells if time has not reached tmax
    if t_div < tstop:
        new_birth_size = np.exp(ecoli.division_value[1]) / 2.0  # symmetric division
        new_alpha = ecoli.division_value[0]
        for i in range(2):
            if divparams.use_growth_rate == "parameter":
                alpha = ouparams.target
            elif divparams.use_growth_rate == "birth":
                alpha = new_alpha
            # compute cell interdivision time and accept if larger than period
            counter = 0
            lt = -1
            while lt < dt and counter < 1000:
                lt = divparams.rv(new_birth_size, alpha)
                if lt < dt:
                    logging.info(
                        "Rejecting interdivision time {} < period {}".format(lt, dt)
                    )
                counter += 1
            # BUGFIX: test the outcome, not the draw counter -- the old
            # `counter == 1000` check raised even when the last draw succeeded
            if lt < dt:
                raise ValueError(
                    "Interdivision time always smaller than acquisition period"
                )
            # moving on
            if count is not None:
                count += 1
                newid = str(count)
            else:
                newid = None  # will create identifier as uuid item
            necoli = Ecoli(identifier=newid, parent=ecoli, lifetime=lt)
            # update necoli.birth_value for y process
            necoli.birth_value = (new_alpha, np.log(new_birth_size))
            tree.add_node(necoli, parent=ecoli.identifier)
            count = add_recursive_branch(
                necoli,
                tree,
                count=count,
                tstart=tstart,
                tstop=tstop,
                dt=dt,
                ouparams=ouparams,
                divparams=divparams,
            )
    return count  # this is the last taken integer + 1
|
LeBarbouze/tunacell
|
tunacell/simu/ou.py
|
Python
|
mit
| 22,612
|
[
"DIRAC",
"Gaussian"
] |
7b65e4685938c9aaa423940514a6dc8e594d15968605073b6da9a04412f96141
|
# shelve.py - save/restore working directory state
#
# Copyright 2013 Facebook, Inc.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
"""save and restore changes to the working directory
The "hg shelve" command saves changes made to the working directory
and reverts those changes, resetting the working directory to a clean
state.
Later on, the "hg unshelve" command restores the changes saved by "hg
shelve". Changes can be restored even after updating to a different
parent, in which case Mercurial's merge machinery will resolve any
conflicts if necessary.
You can have more than one shelved change outstanding at a time; each
shelved change has a distinct name. For details, see the help for "hg
shelve".
"""
from mercurial.i18n import _
from mercurial.node import nullid, nullrev, bin, hex
from mercurial import changegroup, cmdutil, scmutil, phases, commands
from mercurial import error, hg, mdiff, merge, patch, repair, util
from mercurial import templatefilters, changegroup, exchange
from mercurial import lock as lockmod
from hgext import rebase
import errno
cmdtable = {}
command = cmdutil.command(cmdtable)
testedwith = 'internal'
class shelvedfile(object):
    """Helper for the file storing a single shelve

    Handles common functions on shelve files (.hg/.files/.patch) using
    the vfs layer"""
    def __init__(self, repo, name, filetype=None):
        # vfs rooted at .hg/shelved; the backing file is "<name>.<filetype>"
        # when a filetype is given, bare "<name>" otherwise
        self.repo = repo
        self.name = name
        self.vfs = scmutil.vfs(repo.join('shelved'))
        if filetype:
            self.fname = name + '.' + filetype
        else:
            self.fname = name
    def exists(self):
        # whether the backing file exists on disk
        return self.vfs.exists(self.fname)
    def filename(self):
        # absolute path of the backing file
        return self.vfs.join(self.fname)
    def unlink(self):
        # remove the backing file
        util.unlink(self.filename())
    def stat(self):
        # stat the backing file (propagates OSError if it is missing)
        return self.vfs.stat(self.fname)
    def opener(self, mode='rb'):
        # open the backing file; a missing file is reported as a user-level
        # "shelved change not found" abort instead of a raw ENOENT IOError
        try:
            return self.vfs(self.fname, mode)
        except IOError, err:
            if err.errno != errno.ENOENT:
                raise
            raise util.Abort(_("shelved change '%s' not found") % self.name)
    def applybundle(self):
        # read the shelved bundle and apply its changegroup to the repo
        fp = self.opener()
        try:
            gen = exchange.readbundle(self.repo.ui, fp, self.fname, self.vfs)
            changegroup.addchangegroup(self.repo, gen, 'unshelve',
                                       'bundle:' + self.vfs.join(self.fname))
        finally:
            fp.close()
    def writebundle(self, cg):
        # write changegroup cg as an uncompressed HG10 bundle
        changegroup.writebundle(cg, self.fname, 'HG10UN', self.vfs)
class shelvedstate(object):
    """Handle persistence during unshelving operations.

    Handles saving and restoring a shelved state. Ensures that different
    versions of a shelved state are possible and handles them appropriately.
    """
    # bump _version whenever the line-oriented on-disk format below changes
    _version = 1
    _filename = 'shelvedstate'
    @classmethod
    def load(cls, repo):
        """Read .hg/shelvedstate and rebuild a shelvedstate instance.

        The file holds one item per line, in order: format version,
        shelve name, working-context hash, pending-context hash,
        space-separated dirstate parent hashes, space-separated strip
        node hashes."""
        fp = repo.opener(cls._filename)
        try:
            version = int(fp.readline().strip())
            if version != cls._version:
                raise util.Abort(_('this version of shelve is incompatible '
                                   'with the version used in this repo'))
            name = fp.readline().strip()
            wctx = fp.readline().strip()
            pendingctx = fp.readline().strip()
            parents = [bin(h) for h in fp.readline().split()]
            stripnodes = [bin(h) for h in fp.readline().split()]
        finally:
            fp.close()
        obj = cls()
        obj.name = name
        # resolve the stored hex hashes back to changectx objects
        obj.wctx = repo[bin(wctx)]
        obj.pendingctx = repo[bin(pendingctx)]
        obj.parents = parents
        obj.stripnodes = stripnodes
        return obj
    @classmethod
    def save(cls, repo, name, originalwctx, pendingctx, stripnodes):
        """Write the unshelve-in-progress state to .hg/shelvedstate.

        Must stay in sync with the line order read back by load()."""
        fp = repo.opener(cls._filename, 'wb')
        fp.write('%i\n' % cls._version)
        fp.write('%s\n' % name)
        fp.write('%s\n' % hex(originalwctx.node()))
        fp.write('%s\n' % hex(pendingctx.node()))
        fp.write('%s\n' % ' '.join([hex(p) for p in repo.dirstate.parents()]))
        fp.write('%s\n' % ' '.join([hex(n) for n in stripnodes]))
        fp.close()
    @classmethod
    def clear(cls, repo):
        """Remove the state file; missing file is not an error."""
        util.unlinkpath(repo.join(cls._filename), ignoremissing=True)
def createcmd(ui, repo, pats, opts):
    """subcommand that creates a new shelve

    Commits the dirty working directory as a temporary (secret) commit,
    writes it out as a bundle plus a patch and a file list, then rolls the
    repository back by aborting the transaction and updating to the parent.
    Returns 1 when there was nothing to shelve.
    """
    def publicancestors(ctx):
        """Compute the public ancestors of a commit.
        Much faster than the revset ancestors(ctx) & draft()"""
        # BFS over mutable ancestors only; nullrev is pre-seeded so the
        # walk terminates at the repository root.
        seen = set([nullrev])
        visit = util.deque()
        visit.append(ctx)
        while visit:
            ctx = visit.popleft()
            yield ctx.node()
            for parent in ctx.parents():
                rev = parent.rev()
                if rev not in seen:
                    seen.add(rev)
                    if parent.mutable():
                        visit.append(parent)
    wctx = repo[None]
    parents = wctx.parents()
    if len(parents) > 1:
        raise util.Abort(_('cannot shelve while merging'))
    parent = parents[0]
    # we never need the user, so we use a generic user for all shelve operations
    user = 'shelve@localhost'
    label = repo._bookmarkcurrent or parent.branch() or 'default'
    # slashes aren't allowed in filenames, therefore we rename it
    label = label.replace('/', '_')
    def gennames():
        # Candidate shelve names: the bare label, then label-01 .. label-99.
        yield label
        for i in xrange(1, 100):
            yield '%s-%02d' % (label, i)
    shelvedfiles = []
    def commitfunc(ui, repo, message, match, opts):
        # check modified, added, removed, deleted only
        for flist in repo.status(match=match)[:4]:
            shelvedfiles.extend(flist)
        # Temporarily disable mq's "patches applied" guard so the shelve
        # commit is allowed on top of applied mq patches.
        hasmq = util.safehasattr(repo, 'mq')
        if hasmq:
            saved, repo.mq.checkapplied = repo.mq.checkapplied, False
        try:
            return repo.commit(message, user, opts.get('date'), match,
                               editor=cmdutil.getcommiteditor(**opts))
        finally:
            if hasmq:
                repo.mq.checkapplied = saved
    if parent.node() != nullid:
        desc = "changes to '%s'" % parent.description().split('\n', 1)[0]
    else:
        desc = '(changes in empty repository)'
    if not opts['message']:
        opts['message'] = desc
    name = opts['name']
    wlock = lock = tr = bms = None
    try:
        wlock = repo.wlock()
        lock = repo.lock()
        # Bookmarks are saved here and restored in the finally block,
        # because the temporary commit may move them.
        bms = repo._bookmarks.copy()
        # use an uncommitted transaction to generate the bundle to avoid
        # pull races. ensure we don't print the abort message to stderr.
        tr = repo.transaction('commit', report=lambda x: None)
        if name:
            if shelvedfile(repo, name, 'hg').exists():
                raise util.Abort(_("a shelved change named '%s' already exists")
                                 % name)
        else:
            # No explicit --name: pick the first unused generated name.
            for n in gennames():
                if not shelvedfile(repo, n, 'hg').exists():
                    name = n
                    break
            else:
                raise util.Abort(_("too many shelved changes named '%s'") %
                                 label)
        # ensure we are not creating a subdirectory or a hidden file
        if '/' in name or '\\' in name:
            raise util.Abort(_('shelved change names may not contain slashes'))
        if name.startswith('.'):
            raise util.Abort(_("shelved change names may not start with '.'"))
        node = cmdutil.commit(ui, repo, commitfunc, pats, opts)
        if not node:
            # Nothing committed; report missing files separately since they
            # are a common cause of "nothing changed".
            stat = repo.status(match=scmutil.match(repo[None], pats, opts))
            if stat[3]:
                ui.status(_("nothing changed (%d missing files, see "
                            "'hg status')\n") % len(stat[3]))
            else:
                ui.status(_("nothing changed\n"))
            return 1
        # Keep the temporary commit out of the exchangeable history.
        phases.retractboundary(repo, phases.secret, [node])
        fp = shelvedfile(repo, name, 'files').opener('wb')
        fp.write('\0'.join(shelvedfiles))
        bases = list(publicancestors(repo[node]))
        cg = changegroup.changegroupsubset(repo, bases, [node], 'shelve')
        shelvedfile(repo, name, 'hg').writebundle(cg)
        cmdutil.export(repo, [node],
                       fp=shelvedfile(repo, name, 'patch').opener('wb'),
                       opts=mdiff.diffopts(git=True))
        if ui.formatted():
            desc = util.ellipsis(desc, ui.termwidth())
        ui.status(_('shelved as %s\n') % name)
        hg.update(repo, parent.node())
    finally:
        if bms:
            # restore old bookmarks
            repo._bookmarks.update(bms)
            repo._bookmarks.write()
        if tr:
            # Aborting the uncommitted transaction removes the temporary
            # commit again; the bundle written above is what survives.
            tr.abort()
        lockmod.release(lock, wlock)
def cleanupcmd(ui, repo):
    """subcommand that deletes all shelves"""
    wlock = None
    try:
        wlock = repo.wlock()
        # Remove every file in .hg/shelved whose extension marks it as
        # part of a shelve (bundle, file list, or patch).
        for entry, _kind in repo.vfs.readdir('shelved'):
            extension = entry.rsplit('.', 1)[-1]
            if extension not in ('hg', 'files', 'patch'):
                continue
            shelvedfile(repo, entry).unlink()
    finally:
        lockmod.release(wlock)
def deletecmd(ui, repo, pats):
"""subcommand that deletes a specific shelve"""
if not pats:
raise util.Abort(_('no shelved changes specified!'))
wlock = None
try:
wlock = repo.wlock()
try:
for name in pats:
for suffix in 'hg files patch'.split():
shelvedfile(repo, name, suffix).unlink()
except OSError, err:
if err.errno != errno.ENOENT:
raise
raise util.Abort(_("shelved change '%s' not found") % name)
finally:
lockmod.release(wlock)
def listshelves(repo):
"""return all shelves in repo as list of (time, filename)"""
try:
names = repo.vfs.readdir('shelved')
except OSError, err:
if err.errno != errno.ENOENT:
raise
return []
info = []
for (name, _) in names:
pfx, sfx = name.rsplit('.', 1)
if not pfx or sfx != 'patch':
continue
st = shelvedfile(repo, name).stat()
info.append((st.st_mtime, shelvedfile(repo, pfx).filename()))
return sorted(info, reverse=True)
def listcmd(ui, repo, pats, opts):
    """subcommand that displays the list of shelves

    Prints one line per shelve (name, age, first patch description line),
    optionally followed by the full patch (--patch) or a diffstat (--stat).
    When pats is non-empty, only the named shelves are shown.
    """
    pats = set(pats)
    width = 80
    if not ui.plain():
        width = ui.termwidth()
    # The first printed shelve gets a distinct label so it can be
    # highlighted as the newest one.
    namelabel = 'shelve.newest'
    for mtime, name in listshelves(repo):
        sname = util.split(name)[1]
        if pats and sname not in pats:
            continue
        ui.write(sname, label=namelabel)
        namelabel = 'shelve.name'
        if ui.quiet:
            ui.write('\n')
            continue
        # Column layout: 16 chars for the name, 12 for the age, the
        # remainder of the terminal width for the description.
        ui.write(' ' * (16 - len(sname)))
        used = 16
        age = '(%s)' % templatefilters.age(util.makedate(mtime), abbrev=True)
        ui.write(age, label='shelve.age')
        ui.write(' ' * (12 - len(age)))
        used += 12
        fp = open(name + '.patch', 'rb')
        try:
            # Skip the '#'-prefixed patch header; the first other line is
            # the commit description.
            while True:
                line = fp.readline()
                if not line:
                    break
                if not line.startswith('#'):
                    desc = line.rstrip()
                    if ui.formatted():
                        desc = util.ellipsis(desc, width - used)
                    ui.write(desc)
                    break
            ui.write('\n')
            if not (opts['patch'] or opts['stat']):
                continue
            # fp is positioned just past the description, so readlines()
            # yields the diff body.
            difflines = fp.readlines()
            if opts['patch']:
                for chunk, label in patch.difflabel(iter, difflines):
                    ui.write(chunk, label=label)
            if opts['stat']:
                for chunk, label in patch.diffstatui(difflines, width=width,
                                                     git=True):
                    ui.write(chunk, label=label)
        finally:
            fp.close()
def checkparents(repo, state):
    """check parent while resuming an unshelve"""
    # The dirstate must still point at the parents recorded when the
    # unshelve was interrupted; otherwise resuming would misapply changes.
    if repo.dirstate.parents() != state.parents:
        raise util.Abort(_('working directory parents do not match unshelve '
                           'state'))
def pathtofiles(repo, files):
    """Convert repo-root-relative paths into paths relative to the cwd."""
    where = repo.getcwd()
    converted = []
    for fname in files:
        converted.append(repo.pathto(fname, where))
    return converted
def unshelveabort(ui, repo, state, opts):
    """subcommand that abort an in-progress unshelve

    Aborts the underlying rebase, strips the temporary commits recorded in
    the state file, and removes the unshelve state.
    """
    wlock = repo.wlock()
    lock = None
    try:
        checkparents(repo, state)
        # The rebase state was stashed under a shelve-specific name so other
        # commands don't see an in-progress rebase; restore it so
        # rebase --abort can find it.
        util.rename(repo.join('unshelverebasestate'),
                    repo.join('rebasestate'))
        try:
            rebase.rebase(ui, repo, **{
                'abort' : True
            })
        except Exception:
            # Put the state file back so a later abort/continue can retry.
            util.rename(repo.join('rebasestate'),
                        repo.join('unshelverebasestate'))
            raise
        lock = repo.lock()
        mergefiles(ui, repo, state.wctx, state.pendingctx)
        # Remove the temporary commits created during the unshelve.
        repair.strip(ui, repo, state.stripnodes, backup='none', topic='shelve')
        shelvedstate.clear(repo)
        ui.warn(_("unshelve of '%s' aborted\n") % state.name)
    finally:
        lockmod.release(lock, wlock)
def mergefiles(ui, repo, wctx, shelvectx):
    """updates to wctx and merges the changes from shelvectx into the
    dirstate."""
    oldquiet = ui.quiet
    try:
        ui.quiet = True
        hg.update(repo, wctx.node())
        # Files touched by the shelved commit or by its parent are the
        # candidates for the revert below.
        files = []
        files.extend(shelvectx.files())
        files.extend(shelvectx.parents()[0].files())
        # revert will overwrite unknown files, so move them out of the way
        unknown = repo.status(unknown=True)[4]
        for fname in unknown:
            if fname in files:
                util.rename(fname, fname + ".orig")
        cmdutil.revert(ui, repo, shelvectx, repo.dirstate.parents(),
                       *pathtofiles(repo, files),
                       **{'no_backup': True})
    finally:
        ui.quiet = oldquiet
def unshelvecleanup(ui, repo, name, opts):
    """remove related files after an unshelve"""
    # With --keep the shelve files stay on disk for reuse.
    if opts['keep']:
        return
    for filetype in ('hg', 'files', 'patch'):
        shelvedfile(repo, name, filetype).unlink()
def unshelvecontinue(ui, repo, state, opts):
    """subcommand to continue an in-progress unshelve

    Resumes the interrupted rebase once conflicts are resolved, merges the
    result into the dirstate, strips the temporary commits, and removes the
    state and shelve files.
    """
    # We're finishing off a merge. First parent is our original
    # parent, second is the temporary "fake" commit we're unshelving.
    wlock = repo.wlock()
    lock = None
    try:
        checkparents(repo, state)
        ms = merge.mergestate(repo)
        # Refuse to continue while any file is still unresolved ('u').
        if [f for f in ms if ms[f] == 'u']:
            raise util.Abort(
                _("unresolved conflicts, can't continue"),
                hint=_("see 'hg resolve', then 'hg unshelve --continue'"))
        lock = repo.lock()
        # Restore the stashed rebase state so rebase --continue can run.
        util.rename(repo.join('unshelverebasestate'),
                    repo.join('rebasestate'))
        try:
            rebase.rebase(ui, repo, **{
                'continue' : True
            })
        except Exception:
            util.rename(repo.join('rebasestate'),
                        repo.join('unshelverebasestate'))
            raise
        shelvectx = repo['tip']
        if not shelvectx in state.pendingctx.children():
            # rebase was a no-op, so it produced no child commit
            shelvectx = state.pendingctx
        mergefiles(ui, repo, state.wctx, shelvectx)
        # The rebased shelve commit is temporary too; strip it with the rest.
        state.stripnodes.append(shelvectx.node())
        repair.strip(ui, repo, state.stripnodes, backup='none', topic='shelve')
        shelvedstate.clear(repo)
        unshelvecleanup(ui, repo, state.name, opts)
        ui.status(_("unshelve of '%s' complete\n") % state.name)
    finally:
        lockmod.release(lock, wlock)
@command('unshelve',
         [('a', 'abort', None,
           _('abort an incomplete unshelve operation')),
          ('c', 'continue', None,
           _('continue an incomplete unshelve operation')),
          ('', 'keep', None,
           _('keep shelve after unshelving')),
          ('', 'date', '',
           _('set date for temporary commits (DEPRECATED)'), _('DATE'))],
         _('hg unshelve [SHELVED]'))
def unshelve(ui, repo, *shelved, **opts):
    """restore a shelved change to the working directory
    This command accepts an optional name of a shelved change to
    restore. If none is given, the most recent shelved change is used.
    If a shelved change is applied successfully, the bundle that
    contains the shelved changes is deleted afterwards.
    Since you can restore a shelved change on top of an arbitrary
    commit, it is possible that unshelving will result in a conflict
    between your changes and the commits you are unshelving onto. If
    this occurs, you must resolve the conflict, then use
    ``--continue`` to complete the unshelve operation. (The bundle
    will not be deleted until you successfully complete the unshelve.)
    (Alternatively, you can use ``--abort`` to abandon an unshelve
    that causes a conflict. This reverts the unshelved changes, and
    does not delete the bundle.)
    """
    abortf = opts['abort']
    continuef = opts['continue']
    if not abortf and not continuef:
        cmdutil.checkunfinished(repo)
    # --abort / --continue dispatch to the dedicated subcommands and need
    # an existing unshelve state file.
    if abortf or continuef:
        if abortf and continuef:
            raise util.Abort(_('cannot use both abort and continue'))
        if shelved:
            raise util.Abort(_('cannot combine abort/continue with '
                               'naming a shelved change'))
        try:
            state = shelvedstate.load(repo)
        except IOError, err:
            if err.errno != errno.ENOENT:
                raise
            raise util.Abort(_('no unshelve operation underway'))
        if abortf:
            return unshelveabort(ui, repo, state, opts)
        elif continuef:
            return unshelvecontinue(ui, repo, state, opts)
    elif len(shelved) > 1:
        raise util.Abort(_('can only unshelve one change at a time'))
    elif not shelved:
        # No name given: use the most recently modified shelve.
        shelved = listshelves(repo)
        if not shelved:
            raise util.Abort(_('no shelved changes to apply!'))
        basename = util.split(shelved[0][1])[1]
        ui.status(_("unshelving change '%s'\n") % basename)
    else:
        basename = shelved[0]
    if not shelvedfile(repo, basename, 'files').exists():
        raise util.Abort(_("shelved change '%s' not found") % basename)
    oldquiet = ui.quiet
    wlock = lock = tr = None
    try:
        lock = repo.lock()
        wlock = repo.wlock()
        tr = repo.transaction('unshelve', report=lambda x: None)
        # Everything at or past oldtiprev is temporary and gets stripped.
        oldtiprev = len(repo)
        pctx = repo['.']
        tmpwctx = pctx
        # The goal is to have a commit structure like so:
        # ...-> pctx -> tmpwctx -> shelvectx
        # where tmpwctx is an optional commit with the user's pending changes
        # and shelvectx is the unshelved changes. Then we merge it all down
        # to the original pctx.
        # Store pending changes in a commit
        m, a, r, d = repo.status()[:4]
        if m or a or r or d:
            ui.status(_("temporarily committing pending changes "
                        "(restore with 'hg unshelve --abort')\n"))
            def commitfunc(ui, repo, message, match, opts):
                # Bypass mq's applied-patches guard for the temporary commit.
                hasmq = util.safehasattr(repo, 'mq')
                if hasmq:
                    saved, repo.mq.checkapplied = repo.mq.checkapplied, False
                try:
                    return repo.commit(message, 'shelve@localhost',
                                       opts.get('date'), match)
                finally:
                    if hasmq:
                        repo.mq.checkapplied = saved
            tempopts = {}
            tempopts['message'] = "pending changes temporary commit"
            tempopts['date'] = opts.get('date')
            ui.quiet = True
            node = cmdutil.commit(ui, repo, commitfunc, [], tempopts)
            tmpwctx = repo[node]
        ui.quiet = True
        # Apply the shelve bundle; the commits it adds stay secret.
        shelvedfile(repo, basename, 'hg').applybundle()
        nodes = [ctx.node() for ctx in repo.set('%d:', oldtiprev)]
        phases.retractboundary(repo, phases.secret, nodes)
        ui.quiet = oldquiet
        shelvectx = repo['tip']
        # If the shelve is not immediately on top of the commit
        # we'll be merging with, rebase it to be on top.
        if tmpwctx.node() != shelvectx.parents()[0].node():
            ui.status(_('rebasing shelved changes\n'))
            try:
                rebase.rebase(ui, repo, **{
                    'rev' : [shelvectx.rev()],
                    'dest' : str(tmpwctx.rev()),
                    'keep' : True,
                })
            except error.InterventionRequired:
                # Conflicts: persist enough state for --continue/--abort,
                # commit the transaction, and stash the rebase state under a
                # shelve-specific name so other commands don't see it.
                tr.close()
                stripnodes = [repo.changelog.node(rev)
                              for rev in xrange(oldtiprev, len(repo))]
                shelvedstate.save(repo, basename, pctx, tmpwctx, stripnodes)
                util.rename(repo.join('rebasestate'),
                            repo.join('unshelverebasestate'))
                raise error.InterventionRequired(
                    _("unresolved conflicts (see 'hg resolve', then "
                      "'hg unshelve --continue')"))
            # refresh ctx after rebase completes
            shelvectx = repo['tip']
            if not shelvectx in tmpwctx.children():
                # rebase was a no-op, so it produced no child commit
                shelvectx = tmpwctx
        mergefiles(ui, repo, pctx, shelvectx)
        shelvedstate.clear(repo)
        # The transaction aborting will strip all the commits for us,
        # but it doesn't update the inmemory structures, so addchangegroup
        # hooks still fire and try to operate on the missing commits.
        # Clean up manually to prevent this.
        repo.unfiltered().changelog.strip(oldtiprev, tr)
        unshelvecleanup(ui, repo, basename, opts)
    finally:
        ui.quiet = oldquiet
        if tr:
            tr.release()
        lockmod.release(lock, wlock)
@command('shelve',
         [('A', 'addremove', None,
           _('mark new/missing files as added/removed before shelving')),
          ('', 'cleanup', None,
           _('delete all shelved changes')),
          ('', 'date', '',
           _('shelve with the specified commit date'), _('DATE')),
          ('d', 'delete', None,
           _('delete the named shelved change(s)')),
          ('e', 'edit', False,
           _('invoke editor on commit messages')),
          ('l', 'list', None,
           _('list current shelves')),
          ('m', 'message', '',
           _('use text as shelve message'), _('TEXT')),
          ('n', 'name', '',
           _('use the given name for the shelved commit'), _('NAME')),
          ('p', 'patch', None,
           _('show patch')),
          ('', 'stat', None,
           _('output diffstat-style summary of changes'))] + commands.walkopts,
         _('hg shelve [OPTION]... [FILE]...'))
def shelvecmd(ui, repo, *pats, **opts):
    '''save and set aside changes from the working directory
    Shelving takes files that "hg status" reports as not clean, saves
    the modifications to a bundle (a shelved change), and reverts the
    files so that their state in the working directory becomes clean.
    To restore these changes to the working directory, using "hg
    unshelve"; this will work even if you switch to a different
    commit.
    When no files are specified, "hg shelve" saves all not-clean
    files. If specific files or directories are named, only changes to
    those files are shelved.
    Each shelved change has a name that makes it easier to find later.
    The name of a shelved change defaults to being based on the active
    bookmark, or if there is no active bookmark, the current named
    branch. To specify a different name, use ``--name``.
    To see a list of existing shelved changes, use the ``--list``
    option. For each shelved change, this will print its name, age,
    and description; use ``--patch`` or ``--stat`` for more details.
    To delete specific shelved changes, use ``--delete``. To delete
    all shelved changes, use ``--cleanup``.
    '''
    cmdutil.checkunfinished(repo)
    # Map each option to the single sub-action it is compatible with; used
    # by checkopt to reject contradictory option combinations.
    allowables = [
        ('addremove', 'create'), # 'create' is pseudo action
        ('cleanup', 'cleanup'),
        # ('date', 'create'), # ignored for passing '--date "0 0"' in tests
        ('delete', 'delete'),
        ('edit', 'create'),
        ('list', 'list'),
        ('message', 'create'),
        ('name', 'create'),
        ('patch', 'list'),
        ('stat', 'list'),
    ]
    def checkopt(opt):
        # Returns True when `opt` was given, after verifying no other
        # given option belongs to a different sub-action.
        if opts[opt]:
            for i, allowable in allowables:
                if opts[i] and opt != allowable:
                    raise util.Abort(_("options '--%s' and '--%s' may not be "
                                       "used together") % (opt, i))
            return True
    # Dispatch to the matching subcommand; default action is 'create'.
    if checkopt('cleanup'):
        if pats:
            raise util.Abort(_("cannot specify names when using '--cleanup'"))
        return cleanupcmd(ui, repo)
    elif checkopt('delete'):
        return deletecmd(ui, repo, pats)
    elif checkopt('list'):
        return listcmd(ui, repo, pats, opts)
    else:
        for i in ('patch', 'stat'):
            if opts[i]:
                raise util.Abort(_("option '--%s' may not be "
                                   "used when shelving a change") % (i,))
        return createcmd(ui, repo, pats, opts)
def extsetup(ui):
    # Register the unshelve state file with cmdutil so other commands
    # refuse to run (and suggest --continue/--abort) while an unshelve
    # is interrupted.
    cmdutil.unfinishedstates.append(
        [shelvedstate._filename, False, False,
         _('unshelve already in progress'),
         _("use 'hg unshelve --continue' or 'hg unshelve --abort'")])
|
ya790206/temp_hg
|
hgext/shelve.py
|
Python
|
gpl-2.0
| 25,713
|
[
"VisIt"
] |
e0d1e573cbfb884c1445006c3abca5ee0c540de102b14a073b73e3f574c4706d
|
#!/usr/bin/env python
########################################################################
# File : dirac-admin-site-info
# Author : Stuart Paterson
########################################################################
"""
Print Configuration information for a given Site
"""
from __future__ import print_function
__RCSID__ = "$Id$"
import DIRAC
from DIRAC.Core.Base import Script
# Build the usage text from this module's docstring summary line.
Script.setUsageMessage('\n'.join([__doc__.split('\n')[1],
'Usage:',
' %s [option|cfgfile] ... Site ...' % Script.scriptName,
'Arguments:',
' Site: Name of the Site']))
Script.parseCommandLine(ignoreErrors=True)
args = Script.getPositionalArgs()
if len(args) < 1:
    # No site argument given: show usage (DIRAC's showHelp terminates).
    Script.showHelp()
# Imported after parseCommandLine, per DIRAC script convention.
from DIRAC.Interfaces.API.DiracAdmin import DiracAdmin
diracAdmin = DiracAdmin()
exitCode = 0
errorList = []
# Query each requested site; getSiteSection prints the section itself
# (printOutput=True), so only failures need collecting here.
for site in args:
    result = diracAdmin.getSiteSection(site, printOutput=True)
    if not result['OK']:
        errorList.append((site, result['Message']))
        exitCode = 2
for error in errorList:
    print("ERROR %s: %s" % error)
DIRAC.exit(exitCode)
|
fstagni/DIRAC
|
Interfaces/scripts/dirac-admin-site-info.py
|
Python
|
gpl-3.0
| 1,179
|
[
"DIRAC"
] |
c77f09bff1c451005de054a7054214e5831ef19dd61d951764f2cd5cacb48eb9
|
"""
Spatial Error Models module
"""
__author__ = "Luc Anselin luc.anselin@asu.edu, \
Daniel Arribas-Bel darribas@asu.edu, \
Pedro V. Amaral pedro.amaral@asu.edu"
import numpy as np
from numpy import linalg as la
import ols as OLS
from pysal import lag_spatial
from utils import power_expansion, set_endog, iter_msg, sp_att
from utils import get_A1_hom, get_A2_hom, get_A1_het, optim_moments, get_spFilter, get_lags, _moments2eqs
from utils import spdot, RegressionPropsY, set_warn
import twosls as TSLS
import user_output as USER
import summary_output as SUMMARY
__all__ = ["GM_Error", "GM_Endog_Error", "GM_Combo"]
class BaseGM_Error(RegressionPropsY):
"""
GMM method for a spatial error model (note: no consistency checks
diagnostics or constant added); based on Kelejian and Prucha
(1998, 1999)[1]_ [2]_.
Parameters
----------
y : array
nx1 array for dependent variable
x : array
Two dimensional array with n rows and one column for each
independent (exogenous) variable, excluding the constant
w : Sparse matrix
Spatial weights sparse matrix
Attributes
----------
betas : array
kx1 array of estimated coefficients
u : array
nx1 array of residuals
e_filtered : array
nx1 array of spatially filtered residuals
predy : array
nx1 array of predicted y values
n : integer
Number of observations
k : integer
Number of variables for which coefficients are estimated
(including the constant)
y : array
nx1 array for dependent variable
x : array
Two dimensional array with n rows and one column for each
independent (exogenous) variable, including the constant
mean_y : float
Mean of dependent variable
std_y : float
Standard deviation of dependent variable
vm : array
Variance covariance matrix (kxk)
sig2 : float
Sigma squared used in computations
References
----------
.. [1] Kelejian, H.R., Prucha, I.R. (1998) "A generalized spatial
two-stage least squares procedure for estimating a spatial autoregressive
model with autoregressive disturbances". The Journal of Real State
Finance and Economics, 17, 1.
.. [2] Kelejian, H.R., Prucha, I.R. (1999) "A Generalized Moments
Estimator for the Autoregressive Parameter in a Spatial Model".
International Economic Review, 40, 2.
Examples
--------
>>> import pysal
>>> import numpy as np
>>> dbf = pysal.open(pysal.examples.get_path('columbus.dbf'),'r')
>>> y = np.array([dbf.by_col('HOVAL')]).T
>>> x = np.array([dbf.by_col('INC'), dbf.by_col('CRIME')]).T
>>> x = np.hstack((np.ones(y.shape),x))
>>> w = pysal.open(pysal.examples.get_path("columbus.gal"), 'r').read()
>>> w.transform='r'
>>> model = BaseGM_Error(y, x, w=w.sparse)
>>> np.around(model.betas, decimals=4)
array([[ 47.6946],
[ 0.7105],
[ -0.5505],
[ 0.3257]])
"""
def __init__(self, y, x, w):
# 1a. OLS --> \tilde{betas}
ols = OLS.BaseOLS(y=y, x=x)
self.n, self.k = ols.x.shape
self.x = ols.x
self.y = ols.y
# 1b. GMM --> \tilde{\lambda1}
moments = _momentsGM_Error(w, ols.u)
lambda1 = optim_moments(moments)
# 2a. OLS -->\hat{betas}
xs = get_spFilter(w, lambda1, self.x)
ys = get_spFilter(w, lambda1, self.y)
ols2 = OLS.BaseOLS(y=ys, x=xs)
# Output
self.predy = spdot(self.x, ols2.betas)
self.u = y - self.predy
self.betas = np.vstack((ols2.betas, np.array([[lambda1]])))
self.sig2 = ols2.sig2n
self.e_filtered = self.u - lambda1 * w * self.u
self.vm = self.sig2 * ols2.xtxi
se_betas = np.sqrt(self.vm.diagonal())
self._cache = {}
class GM_Error(BaseGM_Error):
"""
GMM method for a spatial error model, with results and diagnostics; based
on Kelejian and Prucha (1998, 1999)[1]_ [2]_.
Parameters
----------
y : array
nx1 array for dependent variable
x : array
Two dimensional array with n rows and one column for each
independent (exogenous) variable, excluding the constant
w : pysal W object
Spatial weights object (always needed)
vm : boolean
If True, include variance-covariance matrix in summary
results
name_y : string
Name of dependent variable for use in output
name_x : list of strings
Names of independent variables for use in output
name_w : string
Name of weights matrix for use in output
name_ds : string
Name of dataset for use in output
Attributes
----------
summary : string
Summary of regression results and diagnostics (note: use in
conjunction with the print command)
betas : array
kx1 array of estimated coefficients
u : array
nx1 array of residuals
e_filtered : array
nx1 array of spatially filtered residuals
predy : array
nx1 array of predicted y values
n : integer
Number of observations
k : integer
Number of variables for which coefficients are estimated
(including the constant)
y : array
nx1 array for dependent variable
x : array
Two dimensional array with n rows and one column for each
independent (exogenous) variable, including the constant
mean_y : float
Mean of dependent variable
std_y : float
Standard deviation of dependent variable
pr2 : float
Pseudo R squared (squared correlation between y and ypred)
vm : array
Variance covariance matrix (kxk)
sig2 : float
Sigma squared used in computations
std_err : array
1xk array of standard errors of the betas
z_stat : list of tuples
z statistic; each tuple contains the pair (statistic,
p-value), where each is a float
name_y : string
Name of dependent variable for use in output
name_x : list of strings
Names of independent variables for use in output
name_w : string
Name of weights matrix for use in output
name_ds : string
Name of dataset for use in output
title : string
Name of the regression method used
References
----------
.. [1] Kelejian, H.R., Prucha, I.R. (1998) "A generalized spatial
two-stage least squares procedure for estimating a spatial autoregressive
model with autoregressive disturbances". The Journal of Real State
Finance and Economics, 17, 1.
.. [2] Kelejian, H.R., Prucha, I.R. (1999) "A Generalized Moments
Estimator for the Autoregressive Parameter in a Spatial Model".
International Economic Review, 40, 2.
Examples
--------
We first need to import the needed modules, namely numpy to convert the
data we read into arrays that ``spreg`` understands and ``pysal`` to
perform all the analysis.
>>> import pysal
>>> import numpy as np
Open data on Columbus neighborhood crime (49 areas) using pysal.open().
This is the DBF associated with the Columbus shapefile. Note that
pysal.open() also reads data in CSV format; since the actual class
requires data to be passed in as numpy arrays, the user can read their
data in using any method.
>>> dbf = pysal.open(pysal.examples.get_path('columbus.dbf'),'r')
Extract the HOVAL column (home values) from the DBF file and make it the
dependent variable for the regression. Note that PySAL requires this to be
an numpy array of shape (n, 1) as opposed to the also common shape of (n, )
that other packages accept.
>>> y = np.array([dbf.by_col('HOVAL')]).T
Extract CRIME (crime) and INC (income) vectors from the DBF to be used as
independent variables in the regression. Note that PySAL requires this to
be an nxj numpy array, where j is the number of independent variables (not
including a constant). By default this class adds a vector of ones to the
independent variables passed in.
>>> names_to_extract = ['INC', 'CRIME']
>>> x = np.array([dbf.by_col(name) for name in names_to_extract]).T
Since we want to run a spatial error model, we need to specify the spatial
weights matrix that includes the spatial configuration of the observations
into the error component of the model. To do that, we can open an already
existing gal file or create a new one. In this case, we will use
``columbus.gal``, which contains contiguity relationships between the
observations in the Columbus dataset we are using throughout this example.
Note that, in order to read the file, not only to open it, we need to
append '.read()' at the end of the command.
>>> w = pysal.open(pysal.examples.get_path("columbus.gal"), 'r').read()
Unless there is a good reason not to do it, the weights have to be
row-standardized so every row of the matrix sums to one. Among other
things, his allows to interpret the spatial lag of a variable as the
average value of the neighboring observations. In PySAL, this can be
easily performed in the following way:
>>> w.transform='r'
We are all set with the preliminars, we are good to run the model. In this
case, we will need the variables and the weights matrix. If we want to
have the names of the variables printed in the output summary, we will
have to pass them in as well, although this is optional.
>>> model = GM_Error(y, x, w=w, name_y='hoval', name_x=['income', 'crime'], name_ds='columbus')
Once we have run the model, we can explore a little bit the output. The
regression object we have created has many attributes so take your time to
discover them. Note that because we are running the classical GMM error
model from 1998/99, the spatial parameter is obtained as a point estimate, so
although you get a value for it (there are for coefficients under
model.betas), you cannot perform inference on it (there are only three
values in model.se_betas).
>>> print model.name_x
['CONSTANT', 'income', 'crime', 'lambda']
>>> np.around(model.betas, decimals=4)
array([[ 47.6946],
[ 0.7105],
[ -0.5505],
[ 0.3257]])
>>> np.around(model.std_err, decimals=4)
array([ 12.412 , 0.5044, 0.1785])
>>> np.around(model.z_stat, decimals=6) #doctest: +SKIP
array([[ 3.84261100e+00, 1.22000000e-04],
[ 1.40839200e+00, 1.59015000e-01],
[ -3.08424700e+00, 2.04100000e-03]])
>>> round(model.sig2,4)
198.5596
"""
    def __init__(self, y, x, w,
                 vm=False, name_y=None, name_x=None,
                 name_w=None, name_ds=None):
        # Validate user inputs before estimation.
        n = USER.check_arrays(y, x)
        USER.check_y(y, n)
        USER.check_weights(w, y, w_required=True)
        # Prepend a constant column to x.
        x_constant = USER.check_constant(x)
        # Run the actual GMM estimation on the sparse weights matrix.
        BaseGM_Error.__init__(self, y=y, x=x_constant, w=w.sparse)
        self.title = "SPATIALLY WEIGHTED LEAST SQUARES"
        # Labels used by the summary output.
        self.name_ds = USER.set_name_ds(name_ds)
        self.name_y = USER.set_name_y(name_y)
        self.name_x = USER.set_name_x(name_x, x)
        # The spatial parameter is reported last, as 'lambda'.
        self.name_x.append('lambda')
        self.name_w = USER.set_name_w(name_w, w)
        # Build the printable regression summary on this instance.
        SUMMARY.GM_Error(reg=self, w=w, vm=vm)
class BaseGM_Endog_Error(RegressionPropsY):
'''
GMM method for a spatial error model with endogenous variables (note: no
consistency checks, diagnostics or constant added); based on Kelejian and
Prucha (1998, 1999)[1]_[2]_.
Parameters
----------
y : array
nx1 array for dependent variable
x : array
Two dimensional array with n rows and one column for each
independent (exogenous) variable, excluding the constant
yend : array
Two dimensional array with n rows and one column for each
endogenous variable
q : array
Two dimensional array with n rows and one column for each
external exogenous variable to use as instruments (note:
this should not contain any variables from x)
w : Sparse matrix
Spatial weights sparse matrix
Attributes
----------
betas : array
kx1 array of estimated coefficients
u : array
nx1 array of residuals
e_filtered : array
nx1 array of spatially filtered residuals
predy : array
nx1 array of predicted y values
n : integer
Number of observations
k : integer
Number of variables for which coefficients are estimated
(including the constant)
y : array
nx1 array for dependent variable
x : array
Two dimensional array with n rows and one column for each
independent (exogenous) variable, including the constant
yend : array
Two dimensional array with n rows and one column for each
endogenous variable
z : array
nxk array of variables (combination of x and yend)
mean_y : float
Mean of dependent variable
std_y : float
Standard deviation of dependent variable
vm : array
Variance covariance matrix (kxk)
sig2 : float
Sigma squared used in computations
References
----------
.. [1] Kelejian, H.R., Prucha, I.R. (1998) "A generalized spatial
two-stage least squares procedure for estimating a spatial autoregressive
model with autoregressive disturbances". The Journal of Real State
Finance and Economics, 17, 1.
.. [2] Kelejian, H.R., Prucha, I.R. (1999) "A Generalized Moments
Estimator for the Autoregressive Parameter in a Spatial Model".
International Economic Review, 40, 2.
Examples
--------
>>> import pysal
>>> import numpy as np
>>> dbf = pysal.open(pysal.examples.get_path('columbus.dbf'),'r')
>>> y = np.array([dbf.by_col('CRIME')]).T
>>> x = np.array([dbf.by_col('INC')]).T
>>> x = np.hstack((np.ones(y.shape),x))
>>> yend = np.array([dbf.by_col('HOVAL')]).T
>>> q = np.array([dbf.by_col('DISCBD')]).T
>>> w = pysal.open(pysal.examples.get_path("columbus.gal"), 'r').read()
>>> w.transform='r'
>>> model = BaseGM_Endog_Error(y, x, yend, q, w=w.sparse)
>>> np.around(model.betas, decimals=4)
array([[ 82.573 ],
[ 0.581 ],
[ -1.4481],
[ 0.3499]])
'''
    def __init__(self, y, x, yend, q, w):
        # 1a. TSLS --> \tilde{betas}: initial two-stage least squares with
        # yend instrumented by q.
        tsls = TSLS.BaseTSLS(y=y, x=x, yend=yend, q=q)
        self.n, self.k = tsls.z.shape
        self.x = tsls.x
        self.y = tsls.y
        self.yend, self.z = tsls.yend, tsls.z
        # 1b. GMM --> \tilde{\lambda1}: spatial parameter from moment
        # conditions on the TSLS residuals.
        moments = _momentsGM_Error(w, tsls.u)
        lambda1 = optim_moments(moments)
        # 2a. 2SLS -->\hat{betas}: rerun TSLS on spatially filtered data,
        # reusing the first-stage instrument matrix h.
        xs = get_spFilter(w, lambda1, self.x)
        ys = get_spFilter(w, lambda1, self.y)
        yend_s = get_spFilter(w, lambda1, self.yend)
        tsls2 = TSLS.BaseTSLS(ys, xs, yend_s, h=tsls.h)
        # Output
        self.betas = np.vstack((tsls2.betas, np.array([[lambda1]])))
        # predy is built from the UNfiltered z (original scale), while the
        # coefficients come from the filtered regression.
        self.predy = spdot(tsls.z, tsls2.betas)
        self.u = y - self.predy
        self.sig2 = float(np.dot(tsls2.u.T, tsls2.u)) / self.n
        self.e_filtered = self.u - lambda1 * w * self.u
        self.vm = self.sig2 * tsls2.varb
        self._cache = {}
class GM_Endog_Error(BaseGM_Endog_Error):
    '''
    GMM method for a spatial error model with endogenous variables, with
    results and diagnostics; based on Kelejian and Prucha (1998, 1999)[1]_[2]_.

    Parameters
    ----------
    y : array
        nx1 array for dependent variable
    x : array
        Two dimensional array with n rows and one column for each
        independent (exogenous) variable, excluding the constant
    yend : array
        Two dimensional array with n rows and one column for each
        endogenous variable
    q : array
        Two dimensional array with n rows and one column for each
        external exogenous variable to use as instruments (note:
        this should not contain any variables from x)
    w : pysal W object
        Spatial weights object (always needed)
    vm : boolean
        If True, include variance-covariance matrix in summary
        results
    name_y : string
        Name of dependent variable for use in output
    name_x : list of strings
        Names of independent variables for use in output
    name_yend : list of strings
        Names of endogenous variables for use in output
    name_q : list of strings
        Names of instruments for use in output
    name_w : string
        Name of weights matrix for use in output
    name_ds : string
        Name of dataset for use in output

    Attributes
    ----------
    summary : string
        Summary of regression results and diagnostics (note: use in
        conjunction with the print command)
    betas : array
        kx1 array of estimated coefficients
    u : array
        nx1 array of residuals
    e_filtered : array
        nx1 array of spatially filtered residuals
    predy : array
        nx1 array of predicted y values
    n : integer
        Number of observations
    k : integer
        Number of variables for which coefficients are estimated
        (including the constant)
    y : array
        nx1 array for dependent variable
    x : array
        Two dimensional array with n rows and one column for each
        independent (exogenous) variable, including the constant
    yend : array
        Two dimensional array with n rows and one column for each
        endogenous variable
    z : array
        nxk array of variables (combination of x and yend)
    mean_y : float
        Mean of dependent variable
    std_y : float
        Standard deviation of dependent variable
    vm : array
        Variance covariance matrix (kxk)
    pr2 : float
        Pseudo R squared (squared correlation between y and ypred)
    sig2 : float
        Sigma squared used in computations
    std_err : array
        1xk array of standard errors of the betas
    z_stat : list of tuples
        z statistic; each tuple contains the pair (statistic,
        p-value), where each is a float
    name_y : string
        Name of dependent variable for use in output
    name_x : list of strings
        Names of independent variables for use in output
    name_yend : list of strings
        Names of endogenous variables for use in output
    name_z : list of strings
        Names of exogenous and endogenous variables for use in
        output
    name_q : list of strings
        Names of external instruments
    name_h : list of strings
        Names of all instruments used in output
    name_w : string
        Name of weights matrix for use in output
    name_ds : string
        Name of dataset for use in output
    title : string
        Name of the regression method used

    References
    ----------
    .. [1] Kelejian, H.R., Prucha, I.R. (1998) "A generalized spatial
    two-stage least squares procedure for estimating a spatial autoregressive
    model with autoregressive disturbances". The Journal of Real Estate
    Finance and Economics, 17, 1.

    .. [2] Kelejian, H.R., Prucha, I.R. (1999) "A Generalized Moments
    Estimator for the Autoregressive Parameter in a Spatial Model".
    International Economic Review, 40, 2.

    Examples
    --------

    We first need to import the needed modules, namely numpy to convert the
    data we read into arrays that ``spreg`` understands and ``pysal`` to
    perform all the analysis.

    >>> import pysal
    >>> import numpy as np

    Open data on Columbus neighborhood crime (49 areas) using pysal.open().
    This is the DBF associated with the Columbus shapefile. Note that
    pysal.open() also reads data in CSV format; since the actual class
    requires data to be passed in as numpy arrays, the user can read their
    data in using any method.

    >>> dbf = pysal.open(pysal.examples.get_path("columbus.dbf"),'r')

    Extract the CRIME column (crime rates) from the DBF file and make it the
    dependent variable for the regression. Note that PySAL requires this to be
    a numpy array of shape (n, 1) as opposed to the also common shape of (n, )
    that other packages accept.

    >>> y = np.array([dbf.by_col('CRIME')]).T

    Extract INC (income) vector from the DBF to be used as
    independent variables in the regression. Note that PySAL requires this to
    be an nxj numpy array, where j is the number of independent variables (not
    including a constant). By default this model adds a vector of ones to the
    independent variables passed in.

    >>> x = np.array([dbf.by_col('INC')]).T

    In this case we consider HOVAL (home value) is an endogenous regressor.
    We tell the model that this is so by passing it in a different parameter
    from the exogenous variables (x).

    >>> yend = np.array([dbf.by_col('HOVAL')]).T

    Because we have endogenous variables, to obtain a correct estimate of the
    model, we need to instrument for HOVAL. We use DISCBD (distance to the
    CBD) for this and hence put it in the instruments parameter, 'q'.

    >>> q = np.array([dbf.by_col('DISCBD')]).T

    Since we want to run a spatial error model, we need to specify the spatial
    weights matrix that includes the spatial configuration of the observations
    into the error component of the model. To do that, we can open an already
    existing gal file or create a new one. In this case, we will use
    ``columbus.gal``, which contains contiguity relationships between the
    observations in the Columbus dataset we are using throughout this example.
    Note that, in order to read the file, not only to open it, we need to
    append '.read()' at the end of the command.

    >>> w = pysal.open(pysal.examples.get_path("columbus.gal"), 'r').read()

    Unless there is a good reason not to do it, the weights have to be
    row-standardized so every row of the matrix sums to one. Among other
    things, this allows to interpret the spatial lag of a variable as the
    average value of the neighboring observations. In PySAL, this can be
    easily performed in the following way:

    >>> w.transform='r'

    We are all set with the preliminaries, we are good to run the model. In this
    case, we will need the variables (exogenous and endogenous), the
    instruments and the weights matrix. If we want to
    have the names of the variables printed in the output summary, we will
    have to pass them in as well, although this is optional.

    >>> model = GM_Endog_Error(y, x, yend, q, w=w, name_x=['inc'], name_y='crime', name_yend=['hoval'], name_q=['discbd'], name_ds='columbus')

    Once we have run the model, we can explore a little bit the output. The
    regression object we have created has many attributes so take your time to
    discover them. Note that because we are running the classical GMM error
    model from 1998/99, the spatial parameter is obtained as a point estimate, so
    although you get a value for it (there are four coefficients under
    model.betas), you cannot perform inference on it (there are only three
    values in model.se_betas). Also, this regression uses a two stage least
    squares estimation method that accounts for the endogeneity created by the
    endogenous variables included.

    >>> print model.name_z
    ['CONSTANT', 'inc', 'hoval', 'lambda']
    >>> np.around(model.betas, decimals=4)
    array([[ 82.573 ],
           [  0.581 ],
           [ -1.4481],
           [  0.3499]])
    >>> np.around(model.std_err, decimals=4)
    array([ 16.1381,   1.3545,   0.7862])
    '''

    def __init__(self, y, x, yend, q, w,
                 vm=False, name_y=None, name_x=None,
                 name_yend=None, name_q=None,
                 name_w=None, name_ds=None):
        # Validate user input, then delegate the estimation to the
        # computational base class (which expects a sparse weights matrix).
        n = USER.check_arrays(y, x, yend, q)
        USER.check_y(y, n)
        USER.check_weights(w, y, w_required=True)
        x_constant = USER.check_constant(x)
        BaseGM_Endog_Error.__init__(
            self, y=y, x=x_constant, w=w.sparse, yend=yend, q=q)
        self.title = "SPATIALLY WEIGHTED TWO STAGE LEAST SQUARES"
        # Bookkeeping for the summary output: variable, instrument and
        # dataset names ('lambda' is the spatial error parameter).
        self.name_ds = USER.set_name_ds(name_ds)
        self.name_y = USER.set_name_y(name_y)
        self.name_x = USER.set_name_x(name_x, x)
        self.name_yend = USER.set_name_yend(name_yend, yend)
        self.name_z = self.name_x + self.name_yend
        self.name_z.append('lambda')
        self.name_q = USER.set_name_q(name_q, q)
        self.name_h = USER.set_name_h(self.name_x, self.name_q)
        self.name_w = USER.set_name_w(name_w, w)
        SUMMARY.GM_Endog_Error(reg=self, w=w, vm=vm)
class BaseGM_Combo(BaseGM_Endog_Error):
    """
    GMM method for a spatial lag and error model, with endogenous variables
    (note: no consistency checks, diagnostics or constant added); based on
    Kelejian and Prucha (1998, 1999)[1]_[2]_.

    Parameters
    ----------
    y : array
        nx1 array for dependent variable
    x : array
        Two dimensional array with n rows and one column for each
        independent (exogenous) variable, excluding the constant
    yend : array
        Two dimensional array with n rows and one column for each
        endogenous variable
    q : array
        Two dimensional array with n rows and one column for each
        external exogenous variable to use as instruments (note:
        this should not contain any variables from x)
    w : Sparse matrix
        Spatial weights sparse matrix
    w_lags : integer
        Orders of W to include as instruments for the spatially
        lagged dependent variable. For example, w_lags=1, then
        instruments are WX; if w_lags=2, then WX, WWX; and so on.
    lag_q : boolean
        If True, then include spatial lags of the additional
        instruments (q).

    Attributes
    ----------
    betas : array
        kx1 array of estimated coefficients
    u : array
        nx1 array of residuals
    e_filtered : array
        nx1 array of spatially filtered residuals
    predy : array
        nx1 array of predicted y values
    n : integer
        Number of observations
    k : integer
        Number of variables for which coefficients are estimated
        (including the constant)
    y : array
        nx1 array for dependent variable
    x : array
        Two dimensional array with n rows and one column for each
        independent (exogenous) variable, including the constant
    yend : array
        Two dimensional array with n rows and one column for each
        endogenous variable
    z : array
        nxk array of variables (combination of x and yend)
    mean_y : float
        Mean of dependent variable
    std_y : float
        Standard deviation of dependent variable
    vm : array
        Variance covariance matrix (kxk)
    sig2 : float
        Sigma squared used in computations

    References
    ----------
    .. [1] Kelejian, H.R., Prucha, I.R. (1998) "A generalized spatial
    two-stage least squares procedure for estimating a spatial autoregressive
    model with autoregressive disturbances". The Journal of Real Estate
    Finance and Economics, 17, 1.

    .. [2] Kelejian, H.R., Prucha, I.R. (1999) "A Generalized Moments
    Estimator for the Autoregressive Parameter in a Spatial Model".
    International Economic Review, 40, 2.

    Examples
    --------

    >>> import numpy as np
    >>> import pysal
    >>> db = pysal.open(pysal.examples.get_path('columbus.dbf'),'r')
    >>> y = np.array(db.by_col("CRIME"))
    >>> y = np.reshape(y, (49,1))
    >>> X = []
    >>> X.append(db.by_col("INC"))
    >>> X = np.array(X).T
    >>> w = pysal.rook_from_shapefile(pysal.examples.get_path("columbus.shp"))
    >>> w.transform = 'r'
    >>> w_lags = 1
    >>> yd2, q2 = pysal.spreg.utils.set_endog(y, X, w, None, None, w_lags, True)
    >>> X = np.hstack((np.ones(y.shape),X))

    Example only with spatial lag

    >>> reg = BaseGM_Combo(y, X, yend=yd2, q=q2, w=w.sparse)

    Print the betas

    >>> print np.around(np.hstack((reg.betas[:-1],np.sqrt(reg.vm.diagonal()).reshape(3,1))),3)
    [[ 39.059  11.86 ]
     [ -1.404   0.391]
     [  0.467   0.2  ]]

    And lambda

    >>> print 'Lamda: ', np.around(reg.betas[-1], 3)
    Lamda:  [-0.048]

    Example with both spatial lag and other endogenous variables

    >>> X = []
    >>> X.append(db.by_col("INC"))
    >>> X = np.array(X).T
    >>> yd = []
    >>> yd.append(db.by_col("HOVAL"))
    >>> yd = np.array(yd).T
    >>> q = []
    >>> q.append(db.by_col("DISCBD"))
    >>> q = np.array(q).T
    >>> yd2, q2 = pysal.spreg.utils.set_endog(y, X, w, yd, q, w_lags, True)
    >>> X = np.hstack((np.ones(y.shape),X))
    >>> reg = BaseGM_Combo(y, X, yd2, q2, w=w.sparse)
    >>> betas = np.array([['CONSTANT'],['INC'],['HOVAL'],['W_CRIME']])
    >>> print np.hstack((betas, np.around(np.hstack((reg.betas[:-1], np.sqrt(reg.vm.diagonal()).reshape(4,1))),4)))
    [['CONSTANT' '50.0944' '14.3593']
     ['INC' '-0.2552' '0.5667']
     ['HOVAL' '-0.6885' '0.3029']
     ['W_CRIME' '0.4375' '0.2314']]
    """

    def __init__(self, y, x, yend=None, q=None,
                 w=None, w_lags=1, lag_q=True):
        # NOTE: w_lags and lag_q are accepted only so the signature mirrors
        # the user-facing class; the spatially lagged instruments and the
        # lagged dependent variable are expected to have been built already
        # (see utils.set_endog) and passed in through yend/q.
        BaseGM_Endog_Error.__init__(self, y=y, x=x, w=w, yend=yend, q=q)
class GM_Combo(BaseGM_Combo):
    """
    GMM method for a spatial lag and error model with endogenous variables,
    with results and diagnostics; based on Kelejian and Prucha (1998,
    1999)[1]_[2]_.

    Parameters
    ----------
    y : array
        nx1 array for dependent variable
    x : array
        Two dimensional array with n rows and one column for each
        independent (exogenous) variable, excluding the constant
    yend : array
        Two dimensional array with n rows and one column for each
        endogenous variable
    q : array
        Two dimensional array with n rows and one column for each
        external exogenous variable to use as instruments (note:
        this should not contain any variables from x)
    w : pysal W object
        Spatial weights object (always needed)
    w_lags : integer
        Orders of W to include as instruments for the spatially
        lagged dependent variable. For example, w_lags=1, then
        instruments are WX; if w_lags=2, then WX, WWX; and so on.
    lag_q : boolean
        If True, then include spatial lags of the additional
        instruments (q).
    vm : boolean
        If True, include variance-covariance matrix in summary
        results
    name_y : string
        Name of dependent variable for use in output
    name_x : list of strings
        Names of independent variables for use in output
    name_yend : list of strings
        Names of endogenous variables for use in output
    name_q : list of strings
        Names of instruments for use in output
    name_w : string
        Name of weights matrix for use in output
    name_ds : string
        Name of dataset for use in output

    Attributes
    ----------
    summary : string
        Summary of regression results and diagnostics (note: use in
        conjunction with the print command)
    betas : array
        kx1 array of estimated coefficients
    u : array
        nx1 array of residuals
    e_filtered : array
        nx1 array of spatially filtered residuals
    e_pred : array
        nx1 array of residuals (using reduced form)
    predy : array
        nx1 array of predicted y values
    predy_e : array
        nx1 array of predicted y values (using reduced form)
    n : integer
        Number of observations
    k : integer
        Number of variables for which coefficients are estimated
        (including the constant)
    y : array
        nx1 array for dependent variable
    x : array
        Two dimensional array with n rows and one column for each
        independent (exogenous) variable, including the constant
    yend : array
        Two dimensional array with n rows and one column for each
        endogenous variable
    z : array
        nxk array of variables (combination of x and yend)
    mean_y : float
        Mean of dependent variable
    std_y : float
        Standard deviation of dependent variable
    vm : array
        Variance covariance matrix (kxk)
    pr2 : float
        Pseudo R squared (squared correlation between y and ypred)
    pr2_e : float
        Pseudo R squared (squared correlation between y and ypred_e
        (using reduced form))
    sig2 : float
        Sigma squared used in computations (based on filtered
        residuals)
    std_err : array
        1xk array of standard errors of the betas
    z_stat : list of tuples
        z statistic; each tuple contains the pair (statistic,
        p-value), where each is a float
    name_y : string
        Name of dependent variable for use in output
    name_x : list of strings
        Names of independent variables for use in output
    name_yend : list of strings
        Names of endogenous variables for use in output
    name_z : list of strings
        Names of exogenous and endogenous variables for use in
        output
    name_q : list of strings
        Names of external instruments
    name_h : list of strings
        Names of all instruments used in output
    name_w : string
        Name of weights matrix for use in output
    name_ds : string
        Name of dataset for use in output
    title : string
        Name of the regression method used

    References
    ----------
    .. [1] Kelejian, H.R., Prucha, I.R. (1998) "A generalized spatial
    two-stage least squares procedure for estimating a spatial autoregressive
    model with autoregressive disturbances". The Journal of Real Estate
    Finance and Economics, 17, 1.

    .. [2] Kelejian, H.R., Prucha, I.R. (1999) "A Generalized Moments
    Estimator for the Autoregressive Parameter in a Spatial Model".
    International Economic Review, 40, 2.

    Examples
    --------

    We first need to import the needed modules, namely numpy to convert the
    data we read into arrays that ``spreg`` understands and ``pysal`` to
    perform all the analysis.

    >>> import numpy as np
    >>> import pysal

    Open data on Columbus neighborhood crime (49 areas) using pysal.open().
    This is the DBF associated with the Columbus shapefile. Note that
    pysal.open() also reads data in CSV format; since the actual class
    requires data to be passed in as numpy arrays, the user can read their
    data in using any method.

    >>> db = pysal.open(pysal.examples.get_path("columbus.dbf"),'r')

    Extract the CRIME column (crime rates) from the DBF file and make it the
    dependent variable for the regression. Note that PySAL requires this to be
    a numpy array of shape (n, 1) as opposed to the also common shape of (n, )
    that other packages accept.

    >>> y = np.array(db.by_col("CRIME"))
    >>> y = np.reshape(y, (49,1))

    Extract INC (income) vector from the DBF to be used as
    independent variables in the regression. Note that PySAL requires this to
    be an nxj numpy array, where j is the number of independent variables (not
    including a constant). By default this model adds a vector of ones to the
    independent variables passed in.

    >>> X = []
    >>> X.append(db.by_col("INC"))
    >>> X = np.array(X).T

    Since we want to run a spatial error model, we need to specify the spatial
    weights matrix that includes the spatial configuration of the observations
    into the error component of the model. To do that, we can open an already
    existing gal file or create a new one. In this case, we will create one
    from ``columbus.shp``.

    >>> w = pysal.rook_from_shapefile(pysal.examples.get_path("columbus.shp"))

    Unless there is a good reason not to do it, the weights have to be
    row-standardized so every row of the matrix sums to one. Among other
    things, this allows to interpret the spatial lag of a variable as the
    average value of the neighboring observations. In PySAL, this can be
    easily performed in the following way:

    >>> w.transform = 'r'

    The Combo class runs an SARAR model, that is a spatial lag+error model.
    In this case we will run a simple version of that, where we have the
    spatial effects as well as exogenous variables. Since it is a spatial
    model, we have to pass in the weights matrix. If we want to
    have the names of the variables printed in the output summary, we will
    have to pass them in as well, although this is optional.

    >>> reg = GM_Combo(y, X, w=w, name_y='crime', name_x=['income'], name_ds='columbus')

    Once we have run the model, we can explore a little bit the output. The
    regression object we have created has many attributes so take your time to
    discover them. Note that because we are running the classical GMM error
    model from 1998/99, the spatial parameter is obtained as a point estimate, so
    although you get a value for it (there are four coefficients under
    model.betas), you cannot perform inference on it (there are only three
    values in model.se_betas). Also, this regression uses a two stage least
    squares estimation method that accounts for the endogeneity created by the
    spatial lag of the dependent variable. We can check the betas:

    >>> print reg.name_z
    ['CONSTANT', 'income', 'W_crime', 'lambda']
    >>> print np.around(np.hstack((reg.betas[:-1],np.sqrt(reg.vm.diagonal()).reshape(3,1))),3)
    [[ 39.059  11.86 ]
     [ -1.404   0.391]
     [  0.467   0.2  ]]

    And lambda:

    >>> print 'lambda: ', np.around(reg.betas[-1], 3)
    lambda:  [-0.048]

    This class also allows the user to run a spatial lag+error model with the
    extra feature of including non-spatial endogenous regressors. This means
    that, in addition to the spatial lag and error, we consider some of the
    variables on the right-hand side of the equation as endogenous and we
    instrument for this. As an example, we will include HOVAL (home value) as
    endogenous and will instrument with DISCBD (distance to the CSB). We first
    need to read in the variables:

    >>> yd = []
    >>> yd.append(db.by_col("HOVAL"))
    >>> yd = np.array(yd).T
    >>> q = []
    >>> q.append(db.by_col("DISCBD"))
    >>> q = np.array(q).T

    And then we can run and explore the model analogously to the previous combo:

    >>> reg = GM_Combo(y, X, yd, q, w=w, name_x=['inc'], name_y='crime', name_yend=['hoval'], name_q=['discbd'], name_ds='columbus')
    >>> print reg.name_z
    ['CONSTANT', 'inc', 'hoval', 'W_crime', 'lambda']
    >>> names = np.array(reg.name_z).reshape(5,1)
    >>> print np.hstack((names[0:4,:], np.around(np.hstack((reg.betas[:-1], np.sqrt(reg.vm.diagonal()).reshape(4,1))),4)))
    [['CONSTANT' '50.0944' '14.3593']
     ['inc' '-0.2552' '0.5667']
     ['hoval' '-0.6885' '0.3029']
     ['W_crime' '0.4375' '0.2314']]
    >>> print 'lambda: ', np.around(reg.betas[-1], 3)
    lambda:  [ 0.254]
    """

    def __init__(self, y, x, yend=None, q=None,
                 w=None, w_lags=1, lag_q=True,
                 vm=False, name_y=None, name_x=None,
                 name_yend=None, name_q=None,
                 name_w=None, name_ds=None):
        # Validate input, then build the spatial-lag instruments (WX, ...,
        # optionally Wq) and append Wy to the endogenous variables.
        n = USER.check_arrays(y, x, yend, q)
        USER.check_y(y, n)
        USER.check_weights(w, y, w_required=True)
        yend2, q2 = set_endog(y, x, w, yend, q, w_lags, lag_q)
        x_constant = USER.check_constant(x)
        BaseGM_Combo.__init__(
            self, y=y, x=x_constant, w=w.sparse, yend=yend2, q=q2,
            w_lags=w_lags, lag_q=lag_q)
        # rho is the coefficient on Wy (second-to-last beta; lambda is last).
        self.rho = self.betas[-2]
        # Reduced-form predictions and residuals for the lag component.
        self.predy_e, self.e_pred, warn = sp_att(w, self.y,
                                                self.predy, yend2[:, -1].reshape(self.n, 1), self.rho)
        set_warn(self, warn)
        self.title = "SPATIALLY WEIGHTED TWO STAGE LEAST SQUARES"
        # Names for the summary output.
        self.name_ds = USER.set_name_ds(name_ds)
        self.name_y = USER.set_name_y(name_y)
        self.name_x = USER.set_name_x(name_x, x)
        self.name_yend = USER.set_name_yend(name_yend, yend)
        self.name_yend.append(USER.set_name_yend_sp(self.name_y))
        self.name_z = self.name_x + self.name_yend
        self.name_z.append('lambda')
        self.name_q = USER.set_name_q(name_q, q)
        self.name_q.extend(
            USER.set_name_q_sp(self.name_x, w_lags, self.name_q, lag_q))
        self.name_h = USER.set_name_h(self.name_x, self.name_q)
        self.name_w = USER.set_name_w(name_w, w)
        SUMMARY.GM_Combo(reg=self, w=w, vm=vm)
def _momentsGM_Error(w, u):
try:
wsparse = w.sparse
except:
wsparse = w
n = wsparse.shape[0]
u2 = np.dot(u.T, u)
wu = wsparse * u
uwu = np.dot(u.T, wu)
wu2 = np.dot(wu.T, wu)
wwu = wsparse * wu
uwwu = np.dot(u.T, wwu)
wwu2 = np.dot(wwu.T, wwu)
wuwwu = np.dot(wu.T, wwu)
wtw = wsparse.T * wsparse
trWtW = np.sum(wtw.diagonal())
g = np.array([[u2[0][0], wu2[0][0], uwu[0][0]]]).T / n
G = np.array(
[[2 * uwu[0][0], -wu2[0][0], n], [2 * wuwwu[0][0], -wwu2[0][0], trWtW],
[uwwu[0][0] + wu2[0][0], -wuwwu[0][0], 0.]]) / n
return [G, g]
def _test():
import doctest
start_suppress = np.get_printoptions()['suppress']
np.set_printoptions(suppress=True)
doctest.testmod()
np.set_printoptions(suppress=start_suppress)
if __name__ == '__main__':
    # Run the module doctests first, then demo GM_Error on the Columbus data.
    _test()
    import pysal
    import numpy as np
    # Columbus example: home value regressed on income and crime.
    dbf = pysal.open(pysal.examples.get_path('columbus.dbf'), 'r')
    y = np.array([dbf.by_col('HOVAL')]).T
    names_to_extract = ['INC', 'CRIME']
    x = np.array([dbf.by_col(name) for name in names_to_extract]).T
    # Row-standardized contiguity weights read from the GAL file.
    w = pysal.open(pysal.examples.get_path("columbus.gal"), 'r').read()
    w.transform = 'r'
    model = GM_Error(y, x, w, name_y='hoval',
                     name_x=['income', 'crime'], name_ds='columbus')
    print model.summary
|
spreg-git/pysal
|
pysal/spreg/error_sp.py
|
Python
|
bsd-3-clause
| 45,973
|
[
"COLUMBUS"
] |
f7e7ef67c54f5b81df48ca15cc1f105f6c62d9b1ee2c88111071ac97ddec7a49
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import path_template
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.dataflow_v1beta3.services.messages_v1_beta3 import (
MessagesV1Beta3AsyncClient,
)
from google.cloud.dataflow_v1beta3.services.messages_v1_beta3 import (
MessagesV1Beta3Client,
)
from google.cloud.dataflow_v1beta3.services.messages_v1_beta3 import pagers
from google.cloud.dataflow_v1beta3.services.messages_v1_beta3 import transports
from google.cloud.dataflow_v1beta3.types import messages
from google.oauth2 import service_account
from google.protobuf import timestamp_pb2 # type: ignore
import google.auth
def client_cert_source_callback():
    """Return a dummy (certificate, key) byte pair for mTLS test setups."""
    cert_bytes = b"cert bytes"
    key_bytes = b"key bytes"
    return cert_bytes, key_bytes
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
    """Return a stand-in endpoint when *client* defaults to localhost."""
    default = client.DEFAULT_ENDPOINT
    if "localhost" in default:
        return "foo.googleapis.com"
    return default
def test__get_default_mtls_endpoint():
    """_get_default_mtls_endpoint maps *.googleapis.com hosts to their mtls
    variants, passes mtls hosts through unchanged, leaves non-Google hosts
    alone and tolerates None."""
    expectations = [
        (None, None),
        ("example.googleapis.com", "example.mtls.googleapis.com"),
        ("example.mtls.googleapis.com", "example.mtls.googleapis.com"),
        ("example.sandbox.googleapis.com", "example.mtls.sandbox.googleapis.com"),
        ("example.mtls.sandbox.googleapis.com", "example.mtls.sandbox.googleapis.com"),
        # Non-Google hosts must be returned unmodified.
        ("api.example.com", "api.example.com"),
    ]
    for endpoint, expected in expectations:
        assert MessagesV1Beta3Client._get_default_mtls_endpoint(endpoint) == expected
@pytest.mark.parametrize(
    "client_class", [MessagesV1Beta3Client, MessagesV1Beta3AsyncClient,]
)
def test_messages_v1_beta3_client_from_service_account_info(client_class):
    """from_service_account_info must feed the parsed info to the credential
    factory and wire the resulting credentials into the transport."""
    anon_creds = ga_credentials.AnonymousCredentials()
    with mock.patch.object(
        service_account.Credentials, "from_service_account_info"
    ) as factory:
        factory.return_value = anon_creds
        client = client_class.from_service_account_info({"valid": True})
        assert client.transport._credentials == anon_creds
        assert isinstance(client, client_class)
        assert client.transport._host == "dataflow.googleapis.com:443"
@pytest.mark.parametrize(
    "transport_class,transport_name",
    [
        (transports.MessagesV1Beta3GrpcTransport, "grpc"),
        (transports.MessagesV1Beta3GrpcAsyncIOTransport, "grpc_asyncio"),
    ],
)
def test_messages_v1_beta3_client_service_account_always_use_jwt(
    transport_class, transport_name
):
    """with_always_use_jwt_access must be invoked exactly once with True when
    always_use_jwt_access=True and never when it is False."""
    for jwt_flag in (True, False):
        with mock.patch.object(
            service_account.Credentials, "with_always_use_jwt_access", create=True
        ) as use_jwt:
            sa_creds = service_account.Credentials(None, None, None)
            transport_class(credentials=sa_creds, always_use_jwt_access=jwt_flag)
            if jwt_flag:
                use_jwt.assert_called_once_with(True)
            else:
                use_jwt.assert_not_called()
@pytest.mark.parametrize(
    "client_class", [MessagesV1Beta3Client, MessagesV1Beta3AsyncClient,]
)
def test_messages_v1_beta3_client_from_service_account_file(client_class):
    """Both file-based constructors must honor the mocked credential factory
    and point the transport at the default host."""
    anon_creds = ga_credentials.AnonymousCredentials()
    with mock.patch.object(
        service_account.Credentials, "from_service_account_file"
    ) as factory:
        factory.return_value = anon_creds
        # from_service_account_json is an alias of from_service_account_file.
        for constructor in (
            client_class.from_service_account_file,
            client_class.from_service_account_json,
        ):
            client = constructor("dummy/file/path.json")
            assert client.transport._credentials == anon_creds
            assert isinstance(client, client_class)
        assert client.transport._host == "dataflow.googleapis.com:443"
def test_messages_v1_beta3_client_get_transport_class():
    """get_transport_class must return a gRPC transport both by default and
    when 'grpc' is requested explicitly."""
    default_transport = MessagesV1Beta3Client.get_transport_class()
    assert default_transport in [
        transports.MessagesV1Beta3GrpcTransport,
    ]
    grpc_transport = MessagesV1Beta3Client.get_transport_class("grpc")
    assert grpc_transport == transports.MessagesV1Beta3GrpcTransport
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name",
    [
        (MessagesV1Beta3Client, transports.MessagesV1Beta3GrpcTransport, "grpc"),
        (
            MessagesV1Beta3AsyncClient,
            transports.MessagesV1Beta3GrpcAsyncIOTransport,
            "grpc_asyncio",
        ),
    ],
)
@mock.patch.object(
    MessagesV1Beta3Client,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(MessagesV1Beta3Client),
)
@mock.patch.object(
    MessagesV1Beta3AsyncClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(MessagesV1Beta3AsyncClient),
)
def test_messages_v1_beta3_client_client_options(
    client_class, transport_class, transport_name
):
    """client_options and GOOGLE_API_USE_MTLS_ENDPOINT must select the
    expected endpoint and be forwarded verbatim to the transport ctor."""
    # Check that if channel is provided we won't create a new one.
    with mock.patch.object(MessagesV1Beta3Client, "get_transport_class") as gtc:
        transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
        client = client_class(transport=transport)
        gtc.assert_not_called()
    # Check that if channel is provided via str we will create a new one.
    with mock.patch.object(MessagesV1Beta3Client, "get_transport_class") as gtc:
        client = client_class(transport=transport_name)
        gtc.assert_called()
    # Check the case api_endpoint is provided.
    options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(transport=transport_name, client_options=options)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host="squid.clam.whelk",
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
    # "never": the regular (non-mTLS) default endpoint must be used.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(transport=transport_name)
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=client.DEFAULT_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )
    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
    # "always": the mTLS default endpoint must be used even with no client cert.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(transport=transport_name)
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=client.DEFAULT_MTLS_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )
    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
    # unsupported value: client construction must fail loudly.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
        with pytest.raises(MutualTLSChannelError):
            client = client_class(transport=transport_name)
    # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
    ):
        with pytest.raises(ValueError):
            client = client_class(transport=transport_name)
    # Check the case quota_project_id is provided: it must be forwarded.
    options = client_options.ClientOptions(quota_project_id="octopus")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options, transport=transport_name)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id="octopus",
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name,use_client_cert_env",
    [
        (
            MessagesV1Beta3Client,
            transports.MessagesV1Beta3GrpcTransport,
            "grpc",
            "true",
        ),
        (
            MessagesV1Beta3AsyncClient,
            transports.MessagesV1Beta3GrpcAsyncIOTransport,
            "grpc_asyncio",
            "true",
        ),
        (
            MessagesV1Beta3Client,
            transports.MessagesV1Beta3GrpcTransport,
            "grpc",
            "false",
        ),
        (
            MessagesV1Beta3AsyncClient,
            transports.MessagesV1Beta3GrpcAsyncIOTransport,
            "grpc_asyncio",
            "false",
        ),
    ],
)
@mock.patch.object(
    MessagesV1Beta3Client,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(MessagesV1Beta3Client),
)
@mock.patch.object(
    MessagesV1Beta3AsyncClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(MessagesV1Beta3AsyncClient),
)
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_messages_v1_beta3_client_mtls_env_auto(
    client_class, transport_class, transport_name, use_client_cert_env
):
    """With GOOGLE_API_USE_MTLS_ENDPOINT="auto", the client must switch to the
    mTLS endpoint iff GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" AND a client
    certificate is available (passed explicitly, or discovered via ADC).
    """
    # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
    # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
    # Check the case client_cert_source is provided. Whether client cert is used depends on
    # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
    ):
        options = client_options.ClientOptions(
            client_cert_source=client_cert_source_callback
        )
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(client_options=options, transport=transport_name)
            if use_client_cert_env == "false":
                # Cert usage disabled by env: no cert, regular endpoint.
                expected_client_cert_source = None
                expected_host = client.DEFAULT_ENDPOINT
            else:
                expected_client_cert_source = client_cert_source_callback
                expected_host = client.DEFAULT_MTLS_ENDPOINT
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=expected_host,
                scopes=None,
                client_cert_source_for_mtls=expected_client_cert_source,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )
    # Check the case ADC client cert is provided. Whether client cert is used depends on
    # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
    ):
        with mock.patch.object(transport_class, "__init__") as patched:
            with mock.patch(
                "google.auth.transport.mtls.has_default_client_cert_source",
                return_value=True,
            ):
                with mock.patch(
                    "google.auth.transport.mtls.default_client_cert_source",
                    return_value=client_cert_source_callback,
                ):
                    if use_client_cert_env == "false":
                        expected_host = client.DEFAULT_ENDPOINT
                        expected_client_cert_source = None
                    else:
                        expected_host = client.DEFAULT_MTLS_ENDPOINT
                        expected_client_cert_source = client_cert_source_callback
                    patched.return_value = None
                    client = client_class(transport=transport_name)
                    patched.assert_called_once_with(
                        credentials=None,
                        credentials_file=None,
                        host=expected_host,
                        scopes=None,
                        client_cert_source_for_mtls=expected_client_cert_source,
                        quota_project_id=None,
                        client_info=transports.base.DEFAULT_CLIENT_INFO,
                        always_use_jwt_access=True,
                    )
    # Check the case client_cert_source and ADC client cert are not provided.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
    ):
        with mock.patch.object(transport_class, "__init__") as patched:
            with mock.patch(
                "google.auth.transport.mtls.has_default_client_cert_source",
                return_value=False,
            ):
                patched.return_value = None
                client = client_class(transport=transport_name)
                # No cert anywhere: regular endpoint, no mTLS cert source.
                patched.assert_called_once_with(
                    credentials=None,
                    credentials_file=None,
                    host=client.DEFAULT_ENDPOINT,
                    scopes=None,
                    client_cert_source_for_mtls=None,
                    quota_project_id=None,
                    client_info=transports.base.DEFAULT_CLIENT_INFO,
                    always_use_jwt_access=True,
                )
@pytest.mark.parametrize(
    "client_class", [MessagesV1Beta3Client, MessagesV1Beta3AsyncClient]
)
@mock.patch.object(
    MessagesV1Beta3Client,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(MessagesV1Beta3Client),
)
@mock.patch.object(
    MessagesV1Beta3AsyncClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(MessagesV1Beta3AsyncClient),
)
def test_messages_v1_beta3_client_get_mtls_endpoint_and_cert_source(client_class):
    """get_mtls_endpoint_and_cert_source must honor GOOGLE_API_USE_CLIENT_CERTIFICATE
    and GOOGLE_API_USE_MTLS_ENDPOINT, returning the matching endpoint/cert pair."""
    mock_client_cert_source = mock.Mock()
    # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
        mock_api_endpoint = "foo"
        options = client_options.ClientOptions(
            client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
        )
        api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
            options
        )
        # Explicit options win: both endpoint and cert come back unchanged.
        assert api_endpoint == mock_api_endpoint
        assert cert_source == mock_client_cert_source
    # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}):
        mock_client_cert_source = mock.Mock()
        mock_api_endpoint = "foo"
        options = client_options.ClientOptions(
            client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
        )
        api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
            options
        )
        # Client certs disabled by env: cert source must be dropped.
        assert api_endpoint == mock_api_endpoint
        assert cert_source is None
    # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
        api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
        assert api_endpoint == client_class.DEFAULT_ENDPOINT
        assert cert_source is None
    # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
        api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
        assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
        assert cert_source is None
    # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
        with mock.patch(
            "google.auth.transport.mtls.has_default_client_cert_source",
            return_value=False,
        ):
            api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
            assert api_endpoint == client_class.DEFAULT_ENDPOINT
            assert cert_source is None
    # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
        with mock.patch(
            "google.auth.transport.mtls.has_default_client_cert_source",
            return_value=True,
        ):
            with mock.patch(
                "google.auth.transport.mtls.default_client_cert_source",
                return_value=mock_client_cert_source,
            ):
                (
                    api_endpoint,
                    cert_source,
                ) = client_class.get_mtls_endpoint_and_cert_source()
                assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
                assert cert_source == mock_client_cert_source
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name",
    [
        (MessagesV1Beta3Client, transports.MessagesV1Beta3GrpcTransport, "grpc"),
        (
            MessagesV1Beta3AsyncClient,
            transports.MessagesV1Beta3GrpcAsyncIOTransport,
            "grpc_asyncio",
        ),
    ],
)
def test_messages_v1_beta3_client_client_options_scopes(
    client_class, transport_class, transport_name
):
    """Scopes passed via ClientOptions must be forwarded to the transport."""
    # Check the case scopes are provided.
    options = client_options.ClientOptions(scopes=["1", "2"],)
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options, transport=transport_name)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host=client.DEFAULT_ENDPOINT,
            scopes=["1", "2"],
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name,grpc_helpers",
    [
        (
            MessagesV1Beta3Client,
            transports.MessagesV1Beta3GrpcTransport,
            "grpc",
            grpc_helpers,
        ),
        (
            MessagesV1Beta3AsyncClient,
            transports.MessagesV1Beta3GrpcAsyncIOTransport,
            "grpc_asyncio",
            grpc_helpers_async,
        ),
    ],
)
def test_messages_v1_beta3_client_client_options_credentials_file(
    client_class, transport_class, transport_name, grpc_helpers
):
    """A credentials file given via ClientOptions must be forwarded to the transport."""
    # Check the case credentials file is provided.
    options = client_options.ClientOptions(credentials_file="credentials.json")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options, transport=transport_name)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file="credentials.json",
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
def test_messages_v1_beta3_client_client_options_from_dict():
    """Client options may be passed as a plain dict instead of ClientOptions."""
    with mock.patch(
        "google.cloud.dataflow_v1beta3.services.messages_v1_beta3.transports.MessagesV1Beta3GrpcTransport.__init__"
    ) as grpc_transport:
        grpc_transport.return_value = None
        client = MessagesV1Beta3Client(
            client_options={"api_endpoint": "squid.clam.whelk"}
        )
        # The dict's api_endpoint must reach the transport as the host.
        grpc_transport.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host="squid.clam.whelk",
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name,grpc_helpers",
    [
        (
            MessagesV1Beta3Client,
            transports.MessagesV1Beta3GrpcTransport,
            "grpc",
            grpc_helpers,
        ),
        (
            MessagesV1Beta3AsyncClient,
            transports.MessagesV1Beta3GrpcAsyncIOTransport,
            "grpc_asyncio",
            grpc_helpers_async,
        ),
    ],
)
def test_messages_v1_beta3_client_create_channel_credentials_file(
    client_class, transport_class, transport_name, grpc_helpers
):
    """Credentials loaded from a file must be used when creating the gRPC channel."""
    # Check the case credentials file is provided.
    options = client_options.ClientOptions(credentials_file="credentials.json")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options, transport=transport_name)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file="credentials.json",
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
    # test that the credentials from file are saved and used as the credentials.
    with mock.patch.object(
        google.auth, "load_credentials_from_file", autospec=True
    ) as load_creds, mock.patch.object(
        google.auth, "default", autospec=True
    ) as adc, mock.patch.object(
        grpc_helpers, "create_channel"
    ) as create_channel:
        creds = ga_credentials.AnonymousCredentials()
        file_creds = ga_credentials.AnonymousCredentials()
        load_creds.return_value = (file_creds, None)
        adc.return_value = (creds, None)
        client = client_class(client_options=options, transport=transport_name)
        # The channel must be built with the file credentials, not ADC's.
        create_channel.assert_called_with(
            "dataflow.googleapis.com:443",
            credentials=file_creds,
            credentials_file=None,
            quota_project_id=None,
            default_scopes=(
                "https://www.googleapis.com/auth/cloud-platform",
                "https://www.googleapis.com/auth/compute",
                "https://www.googleapis.com/auth/compute.readonly",
                "https://www.googleapis.com/auth/userinfo.email",
            ),
            scopes=None,
            default_host="dataflow.googleapis.com",
            ssl_credentials=None,
            options=[
                ("grpc.max_send_message_length", -1),
                ("grpc.max_receive_message_length", -1),
            ],
        )
@pytest.mark.parametrize("request_type", [messages.ListJobMessagesRequest, dict,])
def test_list_job_messages(request_type, transport: str = "grpc"):
    """list_job_messages should call the stub once and return a pager."""
    client = MessagesV1Beta3Client(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_job_messages), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = messages.ListJobMessagesResponse(
            next_page_token="next_page_token_value",
        )
        response = client.list_job_messages(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == messages.ListJobMessagesRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, pagers.ListJobMessagesPager)
    assert response.next_page_token == "next_page_token_value"
def test_list_job_messages_empty_call():
    """Calling list_job_messages with no arguments must still hit the stub."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = MessagesV1Beta3Client(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_job_messages), "__call__"
    ) as call:
        client.list_job_messages()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        # An empty call defaults to an empty request message.
        assert args[0] == messages.ListJobMessagesRequest()
@pytest.mark.asyncio
async def test_list_job_messages_async(
    transport: str = "grpc_asyncio", request_type=messages.ListJobMessagesRequest
):
    """Async variant: list_job_messages should call the stub and return an async pager."""
    client = MessagesV1Beta3AsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_job_messages), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            messages.ListJobMessagesResponse(next_page_token="next_page_token_value",)
        )
        response = await client.list_job_messages(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == messages.ListJobMessagesRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, pagers.ListJobMessagesAsyncPager)
    assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_list_job_messages_async_from_dict():
    """Same as the async test above, but with the request passed as a dict."""
    await test_list_job_messages_async(request_type=dict)
def test_list_job_messages_pager(transport_name: str = "grpc"):
    """Iterating the pager must transparently fetch every page of results.

    Fix: pass an AnonymousCredentials *instance* — the original passed the
    class object itself (missing ``()``), so no real credentials were given.
    """
    client = MessagesV1Beta3Client(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_job_messages), "__call__"
    ) as call:
        # Set the response to a series of pages; RuntimeError guards against
        # the pager requesting a page beyond the last.
        call.side_effect = (
            messages.ListJobMessagesResponse(
                job_messages=[
                    messages.JobMessage(),
                    messages.JobMessage(),
                    messages.JobMessage(),
                ],
                next_page_token="abc",
            ),
            messages.ListJobMessagesResponse(job_messages=[], next_page_token="def",),
            messages.ListJobMessagesResponse(
                job_messages=[messages.JobMessage(),], next_page_token="ghi",
            ),
            messages.ListJobMessagesResponse(
                job_messages=[messages.JobMessage(), messages.JobMessage(),],
            ),
            RuntimeError,
        )
        metadata = ()
        pager = client.list_job_messages(request={})
        assert pager._metadata == metadata
        # Iterating the pager should yield every JobMessage across all pages.
        results = list(pager)
        assert len(results) == 6
        assert all(isinstance(i, messages.JobMessage) for i in results)
def test_list_job_messages_pages(transport_name: str = "grpc"):
    """Iterating ``.pages`` must expose each raw page with its next_page_token.

    Fix: pass an AnonymousCredentials *instance* — the original passed the
    class object itself (missing ``()``), so no real credentials were given.
    """
    client = MessagesV1Beta3Client(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_job_messages), "__call__"
    ) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            messages.ListJobMessagesResponse(
                job_messages=[
                    messages.JobMessage(),
                    messages.JobMessage(),
                    messages.JobMessage(),
                ],
                next_page_token="abc",
            ),
            messages.ListJobMessagesResponse(job_messages=[], next_page_token="def",),
            messages.ListJobMessagesResponse(
                job_messages=[messages.JobMessage(),], next_page_token="ghi",
            ),
            messages.ListJobMessagesResponse(
                job_messages=[messages.JobMessage(), messages.JobMessage(),],
            ),
            RuntimeError,
        )
        pages = list(client.list_job_messages(request={}).pages)
        # The final page carries an empty token.
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_job_messages_async_pager():
    """Async-iterating the pager must transparently fetch every page.

    Fix: pass an AnonymousCredentials *instance* — the original passed the
    class object itself (missing ``()``), so no real credentials were given.
    """
    client = MessagesV1Beta3AsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_job_messages),
        "__call__",
        new_callable=mock.AsyncMock,
    ) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            messages.ListJobMessagesResponse(
                job_messages=[
                    messages.JobMessage(),
                    messages.JobMessage(),
                    messages.JobMessage(),
                ],
                next_page_token="abc",
            ),
            messages.ListJobMessagesResponse(job_messages=[], next_page_token="def",),
            messages.ListJobMessagesResponse(
                job_messages=[messages.JobMessage(),], next_page_token="ghi",
            ),
            messages.ListJobMessagesResponse(
                job_messages=[messages.JobMessage(), messages.JobMessage(),],
            ),
            RuntimeError,
        )
        async_pager = await client.list_job_messages(request={},)
        assert async_pager.next_page_token == "abc"
        responses = []
        async for response in async_pager:
            responses.append(response)
        # All six JobMessages across the four pages must be yielded.
        assert len(responses) == 6
        assert all(isinstance(i, messages.JobMessage) for i in responses)
@pytest.mark.asyncio
async def test_list_job_messages_async_pages():
    """Async-iterating ``.pages`` must expose each raw page's next_page_token.

    Fix: pass an AnonymousCredentials *instance* — the original passed the
    class object itself (missing ``()``), so no real credentials were given.
    """
    client = MessagesV1Beta3AsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_job_messages),
        "__call__",
        new_callable=mock.AsyncMock,
    ) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            messages.ListJobMessagesResponse(
                job_messages=[
                    messages.JobMessage(),
                    messages.JobMessage(),
                    messages.JobMessage(),
                ],
                next_page_token="abc",
            ),
            messages.ListJobMessagesResponse(job_messages=[], next_page_token="def",),
            messages.ListJobMessagesResponse(
                job_messages=[messages.JobMessage(),], next_page_token="ghi",
            ),
            messages.ListJobMessagesResponse(
                job_messages=[messages.JobMessage(), messages.JobMessage(),],
            ),
            RuntimeError,
        )
        pages = []
        async for page_ in (await client.list_job_messages(request={})).pages:
            pages.append(page_)
        # The final page carries an empty token.
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
def test_credentials_transport_error():
    """Passing mutually exclusive client constructor arguments must raise ValueError."""
    # It is an error to provide credentials and a transport instance.
    transport = transports.MessagesV1Beta3GrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client = MessagesV1Beta3Client(
            credentials=ga_credentials.AnonymousCredentials(), transport=transport,
        )
    # It is an error to provide a credentials file and a transport instance.
    transport = transports.MessagesV1Beta3GrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client = MessagesV1Beta3Client(
            client_options={"credentials_file": "credentials.json"},
            transport=transport,
        )
    # It is an error to provide an api_key and a transport instance.
    transport = transports.MessagesV1Beta3GrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    options = client_options.ClientOptions()
    options.api_key = "api_key"
    with pytest.raises(ValueError):
        client = MessagesV1Beta3Client(client_options=options, transport=transport,)
    # It is an error to provide an api_key and a credential.
    options = mock.Mock()
    options.api_key = "api_key"
    with pytest.raises(ValueError):
        client = MessagesV1Beta3Client(
            client_options=options, credentials=ga_credentials.AnonymousCredentials()
        )
    # It is an error to provide scopes and a transport instance.
    transport = transports.MessagesV1Beta3GrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client = MessagesV1Beta3Client(
            client_options={"scopes": ["1", "2"]}, transport=transport,
        )
def test_transport_instance():
    """A custom transport instance passed to the client must be used as-is."""
    # A client may be instantiated with a custom transport instance.
    transport = transports.MessagesV1Beta3GrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    client = MessagesV1Beta3Client(transport=transport)
    assert client.transport is transport
def test_transport_get_channel():
    """Both sync and async transports must expose a usable grpc_channel."""
    # A client may be instantiated with a custom transport instance.
    transport = transports.MessagesV1Beta3GrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    channel = transport.grpc_channel
    assert channel
    transport = transports.MessagesV1Beta3GrpcAsyncIOTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    channel = transport.grpc_channel
    assert channel
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.MessagesV1Beta3GrpcTransport,
        transports.MessagesV1Beta3GrpcAsyncIOTransport,
    ],
)
def test_transport_adc(transport_class):
    """Transports must fall back to Application Default Credentials when none given."""
    # Test default credentials are used if not provided.
    with mock.patch.object(google.auth, "default") as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport_class()
        adc.assert_called_once()
def test_transport_grpc_default():
    """The synchronous gRPC transport is the default when none is specified."""
    # A client should use the gRPC transport by default.
    client = MessagesV1Beta3Client(credentials=ga_credentials.AnonymousCredentials(),)
    assert isinstance(client.transport, transports.MessagesV1Beta3GrpcTransport,)
def test_messages_v1_beta3_base_transport_error():
    """Supplying both credentials and a credentials file must raise DuplicateCredentialArgs."""
    # Passing both a credentials object and credentials_file should raise an error
    with pytest.raises(core_exceptions.DuplicateCredentialArgs):
        transport = transports.MessagesV1Beta3Transport(
            credentials=ga_credentials.AnonymousCredentials(),
            credentials_file="credentials.json",
        )
def test_messages_v1_beta3_base_transport():
    """Every RPC method on the abstract base transport must raise NotImplementedError."""
    # Instantiate the base transport.
    with mock.patch(
        "google.cloud.dataflow_v1beta3.services.messages_v1_beta3.transports.MessagesV1Beta3Transport.__init__"
    ) as Transport:
        Transport.return_value = None
        transport = transports.MessagesV1Beta3Transport(
            credentials=ga_credentials.AnonymousCredentials(),
        )
    # Every method on the transport should just blindly
    # raise NotImplementedError.
    methods = ("list_job_messages",)
    for method in methods:
        with pytest.raises(NotImplementedError):
            getattr(transport, method)(request=object())
    with pytest.raises(NotImplementedError):
        transport.close()
def test_messages_v1_beta3_base_transport_with_credentials_file():
    """The base transport must load file credentials with the Dataflow default scopes."""
    # Instantiate the base transport with a credentials file
    with mock.patch.object(
        google.auth, "load_credentials_from_file", autospec=True
    ) as load_creds, mock.patch(
        "google.cloud.dataflow_v1beta3.services.messages_v1_beta3.transports.MessagesV1Beta3Transport._prep_wrapped_messages"
    ) as Transport:
        Transport.return_value = None
        load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport = transports.MessagesV1Beta3Transport(
            credentials_file="credentials.json", quota_project_id="octopus",
        )
        load_creds.assert_called_once_with(
            "credentials.json",
            scopes=None,
            default_scopes=(
                "https://www.googleapis.com/auth/cloud-platform",
                "https://www.googleapis.com/auth/compute",
                "https://www.googleapis.com/auth/compute.readonly",
                "https://www.googleapis.com/auth/userinfo.email",
            ),
            quota_project_id="octopus",
        )
def test_messages_v1_beta3_base_transport_with_adc():
    """The base transport must fall back to ADC when no credentials are supplied."""
    # Test the default credentials are used if credentials and credentials_file are None.
    with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
        "google.cloud.dataflow_v1beta3.services.messages_v1_beta3.transports.MessagesV1Beta3Transport._prep_wrapped_messages"
    ) as Transport:
        Transport.return_value = None
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport = transports.MessagesV1Beta3Transport()
        adc.assert_called_once()
def test_messages_v1_beta3_auth_adc():
    """The client must request ADC with the Dataflow default scopes when no creds given."""
    # If no credentials are provided, we should use ADC credentials.
    with mock.patch.object(google.auth, "default", autospec=True) as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        MessagesV1Beta3Client()
        adc.assert_called_once_with(
            scopes=None,
            default_scopes=(
                "https://www.googleapis.com/auth/cloud-platform",
                "https://www.googleapis.com/auth/compute",
                "https://www.googleapis.com/auth/compute.readonly",
                "https://www.googleapis.com/auth/userinfo.email",
            ),
            quota_project_id=None,
        )
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.MessagesV1Beta3GrpcTransport,
        transports.MessagesV1Beta3GrpcAsyncIOTransport,
    ],
)
def test_messages_v1_beta3_transport_auth_adc(transport_class):
    """Transports must pass explicit scopes/quota project through to ADC."""
    # If credentials and host are not provided, the transport class should use
    # ADC credentials.
    with mock.patch.object(google.auth, "default", autospec=True) as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport_class(quota_project_id="octopus", scopes=["1", "2"])
        adc.assert_called_once_with(
            scopes=["1", "2"],
            default_scopes=(
                "https://www.googleapis.com/auth/cloud-platform",
                "https://www.googleapis.com/auth/compute",
                "https://www.googleapis.com/auth/compute.readonly",
                "https://www.googleapis.com/auth/userinfo.email",
            ),
            quota_project_id="octopus",
        )
@pytest.mark.parametrize(
    "transport_class,grpc_helpers",
    [
        (transports.MessagesV1Beta3GrpcTransport, grpc_helpers),
        (transports.MessagesV1Beta3GrpcAsyncIOTransport, grpc_helpers_async),
    ],
)
def test_messages_v1_beta3_transport_create_channel(transport_class, grpc_helpers):
    """create_channel must receive ADC creds, scopes, and the expected gRPC options."""
    # If credentials and host are not provided, the transport class should use
    # ADC credentials.
    with mock.patch.object(
        google.auth, "default", autospec=True
    ) as adc, mock.patch.object(
        grpc_helpers, "create_channel", autospec=True
    ) as create_channel:
        creds = ga_credentials.AnonymousCredentials()
        adc.return_value = (creds, None)
        transport_class(quota_project_id="octopus", scopes=["1", "2"])
        create_channel.assert_called_with(
            "dataflow.googleapis.com:443",
            credentials=creds,
            credentials_file=None,
            quota_project_id="octopus",
            default_scopes=(
                "https://www.googleapis.com/auth/cloud-platform",
                "https://www.googleapis.com/auth/compute",
                "https://www.googleapis.com/auth/compute.readonly",
                "https://www.googleapis.com/auth/userinfo.email",
            ),
            scopes=["1", "2"],
            default_host="dataflow.googleapis.com",
            ssl_credentials=None,
            options=[
                ("grpc.max_send_message_length", -1),
                ("grpc.max_receive_message_length", -1),
            ],
        )
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.MessagesV1Beta3GrpcTransport,
        transports.MessagesV1Beta3GrpcAsyncIOTransport,
    ],
)
def test_messages_v1_beta3_grpc_transport_client_cert_source_for_mtls(transport_class):
    """Explicit ssl_channel_credentials win; otherwise client_cert_source_for_mtls
    is used to build the SSL channel credentials."""
    cred = ga_credentials.AnonymousCredentials()
    # Check ssl_channel_credentials is used if provided.
    with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
        mock_ssl_channel_creds = mock.Mock()
        transport_class(
            host="squid.clam.whelk",
            credentials=cred,
            ssl_channel_credentials=mock_ssl_channel_creds,
        )
        mock_create_channel.assert_called_once_with(
            "squid.clam.whelk:443",
            credentials=cred,
            credentials_file=None,
            scopes=None,
            ssl_credentials=mock_ssl_channel_creds,
            quota_project_id=None,
            options=[
                ("grpc.max_send_message_length", -1),
                ("grpc.max_receive_message_length", -1),
            ],
        )
    # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
    # is used.
    with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
        with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
            transport_class(
                credentials=cred,
                client_cert_source_for_mtls=client_cert_source_callback,
            )
            expected_cert, expected_key = client_cert_source_callback()
            mock_ssl_cred.assert_called_once_with(
                certificate_chain=expected_cert, private_key=expected_key
            )
def test_messages_v1_beta3_host_no_port():
    """An endpoint without an explicit port must default to :443."""
    client = MessagesV1Beta3Client(
        credentials=ga_credentials.AnonymousCredentials(),
        client_options=client_options.ClientOptions(
            api_endpoint="dataflow.googleapis.com"
        ),
    )
    assert client.transport._host == "dataflow.googleapis.com:443"
def test_messages_v1_beta3_host_with_port():
    """An endpoint with an explicit port must keep that port."""
    client = MessagesV1Beta3Client(
        credentials=ga_credentials.AnonymousCredentials(),
        client_options=client_options.ClientOptions(
            api_endpoint="dataflow.googleapis.com:8000"
        ),
    )
    assert client.transport._host == "dataflow.googleapis.com:8000"
def test_messages_v1_beta3_grpc_transport_channel():
    """A caller-supplied channel must be used verbatim and store no SSL creds.

    Fix: compare against None with ``is`` (identity), per PEP 8, instead of
    the original ``== None``.
    """
    channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials())
    # Check that channel is used if provided.
    transport = transports.MessagesV1Beta3GrpcTransport(
        host="squid.clam.whelk", channel=channel,
    )
    assert transport.grpc_channel == channel
    assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
def test_messages_v1_beta3_grpc_asyncio_transport_channel():
    """A caller-supplied asyncio channel must be used verbatim and store no SSL creds.

    Fix: compare against None with ``is`` (identity), per PEP 8, instead of
    the original ``== None``.
    """
    channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())
    # Check that channel is used if provided.
    transport = transports.MessagesV1Beta3GrpcAsyncIOTransport(
        host="squid.clam.whelk", channel=channel,
    )
    assert transport.grpc_channel == channel
    assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.MessagesV1Beta3GrpcTransport,
        transports.MessagesV1Beta3GrpcAsyncIOTransport,
    ],
)
def test_messages_v1_beta3_transport_channel_mtls_with_client_cert_source(
    transport_class,
):
    """Deprecated api_mtls_endpoint/client_cert_source args must still build an
    mTLS channel (and emit a DeprecationWarning)."""
    with mock.patch(
        "grpc.ssl_channel_credentials", autospec=True
    ) as grpc_ssl_channel_cred:
        with mock.patch.object(
            transport_class, "create_channel"
        ) as grpc_create_channel:
            mock_ssl_cred = mock.Mock()
            grpc_ssl_channel_cred.return_value = mock_ssl_cred
            mock_grpc_channel = mock.Mock()
            grpc_create_channel.return_value = mock_grpc_channel
            cred = ga_credentials.AnonymousCredentials()
            with pytest.warns(DeprecationWarning):
                with mock.patch.object(google.auth, "default") as adc:
                    adc.return_value = (cred, None)
                    transport = transport_class(
                        host="squid.clam.whelk",
                        api_mtls_endpoint="mtls.squid.clam.whelk",
                        client_cert_source=client_cert_source_callback,
                    )
                    adc.assert_called_once()
            grpc_ssl_channel_cred.assert_called_once_with(
                certificate_chain=b"cert bytes", private_key=b"key bytes"
            )
            # The mTLS endpoint, not the plain host, must be dialed.
            grpc_create_channel.assert_called_once_with(
                "mtls.squid.clam.whelk:443",
                credentials=cred,
                credentials_file=None,
                scopes=None,
                ssl_credentials=mock_ssl_cred,
                quota_project_id=None,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
            assert transport.grpc_channel == mock_grpc_channel
            assert transport._ssl_channel_credentials == mock_ssl_cred
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.MessagesV1Beta3GrpcTransport,
        transports.MessagesV1Beta3GrpcAsyncIOTransport,
    ],
)
def test_messages_v1_beta3_transport_channel_mtls_with_adc(transport_class):
    """With api_mtls_endpoint but no client_cert_source, the ADC-provided
    SslCredentials must be used for the mTLS channel."""
    mock_ssl_cred = mock.Mock()
    with mock.patch.multiple(
        "google.auth.transport.grpc.SslCredentials",
        __init__=mock.Mock(return_value=None),
        ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
    ):
        with mock.patch.object(
            transport_class, "create_channel"
        ) as grpc_create_channel:
            mock_grpc_channel = mock.Mock()
            grpc_create_channel.return_value = mock_grpc_channel
            mock_cred = mock.Mock()
            with pytest.warns(DeprecationWarning):
                transport = transport_class(
                    host="squid.clam.whelk",
                    credentials=mock_cred,
                    api_mtls_endpoint="mtls.squid.clam.whelk",
                    client_cert_source=None,
                )
            # The mTLS endpoint, not the plain host, must be dialed.
            grpc_create_channel.assert_called_once_with(
                "mtls.squid.clam.whelk:443",
                credentials=mock_cred,
                credentials_file=None,
                scopes=None,
                ssl_credentials=mock_ssl_cred,
                quota_project_id=None,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
            assert transport.grpc_channel == mock_grpc_channel
def test_common_billing_account_path():
    """common_billing_account_path must build the billingAccounts/* resource name."""
    account = "squid"
    actual = MessagesV1Beta3Client.common_billing_account_path(account)
    expected = "billingAccounts/{billing_account}".format(billing_account=account)
    assert expected == actual
def test_parse_common_billing_account_path():
    """Parsing a freshly built billing-account path must round-trip its parts."""
    want = {
        "billing_account": "clam",
    }
    built = MessagesV1Beta3Client.common_billing_account_path(**want)
    got = MessagesV1Beta3Client.parse_common_billing_account_path(built)
    assert want == got
def test_common_folder_path():
    """common_folder_path must produce the canonical resource name."""
    folder = "whelk"
    want = "folders/{folder}".format(folder=folder,)
    got = MessagesV1Beta3Client.common_folder_path(folder)
    assert want == got
def test_parse_common_folder_path():
    """Parsing a freshly built folder path must round-trip its parts."""
    want = {
        "folder": "octopus",
    }
    built = MessagesV1Beta3Client.common_folder_path(**want)
    got = MessagesV1Beta3Client.parse_common_folder_path(built)
    assert want == got
def test_common_organization_path():
    """common_organization_path must produce the canonical resource name."""
    organization = "oyster"
    want = "organizations/{organization}".format(organization=organization,)
    got = MessagesV1Beta3Client.common_organization_path(organization)
    assert want == got
def test_parse_common_organization_path():
    """Parsing a freshly built organization path must round-trip its parts."""
    want = {
        "organization": "nudibranch",
    }
    built = MessagesV1Beta3Client.common_organization_path(**want)
    got = MessagesV1Beta3Client.parse_common_organization_path(built)
    assert want == got
def test_common_project_path():
    """common_project_path must produce the canonical resource name."""
    project = "cuttlefish"
    want = "projects/{project}".format(project=project,)
    got = MessagesV1Beta3Client.common_project_path(project)
    assert want == got
def test_parse_common_project_path():
    """Parsing a freshly built project path must round-trip its parts."""
    want = {
        "project": "mussel",
    }
    built = MessagesV1Beta3Client.common_project_path(**want)
    got = MessagesV1Beta3Client.parse_common_project_path(built)
    assert want == got
def test_common_location_path():
    """common_location_path must combine project and location correctly."""
    project = "winkle"
    location = "nautilus"
    want = "projects/{project}/locations/{location}".format(
        project=project, location=location,
    )
    got = MessagesV1Beta3Client.common_location_path(project, location)
    assert want == got
def test_parse_common_location_path():
    """Parsing a freshly built location path must round-trip its parts."""
    want = {
        "project": "scallop",
        "location": "abalone",
    }
    built = MessagesV1Beta3Client.common_location_path(**want)
    got = MessagesV1Beta3Client.parse_common_location_path(built)
    assert want == got
def test_client_with_default_client_info():
    """A caller-supplied client_info must be forwarded to _prep_wrapped_messages.

    Checked twice: once when constructing the client directly, and once when
    constructing the transport class resolved via get_transport_class().
    """
    client_info = gapic_v1.client_info.ClientInfo()
    with mock.patch.object(
        transports.MessagesV1Beta3Transport, "_prep_wrapped_messages"
    ) as prep:
        client = MessagesV1Beta3Client(
            credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
        )
        prep.assert_called_once_with(client_info)
    with mock.patch.object(
        transports.MessagesV1Beta3Transport, "_prep_wrapped_messages"
    ) as prep:
        transport_class = MessagesV1Beta3Client.get_transport_class()
        transport = transport_class(
            credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
        )
        prep.assert_called_once_with(client_info)
@pytest.mark.asyncio
async def test_transport_close_async():
    """Exiting the async client context manager must close the grpc channel."""
    client = MessagesV1Beta3AsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
    )
    with mock.patch.object(
        type(getattr(client.transport, "grpc_channel")), "close"
    ) as close:
        async with client:
            # Channel must stay open while inside the context.
            close.assert_not_called()
        close.assert_called_once()
def test_transport_close():
    """Exiting the sync client context manager must close the transport channel.

    Maps each transport name to the attribute holding its channel so the
    close() of the right object can be asserted.
    """
    transports = {
        "grpc": "_grpc_channel",
    }
    for transport, close_name in transports.items():
        client = MessagesV1Beta3Client(
            credentials=ga_credentials.AnonymousCredentials(), transport=transport
        )
        with mock.patch.object(
            type(getattr(client.transport, close_name)), "close"
        ) as close:
            with client:
                # Channel must stay open while inside the context.
                close.assert_not_called()
            close.assert_called_once()
def test_client_ctx():
    """Using the client as a context manager must delegate close() to the transport."""
    transports = [
        "grpc",
    ]
    for transport in transports:
        client = MessagesV1Beta3Client(
            credentials=ga_credentials.AnonymousCredentials(), transport=transport
        )
        # Test client calls underlying transport.
        with mock.patch.object(type(client.transport), "close") as close:
            close.assert_not_called()
            with client:
                pass
            close.assert_called()
@pytest.mark.parametrize(
    "client_class,transport_class",
    [
        (MessagesV1Beta3Client, transports.MessagesV1Beta3GrpcTransport),
        (MessagesV1Beta3AsyncClient, transports.MessagesV1Beta3GrpcAsyncIOTransport),
    ],
)
def test_api_key_credentials(client_class, transport_class):
    """ClientOptions.api_key must be converted into credentials for the transport.

    get_api_key_credentials is patched (create=True because it may not exist
    on older google-auth versions) and its return value must be handed to the
    transport constructor in place of ADC.
    """
    with mock.patch.object(
        google.auth._default, "get_api_key_credentials", create=True
    ) as get_api_key_credentials:
        mock_cred = mock.Mock()
        get_api_key_credentials.return_value = mock_cred
        options = client_options.ClientOptions()
        options.api_key = "api_key"
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(client_options=options)
            patched.assert_called_once_with(
                credentials=mock_cred,
                credentials_file=None,
                host=client.DEFAULT_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )
|
googleapis/python-dataflow-client
|
tests/unit/gapic/dataflow_v1beta3/test_messages_v1_beta3.py
|
Python
|
apache-2.0
| 56,970
|
[
"Octopus"
] |
a70c4b8206b1af32531009cee4e030c3ed3af0d59e34c45faf141411f87d1085
|
import ddapp
import math
import textwrap
import drc as lcmdrc
import bot_core as lcmbotcore
import vtkAll as vtk
from ddapp import transformUtils
from ddapp import visualization as vis
from ddapp import objectmodel as om
from ddapp import lcmUtils
from ddapp import ik
from ddapp import cameraview
from ddapp import affordanceupdater
from ddapp import affordancemanager
from ddapp import segmentation
from ddapp import robotstate
from ddapp.debugVis import DebugData
from ddapp.utime import getUtime
from ddapp.ikplanner import ConstraintSet
import ddapp.tasks.robottasks as rt
from ddapp.ikparameters import IkParameters
from ddapp.timercallback import TimerCallback
import os
import functools
import numpy as np
import scipy.io
from ddapp.tasks.taskuserpanel import TaskUserPanel
from ddapp import drcargs
class DrivingPlanner(object):
def __init__(self, ikServer, robotSystem):
    """Set up the driving planner.

    Registers `initialize` to run once the MATLAB IK server has started,
    caches joint indices/limits for the throttle and steering joints, and
    creates (but does not start) the 5 Hz throttle/steering command timers.

    :param ikServer: MATLAB IK server wrapper (provides taskQueue/comm).
    :param robotSystem: director robot system (models, state, planners).
    """
    self.ikServer = ikServer
    self.robotSystem = robotSystem
    # Run the MATLAB-side driving planner startup once the server is up.
    self.ikServer.connectStartupCompleted(self.initialize)
    self.steeringAngleDegrees = 0.0
    self.maxTurningRadius = 9.5
    # Trajectory-visualization defaults (meters / degrees / segment count).
    self.trajectoryX = 0
    self.trajectoryY = 0.3
    self.trajectoryAngle = 0
    self.trajSegments = 25
    self.wheelDistance = 1.4  # lateral distance between wheels, meters
    self.tagToLocalTransform = transformUtils.transformFromPose([0,0,0],[1,0,0,0])
    self.commandStreamChannel = 'JOINT_POSITION_GOAL'
    # Joint names come from the director config; indices into the Drake pose.
    self.drivingThrottleJoint = drcargs.getDirectorConfig()['drivingThrottleJoint']
    self.drivingSteeringJoint = drcargs.getDirectorConfig()['drivingSteeringJoint']
    self.akyIdx = robotstate.getDrakePoseJointNames().index( self.drivingThrottleJoint )
    self.lwyIdx = robotstate.getDrakePoseJointNames().index( self.drivingSteeringJoint )
    self.anklePositions = np.array([np.nan,np.nan])  # filled in later by calibration
    self.jointLimitsMin = np.array([self.robotSystem.teleopRobotModel.model.getJointLimits(jointName)[0] for jointName in robotstate.getDrakePoseJointNames()])
    self.jointLimitsMax = np.array([self.robotSystem.teleopRobotModel.model.getJointLimits(jointName)[1] for jointName in robotstate.getDrakePoseJointNames()])
    self.idleAngleSlack = 10
    self.fineGrainedThrottleTravel = 10
    self.steeringAngleOffset = 0
    self.throttlePublishChannel = 'SINGLE_JOINT_POSITION_GOAL'
    self.steeringPublishChannel = 'SINGLE_JOINT_POSITION_GOAL'
    self.addSubscribers()
    # Grasp state captured at grasp time; None until a grasp has happened.
    self.graspWheelAngle = None
    self.graspWristAngle = None
    self.kneeInPedal = 0  # nonzero selects the knee-in pedal frames
    self.angleToleranceInDegrees = 10
    self.distanceAbovePedal = 0.05
    self.distanceAboveFootStartPose = 0.2
    self.plans = []
    # 5 Hz timers that re-publish the latest throttle/steering command.
    self.throttleCommandTimer = TimerCallback(targetFps=5)
    self.throttleCommandTimer.callback = self.publishThrottleCommand
    self.throttleCommandMsg = None
    self.steeringCommandTimer = TimerCallback(targetFps=5)
    self.steeringCommandTimer.callback = self.publishSteeringCommand
    self.steeringCommandMsg = None
def getInitCommands(self):
    """Return the MATLAB startup script for the driving planner as a one-item list."""
    startupScript = textwrap.dedent('''
        % ------ driving planner startup ------
        addpath([getenv('DRC_BASE'), '/software/control/matlab/planners/driving_planner']);
        clear driving_planner_options;
        driving_planner_options.listen_to_lcm_flag = 0;
        driving_planner_options.qstar = q_nom;
        dp = drivingPlanner(s.robot, driving_planner_options);
        % ------ driving planner startup end ------
        ''')
    return [startupScript]
def addSubscribers(self):
    """Subscribe to the LCM throttle/steering command channels.

    Incoming messages are routed to self.onThrottleCommand /
    self.onSteeringCommand (defined elsewhere in this class).
    """
    lcmUtils.addSubscriber('THROTTLE_COMMAND', lcmdrc.trigger_finger_t , self.onThrottleCommand)
    lcmUtils.addSubscriber('STEERING_COMMAND', lcmdrc.driving_control_cmd_t , self.onSteeringCommand)
def initialize(self, ikServer, success):
    """Startup-completed callback: send the driving-planner init script once.

    Skipped on server restarts so the MATLAB-side `dp` object is not
    re-created. `success` is accepted for the callback signature but unused.
    """
    if ikServer.restarted:
        return
    commands = self.getInitCommands()
    self.ikServer.taskQueue.addTask(functools.partial(self.ikServer.comm.sendCommandsAsync, commands))
    self.ikServer.taskQueue.start()
# applies the properties to the driving planner object
def applyProperties(self):
    """Push tolerance/seed options to the MATLAB `dp` object asynchronously.

    NOTE(review): self.quatTol, self.positionTol and self.seedWithCurrent are
    not set in __init__ — presumably assigned by a properties panel elsewhere;
    confirm before calling this early.
    """
    commands = []
    commands.append("dp.options.quat_tol = %r;" % self.quatTol)
    commands.append("dp.options.tol = %r;" % self.positionTol)
    commands.append("dp.options.seed_with_current = %r;" % self.seedWithCurrent)
    self.ikServer.taskQueue.addTask(functools.partial(self.ikServer.comm.sendCommandsAsync, commands))
    self.ikServer.taskQueue.start()
def updateWheelTransform(self, xyzquat):
    """Send the current pose and a new steering-wheel pose (xyz+quaternion)
    to the MATLAB driving planner, synchronously."""
    startPose = self.getPlanningStartPose()
    matlabCommands = [
        "q0 = %s;" % ik.ConstraintBase.toColumnVectorString(startPose),
        "xyzquat = %s;" % ik.ConstraintBase.toColumnVectorString(xyzquat),
        "dp = dp.updateWheelTransform(xyzquat, q0);",
    ]
    self.ikServer.comm.sendCommands(matlabCommands)
def planSafe(self, speed=1):
    """Queue an asynchronous 'safe posture' plan request with the MATLAB planner."""
    startPose = self.getPlanningStartPose()
    matlabCommands = [
        "clear options;",
        "options.speed = %r;" % speed,
        "dp.planSafe(options,%s);" % ik.ConstraintBase.toColumnVectorString(startPose),
    ]
    self.ikServer.taskQueue.addTask(functools.partial(self.ikServer.comm.sendCommandsAsync, matlabCommands))
    self.ikServer.taskQueue.start()
def planPreGrasp(self, depth=0.2, xyz_des=None, angle=0, speed=1, graspLocation='center', turnRadius=0.187):
    """Plan a pre-grasp pose near the steering wheel via the MATLAB planner.

    :param depth: approach depth in meters.
    :param xyz_des: optional desired hand position (column vector-able).
    :param angle: grasp angle in degrees (converted to radians for MATLAB).
    :param speed: plan speed scaling.
    :param graspLocation: named location on the wheel, e.g. 'center'.
    :param turnRadius: wheel radius option passed to the planner.

    Sends the commands synchronously, waits for the resulting manip plan and
    records it via self.addPlan().
    """
    commands = []
    commands.append("clear options;")
    commands.append("options = struct('depth',{%r});" % depth)
    commands.append("options.turn_radius = %r;" % turnRadius)
    commands.append("options.graspLocation = '%s';" % graspLocation)
    commands.append("options.angle = %r;" % np.radians(angle))
    commands.append("options.speed = %r;" % speed)
    if xyz_des is not None:
        # BUG FIX: the original called commands.append(fmt, arg) — list.append
        # takes one argument, so supplying xyz_des raised TypeError. The format
        # string must be %-interpolated first.
        commands.append("options.xyz_des = {%s};" % ik.ConstraintBase.toColumnVectorString(xyz_des))
    startPose = self.getPlanningStartPose()
    commands.append("dp.planPreGrasp(options, %s);" % ik.ConstraintBase.toColumnVectorString(startPose))
    listener = self.getManipPlanListener()
    self.ikServer.comm.sendCommands(commands)
    plan = listener.waitForResponse()
    listener.finish()
    self.addPlan(plan)
def planTouch(self, depth=0, xyz_des=None, speed=1):
    """Plan touching the wheel at the given depth via the MATLAB planner.

    NOTE: xyz_des is accepted for signature parity with planPreGrasp but is
    not forwarded to MATLAB here.
    """
    startPose = self.getPlanningStartPose()
    matlabCommands = [
        "clear options;",
        "options = struct('depth',{%r});" % depth,
        "options.speed = %r;" % speed,
        "dp.planTouch(options, %s);" % ik.ConstraintBase.toColumnVectorString(startPose),
    ]
    listener = self.getManipPlanListener()
    self.ikServer.comm.sendCommands(matlabCommands)
    plan = listener.waitForResponse()
    listener.finish()
    self.addPlan(plan)
def planRetract(self, depth=0.2, speed=1):
    """Plan retracting the hand from the wheel via the MATLAB planner.

    Waits synchronously for the resulting manip plan and records it via
    self.addPlan().
    """
    commands = []
    commands.append("clear options;")
    commands.append("options = struct('depth',{%r});" % depth)
    # Consistency fix: every sibling plan* method formats speed with %r;
    # the original used %s here.
    commands.append("options.speed = %r;" % speed)
    startPose = self.getPlanningStartPose()
    commands.append("dp.planRetract(options, %s);" % ik.ConstraintBase.toColumnVectorString(startPose))
    listener = self.getManipPlanListener()
    self.ikServer.comm.sendCommands(commands)
    plan = listener.waitForResponse()
    listener.finish()
    self.addPlan(plan)
def planTurn(self, angle=0, speed=1):
    """Plan turning the steering wheel by `angle` degrees (raw-angle mode)."""
    startPose = self.getPlanningStartPose()
    matlabCommands = [
        "clear options;",
        "options.turn_angle = %r;" % np.radians(angle),
        "options.speed = %r;" % speed,
        "options.use_raw_angle = 1;",
        "dp.planTurn(options,%s);" % ik.ConstraintBase.toColumnVectorString(startPose),
    ]
    listener = self.getManipPlanListener()
    self.ikServer.comm.sendCommands(matlabCommands)
    plan = listener.waitForResponse()
    listener.finish()
    self.addPlan(plan)
def planSteeringWheelTurn(self, speed=1, knotPoints=20, turnRadius=.187, gazeTol=0.3):
    """Queue an asynchronous steering-wheel-turn plan request."""
    startPose = self.getPlanningStartPose()
    matlabCommands = [
        "clear options;",
        "options.speed = %r;" % speed,
        "options.turn_radius = %r;" % turnRadius,
        "options.N = %r;" % knotPoints,
        "options.steering_gaze_tol = %r;" % gazeTol,
        "dp.planSteeringWheelTurn(options,%s);" % ik.ConstraintBase.toColumnVectorString(startPose),
    ]
    self.ikServer.taskQueue.addTask(functools.partial(self.ikServer.comm.sendCommandsAsync, matlabCommands))
    self.ikServer.taskQueue.start()
def planSeed(self):
    """Queue an asynchronous seed-plan request from the current pose."""
    startPose = self.getPlanningStartPose()
    matlabCommands = ["dp.planSeed(%s);" % ik.ConstraintBase.toColumnVectorString(startPose)]
    self.ikServer.taskQueue.addTask(functools.partial(self.ikServer.comm.sendCommandsAsync, matlabCommands))
    self.ikServer.taskQueue.start()
def getPlanningStartPose(self):
return self.robotSystem.robotStateJointController.q
# move left leg up a bit
def planLegUp(self):
    """Plan raising the left foot to the 'left foot up frame' object.

    All joints except the left leg are locked at the current pose; the plan
    is appended to self.plans and returned.
    """
    ikPlanner = self.robotSystem.ikPlanner
    startPose = self.getPlanningStartPose()
    startPoseName = 'q_start_foot'
    self.robotSystem.ikPlanner.addPose(startPose, startPoseName)
    endPoseName = 'q_foot_end'
    # NOTE(review): lFoot2World is computed but never used below.
    lFoot2World = self.robotSystem.ikPlanner.getLinkFrameAtPose('l_foot', startPose)
    # targetFrame = transformUtils.copyFrame(lFoot2World)
    # targetFrame.PreMultiply()
    # targetFrame.Translate([0.0,0.0, self.distanceAboveFootStartPose])
    targetFrame = transformUtils.copyFrame(om.findObjectByName('left foot up frame').transform)
    footPoseConstraint = self.createLeftFootPoseConstraint(targetFrame)
    allButLeftLegPostureConstraint = self.createAllButLeftLegPostureConstraint(startPoseName)
    constraints = [allButLeftLegPostureConstraint]
    constraints.extend(footPoseConstraint)
    cs = ConstraintSet(ikPlanner, constraints, endPoseName, startPoseName)
    cs.ikParameters = IkParameters(maxDegreesPerSecond=10, usePointwise=False)
    # NOTE(review): only 'q_start_foot' was registered above; presumably
    # 'q_start' is defined on the IK server elsewhere — confirm.
    cs.seedPoseName = 'q_start'
    cs.nominalPoseName = 'q_start'
    endPose = cs.runIk()
    plan = cs.planEndPoseGoal()
    self.plans.append(plan)
    return plan
def planLegSwingIn(self):
    """Plan swinging the left leg into the car, ending above the pedal.

    The end target depends on self.kneeInPedal (knee-in variant vs. straight
    driving frame). A mid-trajectory waypoint ('left foot pedal swing') is
    added at t=0.3 after the end pose is solved. The keyframe plan is
    appended to self.plans and returned.
    """
    om.findObjectByName('left foot driving')
    ikPlanner = self.robotSystem.ikPlanner
    startPose = self.getPlanningStartPose()
    startPoseName = 'q_start_foot'
    self.robotSystem.ikPlanner.addPose(startPose, startPoseName)
    endPoseName = 'q_foot_end'
    if self.kneeInPedal:
        legAbovePedalFrame = transformUtils.copyFrame(om.findObjectByName('left foot driving knee in').transform)
        legAbovePedalFrame.PreMultiply()
        legAbovePedalFrame.Translate([0.0, 0.0, self.distanceAbovePedal])
    else:
        legAbovePedalFrame = transformUtils.copyFrame(om.findObjectByName('left foot driving').transform)
        legAbovePedalFrame.PreMultiply()
        legAbovePedalFrame.Translate([-0.02, 0.0, 0.03])
    identityFrame = vtk.vtkTransform()
    legAbovePedalConstraint = self.createLeftFootPoseConstraint(legAbovePedalFrame, tspan=[1,1], angleToleranceInDegrees=self.angleToleranceInDegrees)
    allButLeftLegPostureConstraint = self.createAllButLeftLegPostureConstraint(startPoseName)
    constraints = [allButLeftLegPostureConstraint]
    constraints.extend(legAbovePedalConstraint)
    seedPoseName = 'q_driving'
    seedPose = ikPlanner.getMergedPostureFromDatabase(startPose, 'driving', 'driving')
    self.robotSystem.ikPlanner.addPose(seedPose, seedPoseName)
    cs = ConstraintSet(ikPlanner, constraints, endPoseName, startPoseName)
    cs.ikParameters = IkParameters(maxDegreesPerSecond=10, usePointwise=False)
    cs.seedPoseName = 'q_driving'
    cs.nominalPoseName = 'q_driving'
    endPose = cs.runIk()
    # add constraint that we hit intermediate frame, maybe doesn't have to be exact???
    legSwingFrame = om.findObjectByName('left foot pedal swing').transform
    cs.constraints.extend(self.createLeftFootPoseConstraint(legSwingFrame, tspan=[0.3,0.3]))
    keyFramePlan = cs.runIkTraj()
    self.plans.append(keyFramePlan)
    return keyFramePlan
def planLegAbovePedal(self, startPose=None):
    """Plan moving the left foot to hover above the pedal.

    :param startPose: optional start configuration; defaults to the current
        robot state. End-pose plan appended to self.plans and returned.
    """
    om.findObjectByName('left foot driving')
    ikPlanner = self.robotSystem.ikPlanner
    if startPose is None:
        startPose = self.getPlanningStartPose()
    startPoseName = 'q_start_foot'
    self.robotSystem.ikPlanner.addPose(startPose, startPoseName)
    endPoseName = 'q_foot_end'
    # Knee-in variant hovers distanceAbovePedal above its frame; the plain
    # driving frame gets a small fixed offset instead.
    if self.kneeInPedal:
        legAbovePedalFrame = transformUtils.copyFrame(om.findObjectByName('left foot driving knee in').transform)
        legAbovePedalFrame.PreMultiply()
        legAbovePedalFrame.Translate([0.0, 0, self.distanceAbovePedal])
    else:
        legAbovePedalFrame = transformUtils.copyFrame(om.findObjectByName('left foot driving').transform)
        legAbovePedalFrame.PreMultiply()
        legAbovePedalFrame.Translate([-0.02, 0.0, 0.03])
    identityFrame = vtk.vtkTransform()
    legAbovePedalConstraint = self.createLeftFootPoseConstraint(legAbovePedalFrame, tspan=[1,1], angleToleranceInDegrees=self.angleToleranceInDegrees)
    allButLeftLegPostureConstraint = self.createAllButLeftLegPostureConstraint(startPoseName)
    constraints = [allButLeftLegPostureConstraint]
    constraints.extend(legAbovePedalConstraint)
    seedPoseName = 'q_driving'
    seedPose = ikPlanner.getMergedPostureFromDatabase(startPose, 'driving', 'driving')
    self.robotSystem.ikPlanner.addPose(seedPose, seedPoseName)
    cs = ConstraintSet(ikPlanner, constraints, endPoseName, startPoseName)
    cs.ikParameters = IkParameters(maxDegreesPerSecond=10, usePointwise=False)
    cs.seedPoseName = 'q_driving'
    cs.nominalPoseName = 'q_driving'
    endPose = cs.runIk()
    plan = cs.planEndPoseGoal()
    self.plans.append(plan)
    return plan
def planLegSwingOut(self, startPose=None):
    """Plan swinging the left leg out of the pedal area to the 'up' frame.

    Mirrors planLegSwingIn but seeds from the 'car_entry_new' posture and
    places the swing waypoint at t=0.7. Keyframe plan appended to self.plans
    and returned.
    """
    om.findObjectByName('left foot driving')
    ikPlanner = self.robotSystem.ikPlanner
    if startPose is None:
        startPose = self.getPlanningStartPose()
    startPoseName = 'q_start_foot'
    self.robotSystem.ikPlanner.addPose(startPose, startPoseName)
    endPoseName = 'q_foot_end'
    # legUpFrame = transformUtils.copyFrame(om.findObjectByName('left foot start').transform)
    # legUpFrame.PreMultiply()
    # legUpFrame.Translate([0.0,0.0, self.distanceAboveFootStartPose])
    legUpFrame = transformUtils.copyFrame(om.findObjectByName('left foot up frame').transform)
    identityFrame = vtk.vtkTransform()
    legUpConstraint = self.createLeftFootPoseConstraint(legUpFrame, tspan=[1,1], angleToleranceInDegrees=10)
    allButLeftLegPostureConstraint = self.createAllButLeftLegPostureConstraint(startPoseName)
    constraints = [allButLeftLegPostureConstraint]
    constraints.extend(legUpConstraint)
    seedPoseName = 'q_driving'
    seedPose = ikPlanner.getMergedPostureFromDatabase(startPose, 'driving', 'car_entry_new')
    self.robotSystem.ikPlanner.addPose(seedPose, seedPoseName)
    cs = ConstraintSet(ikPlanner, constraints, endPoseName, startPoseName)
    cs.ikParameters = IkParameters(maxDegreesPerSecond=10, usePointwise=False)
    cs.seedPoseName = 'q_driving'
    cs.nominalPoseName = 'q_driving'
    endPose = cs.runIk()
    legSwingFrame = om.findObjectByName('left foot pedal swing').transform
    cs.constraints.extend(self.createLeftFootPoseConstraint(legSwingFrame, tspan=[0.7,0.7]))
    keyFramePlan = cs.runIkTraj()
    self.plans.append(keyFramePlan)
    return keyFramePlan
def planLegEgressStart(self, startPose=None):
    """Plan placing the left foot next to the right foot for egress.

    Uses the captured 'left foot to right foot' relative frame (see
    captureLeftFootToRightFootTransform) composed with the current right-foot
    pose. End-pose plan appended to self.plans and returned.
    """
    om.findObjectByName('left foot driving')
    ikPlanner = self.robotSystem.ikPlanner
    if startPose is None:
        startPose = self.getPlanningStartPose()
    startPoseName = 'q_start_foot'
    self.robotSystem.ikPlanner.addPose(startPose, startPoseName)
    endPoseName = 'q_foot_end'
    lFoot2RFoot = om.findObjectByName('left foot to right foot')
    assert lFoot2RFoot
    rFoot2World = self.robotSystem.ikPlanner.getLinkFrameAtPose('r_foot', startPose)
    lFootGoalFrame = transformUtils.concatenateTransforms([transformUtils.copyFrame(lFoot2RFoot.transform), rFoot2World])
    legDownFrame = transformUtils.copyFrame(lFootGoalFrame)
    identityFrame = vtk.vtkTransform()
    legDownConstraint = self.createLeftFootPoseConstraint(legDownFrame)
    allButLeftLegPostureConstraint = self.createAllButLeftLegPostureConstraint(startPoseName)
    constraints = [allButLeftLegPostureConstraint]
    constraints.extend(legDownConstraint)
    seedPoseName = 'q_driving'
    seedPose = ikPlanner.getMergedPostureFromDatabase(startPose, 'driving', 'driving')
    self.robotSystem.ikPlanner.addPose(seedPose, seedPoseName)
    cs = ConstraintSet(ikPlanner, constraints, endPoseName, startPoseName)
    cs.ikParameters = IkParameters(maxDegreesPerSecond=10, usePointwise=False)
    cs.seedPoseName = 'q_driving'
    cs.nominalPoseName = 'q_driving'
    endPose = cs.runIk()
    plan = cs.planEndPoseGoal()
    self.plans.append(plan)
    return plan
def planLegEgressFull(self):
    """Chain the three egress sub-plans into one rescaled trajectory.

    Runs planLegAbovePedal -> planLegSwingOut -> planLegEgressStart, saving
    each trajectory on the MATLAB side, each stage starting from the final
    pose of the previous stage's plan; then concatenates and rescales them
    and records the combined plan via self.addPlan().
    """
    legAbovePedalName = 'qtraj_leg_above_pedal'
    self.planLegAbovePedal()
    self.saveOriginalTraj(legAbovePedalName)
    # Start each subsequent stage from the end of the previous plan.
    nextStartPose = robotstate.convertStateMessageToDrakePose(self.plans[-1].plan[-1])
    self.planLegSwingOut(startPose=nextStartPose)
    legSwingOutName = 'qtraj_leg_swing_out'
    self.saveOriginalTraj(legSwingOutName)
    nextStartPose = robotstate.convertStateMessageToDrakePose(self.plans[-1].plan[-1])
    self.planLegEgressStart(startPose=nextStartPose)
    legEgressStartName = 'qtraj_leg_egress_start'
    self.saveOriginalTraj(legEgressStartName)
    ikParameters = IkParameters(usePointwise=False, maxDegreesPerSecond=10)
    ikParameters = self.robotSystem.ikPlanner.mergeWithDefaultIkParameters(ikParameters)
    listener = self.getManipPlanListener()
    _ = self.concatenateAndRescaleTrajectories([legAbovePedalName, legSwingOutName, legEgressStartName], 'qtraj_foot_egress_start', 'ts', ikParameters)
    plan = listener.waitForResponse()
    listener.finish()
    self.addPlan(plan)
def planLegPedal(self):
    """Plan placing the left foot onto the pedal (exact pose, no tolerance).

    Target frame depends on self.kneeInPedal. Uses a tight quasi-static
    shrink factor of 1. End-pose plan appended to self.plans and returned.
    """
    ikPlanner = self.robotSystem.ikPlanner
    startPose = self.getPlanningStartPose()
    startPoseName = 'q_start_foot'
    self.robotSystem.ikPlanner.addPose(startPose, startPoseName)
    endPoseName = 'q_foot_end'
    if self.kneeInPedal:
        lfootConstraintFrame = transformUtils.copyFrame(om.findObjectByName('left foot on pedal').transform)
    else:
        lfootConstraintFrame = transformUtils.copyFrame(om.findObjectByName('left foot driving').transform)
    identityFrame = vtk.vtkTransform()
    lfootPositionOrientationConstraint = ikPlanner.createPositionOrientationConstraint('l_foot', lfootConstraintFrame, identityFrame)
    allButLeftLegPostureConstraint = self.createAllButLeftLegPostureConstraint(startPoseName)
    constraints = [allButLeftLegPostureConstraint]
    constraints.extend(lfootPositionOrientationConstraint)
    seedPoseName = 'q_driving'
    seedPose = ikPlanner.getMergedPostureFromDatabase(startPose, 'driving', 'driving')
    self.robotSystem.ikPlanner.addPose(seedPose, seedPoseName)
    cs = ConstraintSet(ikPlanner, constraints, endPoseName, startPoseName)
    cs.ikParameters = IkParameters(quasiStaticShrinkFactor=1, maxDegreesPerSecond=10, usePointwise=False)
    cs.seedPoseName = 'q_driving'
    cs.nominalPoseName = 'q_driving'
    endPose = cs.runIk()
    keyFramePlan = cs.planEndPoseGoal()
    self.plans.append(keyFramePlan)
    return keyFramePlan
def captureHandPose(self):
    """Snapshot the left palm pose and steering-wheel angle before a re-grasp.

    Stores self.wheelAngleBeforeReGrasp and self.palmToWorldBeforeRegrasp
    for later use by planSteeringWheelReGrasp.
    """
    currentPose = self.getPlanningStartPose()
    self.wheelAngleBeforeReGrasp = self.getSteeringWheelAngle()
    planner = self.robotSystem.ikPlanner
    side = 'left'
    palmToHand = planner.getPalmToHandLink(side)
    self.palmToWorldBeforeRegrasp = planner.newGraspToWorldFrame(currentPose, side, palmToHand)
def planSteeringWheelReGrasp(self, useLineConstraint=True):
    """Plan re-grasping the steering wheel with the left hand flipped 180 deg.

    Starts from the palm pose captured by captureHandPose(), targets the same
    pose rotated 180 deg about palm Y, with a pre-grasp waypoint at t=0.5 at
    the current palm position but final orientation. If useLineConstraint,
    the approach is additionally constrained to a straight line. Plan is
    appended to self.plans and returned.
    """
    ikPlanner = self.robotSystem.ikPlanner
    startPose = self.getPlanningStartPose()
    startPoseName = 'q_regrasp_start'
    self.robotSystem.ikPlanner.addPose(startPose, startPoseName)
    endPoseName = 'q_regrasp_end'
    handName = 'left'
    handLinkName = 'l_hand'
    maxMetersPerSecond = 0.1
    # NOTE(review): retractDepth is never used below.
    retractDepth = 0.15
    palmToHand = ikPlanner.getPalmToHandLink(handName)
    palmToWorldBeforeRegrasp = self.palmToWorldBeforeRegrasp
    finalTargetFrame = transformUtils.copyFrame(palmToWorldBeforeRegrasp)
    finalTargetFrame.PreMultiply()
    finalTargetFrame.RotateY(180)
    finalPoseConstraint = self.createLeftPalmPoseConstraints(finalTargetFrame, tspan=[1,1])
    palmToWorld = ikPlanner.newGraspToWorldFrame(startPose, handName, palmToHand)
    palmPosition = palmToWorld.GetPosition()
    _, finalPose = transformUtils.poseFromTransform(finalTargetFrame)
    # Pre-grasp: current position, final orientation.
    preGraspTargetFrame = transformUtils.transformFromPose(palmPosition, finalPose)
    preGraspPoseConstraint = self.createLeftPalmPoseConstraints(preGraspTargetFrame, tspan=[0.5, 0.5])
    allButLeftArmPostureConstraint = self.createAllButLeftArmPostureConstraint(startPoseName)
    # NOTE(review): the three locked-* constraints below are created but never
    # added to `constraints` — confirm whether they were meant to be used.
    lockedBaseConstraint = ikPlanner.createLockedBasePostureConstraint(startPoseName)
    lockedRightArmConstraint = ikPlanner.createLockedRightArmPostureConstraint(startPoseName)
    lockedTorsoConstraint = ikPlanner.createLockedTorsoPostureConstraint(startPoseName)
    constraints = [allButLeftArmPostureConstraint]
    constraints.extend(finalPoseConstraint)
    seedPoseName = 'q_regrasp_seed'
    seedPose = ikPlanner.getMergedPostureFromDatabase(startPose, 'driving', 'driving')
    self.robotSystem.ikPlanner.addPose(seedPose, seedPoseName)
    constraintSet = ConstraintSet(ikPlanner, constraints, endPoseName, startPoseName)
    constraintSet.ikParameters = IkParameters(quasiStaticShrinkFactor=10, usePointwise=False, maxDegreesPerSecond=60,
        maxBodyTranslationSpeed=maxMetersPerSecond, rescaleBodyNames=[handLinkName], rescaleBodyPts=list(ikPlanner.getPalmPoint()))
    constraintSet.seedPoseName = seedPoseName
    constraintSet.nominalPoseName = seedPoseName
    endPose = constraintSet.runIk()
    # move on line constraint
    motionVector = np.array(preGraspTargetFrame.GetPosition()) - np.array(finalTargetFrame.GetPosition())
    motionTargetFrame = transformUtils.getTransformFromOriginAndNormal(np.array(preGraspTargetFrame.GetPosition()), motionVector)
    p = ikPlanner.createLinePositionConstraint(handLinkName, palmToHand, motionTargetFrame, lineAxis=2, bounds=[-np.linalg.norm(motionVector)*1, 0], positionTolerance=0.001)
    p.tspan = np.linspace(0,1,5)
    # NOTE(review): runIk() is called a second time here with the same
    # constraint set — the first result is discarded; confirm intent.
    endPose = constraintSet.runIk()
    constraintSet.constraints.extend(preGraspPoseConstraint)
    # orientation constraint for 0.5, 1
    _, orientationConstraint = ikPlanner.createPositionOrientationGraspConstraints(handName, finalTargetFrame, graspToHandLinkFrame=palmToHand, positionTolerance=0.0, angleToleranceInDegrees=0.0)
    orientationConstraint.tspan = np.array([0.5,0.6,0.8,1])
    constraintSet.constraints.append(orientationConstraint)
    if useLineConstraint:
        constraintSet.constraints.append(p)
        plan = constraintSet.runIkTraj()
    else:
        plan = constraintSet.runIkTraj()
    self.plans.append(plan)
    return plan
def createLeftFootPoseConstraint(self, targetFrame, tspan=None, angleToleranceInDegrees=0.0):
    """Return (position, orientation) constraints pinning l_foot to targetFrame.

    :param tspan: constraint time span; defaults to [-inf, inf].
    :param angleToleranceInDegrees: orientation tolerance.
    """
    # Fix: mutable default argument ([-np.inf, np.inf]) replaced with a
    # None sentinel; behavior is unchanged for all callers.
    if tspan is None:
        tspan = [-np.inf, np.inf]
    positionConstraint, orientationConstraint = self.robotSystem.ikPlanner.createPositionOrientationConstraint('l_foot', targetFrame, vtk.vtkTransform(), angleToleranceInDegrees=angleToleranceInDegrees)
    positionConstraint.tspan = tspan
    orientationConstraint.tspan = tspan
    return positionConstraint, orientationConstraint
def createLeftPalmPoseConstraints(self, targetFrame, tspan=None):
    """Return (position, orientation) grasp constraints for the left palm.

    :param tspan: constraint time span; defaults to [-inf, inf].
    """
    # Fix: mutable default argument replaced with a None sentinel;
    # behavior is unchanged for all callers.
    if tspan is None:
        tspan = [-np.inf, np.inf]
    ikPlanner = self.robotSystem.ikPlanner
    positionConstraint, orientationConstraint = ikPlanner.createPositionOrientationGraspConstraints('left', targetFrame)
    positionConstraint.tspan = tspan
    orientationConstraint.tspan = tspan
    return positionConstraint, orientationConstraint
# NOTE: a verbatim duplicate definition of createLeftPalmPoseConstraints was
# removed here — it silently shadowed the identical method defined just above.
def createPalmPoseConstraints(self, side, targetFrame, tspan=None):
    """Return (position, orientation) grasp constraints for the given palm.

    :param side: 'left' or 'right'.
    :param tspan: constraint time span; defaults to [-inf, inf].
    """
    # Fix: mutable default argument replaced with a None sentinel;
    # behavior is unchanged for all callers.
    if tspan is None:
        tspan = [-np.inf, np.inf]
    ikPlanner = self.robotSystem.ikPlanner
    positionConstraint, orientationConstraint = ikPlanner.createPositionOrientationGraspConstraints(side, targetFrame)
    positionConstraint.tspan = tspan
    orientationConstraint.tspan = tspan
    return positionConstraint, orientationConstraint
def createLeftHandPoseConstraintOnWheel(self, depth=0.12, tspan=None):
    """Return constraints pinning l_hand_face `depth` meters into the wheel.

    :param depth: translation along the wheel palm frame's Y axis.
    :param tspan: constraint time span; defaults to [-inf, inf].
    """
    # Fix: mutable default argument replaced with a None sentinel;
    # behavior is unchanged for all callers.
    if tspan is None:
        tspan = [-np.inf, np.inf]
    targetFrame = self.getSteeringWheelPalmFrame()
    targetFrame.PreMultiply()
    targetFrame.Translate([0.0, depth, 0.0])
    positionConstraint, orientationConstraint = self.robotSystem.ikPlanner.createPositionOrientationConstraint('l_hand_face', targetFrame, vtk.vtkTransform())
    positionConstraint.tspan = tspan
    orientationConstraint.tspan = tspan
    return positionConstraint, orientationConstraint
def getSteeringWheelPalmFrame(self):
    """Return the steering-wheel frame reoriented for the palm (X+90, Z-90)."""
    wheelFrame = om.findObjectByName('Steering Wheel frame').transform
    palmFrame = transformUtils.copyFrame(wheelFrame)
    palmFrame.PreMultiply()
    palmFrame.RotateX(90)
    palmFrame.PreMultiply()
    palmFrame.RotateZ(-90)
    return palmFrame
def planBarGrasp(self,depth=0.03, useLineConstraint=False):
    """Plan grasping the grab bar with the right hand.

    Targets the 'right hand grab bar' frame offset by -depth along its Y
    axis, seeded from the 'bar_pre_grab' posture. With useLineConstraint the
    approach between t=0.2 and t=0.8 is restricted to a straight line and a
    full trajectory is planned; otherwise only an end-pose plan is made.
    Plan appended to self.plans and returned.
    """
    ikPlanner = self.robotSystem.ikPlanner
    handSide = 'right'
    handLinkName = 'r_hand'
    startPose = self.getPlanningStartPose()
    startPoseName = 'q_grasp_start'
    self.robotSystem.ikPlanner.addPose(startPose, startPoseName)
    endPoseName = 'q_end_grasp'
    palmToHand = ikPlanner.getPalmToHandLink(handSide)
    palmToWorld = transformUtils.copyFrame(ikPlanner.newGraspToWorldFrame(startPose, handSide, palmToHand))
    targetFrame = transformUtils.copyFrame(om.findObjectByName('right hand grab bar').transform)
    targetFrame.PreMultiply()
    targetFrame.Translate([0.0,-depth,0.0])
    finalPoseConstraints = self.createPalmPoseConstraints(handSide, targetFrame, tspan=[1,1])
    allButRightArmPostureConstraint = self.createAllButRightArmPostureConstraint(startPoseName)
    seedPoseName = 'q_bar_grab'
    seedPose = ikPlanner.getMergedPostureFromDatabase(startPose, 'driving', 'bar_pre_grab', side=handSide)
    self.robotSystem.ikPlanner.addPose(seedPose, seedPoseName)
    constraints = [allButRightArmPostureConstraint]
    constraints.extend(finalPoseConstraints)
    constraintSet = ConstraintSet(ikPlanner, constraints, endPoseName, startPoseName)
    constraintSet.ikParameters = IkParameters(quasiStaticShrinkFactor=10, usePointwise=False)
    constraintSet.seedPoseName = seedPoseName
    constraintSet.nominalPoseName = seedPoseName
    # move on line constraint
    motionVector = np.array(targetFrame.GetPosition()) - np.array(palmToWorld.GetPosition())
    motionTargetFrame = transformUtils.getTransformFromOriginAndNormal(np.array(targetFrame.GetPosition()), motionVector)
    # vis.updateFrame(motionTargetFrame,'motion frame')
    # vis.updateFrame(targetFrame, 'target')
    # vis.updateFrame(currentFrame, 'current')
    p = ikPlanner.createLinePositionConstraint(handLinkName, palmToHand, motionTargetFrame, lineAxis=2, bounds=[-np.linalg.norm(motionVector)*1, 0], positionTolerance=0.001)
    p.tspan = np.linspace(0.2,0.8,5)
    endPose = constraintSet.runIk()
    if useLineConstraint:
        constraintSet.constraints.append(p)
        plan = constraintSet.runIkTraj()
    else:
        plan = constraintSet.planEndPoseGoal()
    self.plans.append(plan)
    return plan
def planBarRetract(self, depth=0.3, useLineConstraint=False):
    """Plan retracting the right hand `depth` meters back from the grab bar.

    The target is the current palm frame translated -depth along its Y axis,
    seeded from the 'bar_pre_grab' posture. With useLineConstraint the whole
    motion (t in [0,1]) is restricted to a straight line and a trajectory is
    planned; otherwise only an end-pose plan is made. Plan appended to
    self.plans and returned.
    """
    ikPlanner = self.robotSystem.ikPlanner
    handSide = 'right'
    handLinkName = 'r_hand'
    startPose = self.getPlanningStartPose()
    startPoseName = 'q_grasp_start'
    self.robotSystem.ikPlanner.addPose(startPose, startPoseName)
    endPoseName = 'q_end_grasp'
    maxBodyTranslationSpeed = 0.3
    palmToHand = ikPlanner.getPalmToHandLink(handSide)
    palmToWorld = transformUtils.copyFrame(ikPlanner.newGraspToWorldFrame(startPose, handSide, palmToHand))
    targetFrame = transformUtils.copyFrame(palmToWorld)
    targetFrame.PreMultiply()
    targetFrame.Translate([0.0,-depth,0.0])
    finalPoseConstraints = self.createPalmPoseConstraints(handSide, targetFrame, tspan=[1,1])
    allButRightArmPostureConstraint = self.createAllButRightArmPostureConstraint(startPoseName)
    seedPoseName = 'q_bar_grab'
    seedPose = ikPlanner.getMergedPostureFromDatabase(startPose, 'driving', 'bar_pre_grab', side=handSide)
    self.robotSystem.ikPlanner.addPose(seedPose, seedPoseName)
    constraints = [allButRightArmPostureConstraint]
    constraints.extend(finalPoseConstraints)
    constraintSet = ConstraintSet(ikPlanner, constraints, endPoseName, startPoseName)
    # Consistency fix: use the named local instead of repeating the literal
    # 0.3 (the local was previously assigned but never used).
    constraintSet.ikParameters = IkParameters(quasiStaticShrinkFactor=10, usePointwise=False, maxBodyTranslationSpeed=maxBodyTranslationSpeed)
    constraintSet.seedPoseName = 'q_bar_grab'
    constraintSet.nominalPoseName = 'q_bar_grab'
    # move on line constraint
    motionVector = np.array(targetFrame.GetPosition()) - np.array(palmToWorld.GetPosition())
    motionTargetFrame = transformUtils.getTransformFromOriginAndNormal(np.array(targetFrame.GetPosition()), motionVector)
    # vis.updateFrame(motionTargetFrame,'motion frame')
    # vis.updateFrame(targetFrame, 'target')
    # vis.updateFrame(currentFrame, 'current')
    p = ikPlanner.createLinePositionConstraint(handLinkName, palmToHand, motionTargetFrame, lineAxis=2, bounds=[-np.linalg.norm(motionVector)*1, 0.0], positionTolerance=0.02)
    p.tspan = np.linspace(0,1,5)
    endPose = constraintSet.runIk()
    if useLineConstraint:
        constraintSet.constraints.append(p)
        plan = constraintSet.runIkTraj()
    else:
        plan = constraintSet.planEndPoseGoal()
    self.plans.append(plan)
    return plan
def commitManipPlan(self):
    """Send the most recently generated plan to the manip planner for execution."""
    latestPlan = self.plans[-1]
    self.robotSystem.manipPlanner.commitManipPlan(latestPlan)
def createAllButLeftLegPostureConstraint(self, poseName):
    """Posture constraint locking every joint except the left leg."""
    return self.robotSystem.ikPlanner.createPostureConstraint(
        poseName, robotstate.matchJoints('^(?!l_leg)'))

def createAllButLeftArmPostureConstraint(self, poseName):
    """Posture constraint locking every joint except the left arm."""
    return self.robotSystem.ikPlanner.createPostureConstraint(
        poseName, robotstate.matchJoints('^(?!l_arm)'))

def createAllButRightArmPostureConstraint(self, poseName):
    """Posture constraint locking every joint except the right arm."""
    return self.robotSystem.ikPlanner.createPostureConstraint(
        poseName, robotstate.matchJoints('^(?!r_arm)'))
def captureLeftFootToRightFootTransform(self):
    """Record the current left-foot frame expressed in the right-foot frame.

    The result is shown in the object model as 'left foot to right foot'
    (hidden by default) for later reference.
    """
    startPose = self.getPlanningStartPose()
    lFoot2World = self.robotSystem.ikPlanner.getLinkFrameAtPose('l_foot', startPose)
    rFoot2World = self.robotSystem.ikPlanner.getLinkFrameAtPose('r_foot', startPose)
    # compose lFoot->world with world->rFoot (inverse) to get lFoot->rFoot
    lFoot2RFoot = transformUtils.concatenateTransforms([lFoot2World, rFoot2World.GetLinearInverse()])
    vis.showFrame(lFoot2RFoot, 'left foot to right foot', scale=0.2, visible=False)
def computeDrivingTrajectories(self, steeringAngleDegrees, maxTurningRadius=10, numTrajPoints=50):
    """Predict left/right wheel ground paths for a given steering angle.

    Returns (leftTraj, rightTraj): two lists of numTrajPoints-1 xyz points,
    offset half a wheel separation to either side of the arc traced by the
    vehicle center.
    """
    angle = -steeringAngleDegrees
    # zero steering makes the curvature singular; nudge it off zero
    if abs(angle) < 0.1:
        angle = 1e-8
    turningRadius = 1.0 / (angle * (1 / (maxTurningRadius * 170.0)))
    turningCenter = [0, turningRadius, 0]
    centerTraj = []
    for k in range(numTrajPoints):
        theta = math.radians((40 / turningRadius) * k - 90)
        arcPoint = np.asarray(turningCenter) + turningRadius * np.asarray([math.cos(theta), math.sin(theta), 0])
        centerTraj.append(arcPoint)
    leftTraj = []
    rightTraj = []
    for k in range(numTrajPoints - 1):
        heading = centerTraj[k + 1] - centerTraj[k]
        # unit vector perpendicular to the heading, in the ground plane
        lateral = np.cross(heading, [0, 0, 1])
        lateral /= np.linalg.norm(lateral)
        halfTrack = 0.5 * self.wheelDistance * lateral
        leftTraj.append(centerTraj[k] - halfTrack)
        rightTraj.append(centerTraj[k] + halfTrack)
    return leftTraj, rightTraj
def transformDrivingTrajectory(self, drivingTraj):
    """Map a vehicle-frame driving trajectory into the local frame.

    The transform is built from the april-tag pose (yaw of the tag's z axis
    projected into the ground plane) combined with the user-tunable
    trajectory X/Y/angle offsets.
    """
    transformedDrivingTraj = list()
    transform = vtk.vtkTransform()
    z_axis = self.tagToLocalTransform.TransformVector([0,0,1])
    tag_origin = self.tagToLocalTransform.TransformPoint([0,0,0])
    # yaw of the tag's z axis in the ground plane; undefined when the axis
    # is (near) vertical, in which case fall back to zero
    z_norm = np.linalg.norm(z_axis[0:2])
    if z_norm > 1e-6:
        z_axis_proj = z_axis[0:2] / z_norm
        angle = math.degrees(math.atan2(z_axis_proj[1], z_axis_proj[0]))
    else:
        angle = 0
    # NOTE: with vtkTransform's default PreMultiply mode, the last call
    # specified is applied to a point first (offset, then rotation, then
    # translation to the tag origin) -- confirm against vtk docs if edited
    transform.Translate([tag_origin[0] , tag_origin[1], 0])
    transform.RotateZ(self.trajectoryAngle + angle)
    transform.Translate([self.trajectoryX, self.trajectoryY, 0])
    for p in drivingTraj:
        transformedPoint = np.asarray(transform.TransformPoint(p))
        transformedDrivingTraj.append(transformedPoint)
    return transformedDrivingTraj
def onThrottleCommand(self, msg):
    """Convert a throttle slider message into an ankle joint position goal.

    slider 0 is the coarse grained slider, slider 1 is for fine grained
    adjustment.  The resulting goal is cached in self.throttleCommandMsg and
    sent by publishThrottleCommand().
    """
    slider = self.decodeThrottleMessage(msg)
    # throttle travel is measured up from the ankle's minimum angle (degrees)
    const = np.rad2deg(self.jointLimitsMin[self.akyIdx])
    ankleGoalPosition = const + slider[0]*self.coarseGrainedThrottleTravel + (slider[1]-1/2.0)*self.fineGrainedThrottleTravel
    ankleGoalPositionRadians = np.deg2rad(ankleGoalPosition)
    # trip the safety if slider[3] is < 1/2, emergency come off the throttle
    if slider[3] < 0.5:
        # single-argument print() behaves identically under Python 2 and 3
        print('Emergency stop, coming off the throttle')
        print("setting l_leg_aky to it's min value")
        ankleGoalPositionRadians = self.jointLimitsMin[self.akyIdx]
    msg = lcmdrc.joint_position_goal_t()
    msg.utime = getUtime()
    msg.joint_position = ankleGoalPositionRadians
    msg.joint_name = drcargs.getDirectorConfig()['drivingThrottleJoint']
    self.throttleCommandMsg = msg
def publishThrottleCommand(self):
    """Publish the cached throttle goal when streaming is on and a goal exists."""
    if self.throttleStreaming and self.throttleCommandMsg is not None:
        lcmUtils.publish(self.throttlePublishChannel, self.throttleCommandMsg)
def publishSteeringCommand(self):
    """Publish the cached steering goal when streaming is on and a goal exists."""
    if self.steeringStreaming and self.steeringCommandMsg is not None:
        lcmUtils.publish(self.steeringPublishChannel, self.steeringCommandMsg)
def onSteeringCommand(self, msg):
    """Convert a steering message into a wrist (lwy) joint position goal.

    The goal is cached in self.steeringCommandMsg and sent by
    publishSteeringCommand().
    """
    # steering convention is opposite to the wrist joint direction
    steeringAngle = -msg.steering_angle
    lwyPositionGoal = steeringAngle + self.steeringAngleOffset
    msg = lcmdrc.joint_position_goal_t()
    msg.utime = getUtime()
    msg.joint_position = lwyPositionGoal
    # NOTE(review): a *steering* goal is tagged with
    # self.drivingThrottleJoint -- confirm this attribute actually names the
    # steering (lwy) joint and is not a copy/paste of the throttle joint.
    msg.joint_name = self.drivingThrottleJoint
    self.steeringCommandMsg = msg
def decodeThrottleMessage(self, msg):
    """Pack the four slider values of a throttle message into a float array."""
    return np.array([msg.slider1, msg.slider2, msg.slider3, msg.slider4],
                    dtype=float)
def captureRobotPoseFromStreaming(self):
    """Grab one robot_state_t message off the command streaming channel.

    Returns the corresponding Drake pose, or None if no message arrives
    within one second (i.e. streaming is not running).
    """
    helper = lcmUtils.MessageResponseHelper(self.commandStreamChannel, lcmdrc.robot_state_t)
    msg = helper.waitForResponse(timeout=1000, keepAlive=False)
    if msg is None:
        # single-argument print() behaves identically under Python 2 and 3
        print("Didn't receive a JOINT_POSITION_GOAL message")
        print("Are you streaming?")
        return None
    pose = robotstate.convertStateMessageToDrakePose(msg)
    return pose
def planCarEntryPose(self):
    """Plan a whole-body posture goal to the stored 'car_entry_new' posture."""
    planner = self.robotSystem.ikPlanner
    startPose = self.getPlanningStartPose()
    goalPose = planner.getMergedPostureFromDatabase(startPose, 'driving', 'car_entry_new')
    # the robot is entering the vehicle, so do not pin the feet to the ground
    self.addPlan(planner.computePostureGoal(startPose, goalPose, feetOnGround=False))
def planArmsEgressPrep(self):
    """Plan both arms to the pre-egress postures from the driving database."""
    startPose = self.getPlanningStartPose()
    ikPlanner = self.robotSystem.ikPlanner
    ikParameters = IkParameters(maxDegreesPerSecond=60)
    # merge left then right arm pre-egress postures into a single goal pose
    midPose = ikPlanner.getMergedPostureFromDatabase(startPose, 'driving', 'pre_egress_left_arm', side='left')
    midPose = ikPlanner.getMergedPostureFromDatabase(midPose, 'driving', 'pre_egress_right_arm', side='right')
    # endPose = ikPlanner.getMergedPostureFromDatabase(midPose, 'driving', 'egress-arms')
    # plan = ikPlanner.computeMultiPostureGoal([startPose, midPose, endPose], feetOnGround=False, ikParameters=ikParameters)
    plan = ikPlanner.computePostureGoal(startPose, midPose, feetOnGround=False, ikParameters=ikParameters)
    self.addPlan(plan)
def planArmsEgressStart(self, startPose=None):
    """Plan the arms to the egress start posture (left egress pose, right crane)."""
    planner = self.robotSystem.ikPlanner
    if startPose is None:
        startPose = self.getPlanningStartPose()
    goalPose = planner.getMergedPostureFromDatabase(startPose, 'driving', 'egress-left-arm', side='left')
    goalPose = planner.getMergedPostureFromDatabase(goalPose, 'General', 'crane', side='right')
    plan = planner.computePostureGoal(startPose, goalPose, feetOnGround=False,
                                      ikParameters=IkParameters(maxDegreesPerSecond=60))
    self.addPlan(plan)
def planArmsEgress(self):
    """Plan the full arms-egress motion as one concatenated, rescaled trajectory.

    Plans the prep and start segments separately, snapshots each trajectory
    on the ikServer side, then concatenates/rescales them and waits for the
    resulting candidate manip plan.
    """
    self.planArmsEgressPrep()
    armsEgressPrepName = 'qtraj_arms_prep'
    self.saveOriginalTraj(armsEgressPrepName)
    # seed the second segment from the final state of the first plan
    nextStartPose = robotstate.convertStateMessageToDrakePose(self.plans[-1].plan[-1])
    self.planArmsEgressStart(startPose=nextStartPose)
    armsEgressStartName = 'qtraj_arms_egress_start'
    self.saveOriginalTraj(armsEgressStartName)
    ikParameters = IkParameters(usePointwise=False, maxDegreesPerSecond=60)
    ikParameters = self.robotSystem.ikPlanner.mergeWithDefaultIkParameters(ikParameters)
    # listen for the plan that concatenateAndRescaleTrajectories publishes
    listener = self.getManipPlanListener()
    _ = self.concatenateAndRescaleTrajectories([armsEgressPrepName, armsEgressStartName], 'qtraj_arms_egress', 'ts', ikParameters)
    plan = listener.waitForResponse()
    listener.finish()
    self.addPlan(plan)
def setSteeringWheelAndWristGraspAngles(self):
    """Capture the user-specified wheel angle and the current wrist angle at grasp."""
    currentPose = self.getPlanningStartPose()
    self.graspWheelAngle = np.deg2rad(self.userSpecifiedGraspWheelAngleInDegrees)
    self.graspWristAngle = currentPose[self.lwyIdx]
def getSteeringWheelAngle(self):
    """Estimate the current steering wheel angle from the wrist joint.

    Returns 0 until the grasp offsets have been captured.
    """
    if self.graspWristAngle is None or self.graspWheelAngle is None:
        # wrist/wheel correspondence not initialized yet
        return 0
    currentWrist = self.getPlanningStartPose()[self.lwyIdx]
    return self.graspWheelAngle + currentWrist - self.graspWristAngle
# executes regrasp plan, updates graspWristAngle, graspWheelAngle
def updateGraspOffsets(self):
    """Re-anchor the wheel/wrist correspondence after a regrasp has executed."""
    # the wrist moved during the regrasp; record its new angle and pair it
    # with the wheel angle saved before the regrasp started
    self.graspWristAngle = self.getPlanningStartPose()[self.lwyIdx]
    self.graspWheelAngle = self.wheelAngleBeforeReGrasp
def printSteeringWheelAngleInDegrees(self):
    """Print the current estimated steering wheel angle, in degrees."""
    # single-argument print() behaves identically under Python 2 and 3
    print(np.rad2deg(self.getSteeringWheelAngle()))
def addPlan(self, plan):
    """Append a freshly computed plan to the plan history (self.plans)."""
    self.plans.append(plan)
def getManipPlanListener(self):
    """Create a listener for candidate manip plans published by the planner."""
    return lcmUtils.MessageResponseHelper('CANDIDATE_MANIP_PLAN',
                                          lcmdrc.robot_plan_w_keyframes_t)
def saveOriginalTraj(self, name):
    """Snapshot qtraj_orig on the ikServer side under the given variable name."""
    snapshot = '%s = qtraj_orig;' % name
    self.robotSystem.ikServer.comm.sendCommands([snapshot])
def concatenateAndRescaleTrajectories(self, trajectoryNames, concatenatedTrajectoryName, junctionTimesName, ikParameters):
    """Concatenate named server-side trajectories and rescale to velocity limits.

    Parameters
    ----------
    trajectoryNames : list of str
        Server-side variable names of the trajectories to join, in order.
    concatenatedTrajectoryName : str
        Server-side variable that will hold the joined trajectory.
    junctionTimesName : str
        Server-side variable that will hold the segment junction times.
    ikParameters : IkParameters
        Supplies velocity limits and body rescale options.

    Returns the junction times as a float array.  The joined trajectory is
    also published on the server via s.publishTraj.
    """
    commands = []
    # velocity limits: 6 floating-base dofs (xyz + rpy) then the joints
    commands.append('joint_v_max = repmat(%s*pi/180, r.getNumVelocities()-6, 1);' % ikParameters.maxDegreesPerSecond)
    commands.append('xyz_v_max = repmat(%s, 3, 1);' % ikParameters.maxBaseMetersPerSecond)
    commands.append('rpy_v_max = repmat(%s*pi/180, 3, 1);' % ikParameters.maxBaseRPYDegreesPerSecond)
    commands.append('v_max = [xyz_v_max; rpy_v_max; joint_v_max];')
    commands.append("max_body_translation_speed = %r;" % ikParameters.maxBodyTranslationSpeed)
    commands.append("max_body_rotation_speed = %r;" % ikParameters.maxBodyRotationSpeed)
    commands.append('rescale_body_ids = [%s];' % (','.join(['links.%s' % linkName for linkName in ikParameters.rescaleBodyNames])))
    commands.append('rescale_body_pts = reshape(%s, 3, []);' % ik.ConstraintBase.toColumnVectorString(ikParameters.rescaleBodyPts))
    commands.append("body_rescale_options = struct('body_id',rescale_body_ids,'pts',rescale_body_pts,'max_v',max_body_translation_speed,'max_theta',max_body_rotation_speed,'robot',r);")
    commands.append('trajectories = {};')
    for name in trajectoryNames:
        commands.append('trajectories{end+1} = %s;' % name)
    commands.append('[%s, %s] = concatAndRescaleTrajectories(trajectories, v_max, %s, %s, body_rescale_options);' % (concatenatedTrajectoryName, junctionTimesName, ikParameters.accelerationParam, ikParameters.accelerationFraction))
    commands.append('s.publishTraj(%s, 1);' % concatenatedTrajectoryName)
    self.robotSystem.ikServer.comm.sendCommands(commands)
    return self.robotSystem.ikServer.comm.getFloatArray(junctionTimesName)
class DrivingPlannerPanel(TaskUserPanel):
    """Task panel wiring the DrivingPlanner to UI buttons, properties and tasks."""

    def __init__(self, robotSystem):
        TaskUserPanel.__init__(self, windowTitle='Driving Task')
        self.robotSystem = robotSystem
        self.drivingPlanner = DrivingPlanner(robotSystem.ikServer, robotSystem)
        self.addDefaultProperties()
        self.addButtons()
        self.addTasks()
        # keep the april-tag-to-local transform up to date
        self.apriltagSub = lcmUtils.addSubscriber('APRIL_TAG_TO_CAMERA_LEFT', lcmbotcore.rigid_transform_t, self.onAprilTag)
        # two camera views, black background, no orientation marker
        self.imageView = cameraview.CameraImageView(cameraview.imageManager, 'CAMERACHEST_RIGHT', 'right image view')
        self.imageViewLeft = cameraview.CameraImageView(cameraview.imageManager, 'CAMERA_LEFT', 'left image view')
        self.imageView.view.orientationMarkerWidget().Off()
        self.imageView.view.backgroundRenderer().SetBackground([0,0,0])
        self.imageView.view.backgroundRenderer().SetBackground2([0,0,0])
        self.imageViewLeft.view.orientationMarkerWidget().Off()
        self.imageViewLeft.view.backgroundRenderer().SetBackground([0,0,0])
        self.imageViewLeft.view.backgroundRenderer().SetBackground2([0,0,0])
        # overlay affordances onto both camera views (without reprojection)
        self.affordanceUpdater = affordanceupdater.AffordanceInCameraUpdater(segmentation.affordanceManager, self.imageView)
        self.affordanceUpdaterLeft = affordanceupdater.AffordanceInCameraUpdater(segmentation.affordanceManager, self.imageViewLeft)
        self.affordanceUpdater.prependImageName = True
        self.affordanceUpdaterLeft.prependImageName = True
        self.affordanceUpdater.projectAffordances = False
        self.affordanceUpdaterLeft.projectAffordances = False
        self.imageViewLayout.addWidget(self.imageView.view)
        self.imageViewLayout.addWidget(self.imageViewLeft.view)
        # periodic redraw of the predicted driving trajectory (10 Hz)
        self.timer = TimerCallback(targetFps=10)
        self.timer.callback = self.updateAndDrawTrajectory
def onAprilTag(self, msg):
    """Refresh the driving planner's tag-to-local transform from a tag detection."""
    cameraview.imageManager.queue.getTransform('april_tag_car_beam', 'local', msg.utime, self.drivingPlanner.tagToLocalTransform)
def addButtons(self):
    """Create the manual task-panel buttons (disabled entries kept as comments)."""
    self.addManualButton('Start', self.onStart)
    self.addManualButton('Update Wheel Location', self.onUpdateWheelLocation)
    self.addManualButton('Plan Safe', self.onPlanSafe)
    self.addManualButton('Plan Pre Grasp', self.onPlanPreGrasp)
    self.addManualButton('Plan Touch', self.onPlanTouch)
    self.addManualButton('Plan Retract', self.onPlanRetract)
    self.addManualButton('Plan Turn', self.onPlanTurn)
    self.addManualButton('Plan Wheel Re-Grasp', self.drivingPlanner.planSteeringWheelReGrasp)
    self.addManualButton('Plan Bar Grab', self.onPlanBarGrasp)
    self.addManualButton('Plan Bar Retract', self.onPlanBarRetract)
    # self.addManualButton('Plan Steering Wheel Turn', self.onPlanSteeringWheelTurn)
    # self.addManualButton('Plan Seed', self.drivingPlanner.planSeed)
    # self.addManualButton('Capture Ankle Angle Low', functools.partial(self.drivingPlanner.captureAnklePosition, 0))
    # self.addManualButton('Capture Ankle Angle High', functools.partial(self.drivingPlanner.captureAnklePosition, 1))
    self.addManualButton('Capture Wheel and Wrist grasp angles', self.drivingPlanner.setSteeringWheelAndWristGraspAngles)
    self.addManualButton('Print Steering Wheel Angle', self.drivingPlanner.printSteeringWheelAngleInDegrees)
    self.addManualSpacer()
    self.addManualButton('Arms Egress Prep', self.drivingPlanner.planArmsEgressPrep)
    self.addManualButton('Arms Egress Start', self.drivingPlanner.planArmsEgressStart)
    self.addManualButton('Plan Left Leg Egress Start', self.drivingPlanner.planLegEgressStart)
def addDefaultProperties(self):
    """Declare the panel's tunable properties and sync them once.

    FIX: the 'Trajectory X Offset' and 'Trajectory Angle Offset' lines
    previously ended with a stray trailing comma, turning each statement
    into an accidental single-element tuple expression; the commas are
    removed (no behavioral change, the tuples were discarded).
    """
    self.params.addProperty('PreGrasp/Retract Depth', 0.2, attributes=om.PropertyAttributes(singleStep=0.01, decimals=3))
    self.params.addProperty('Touch Depth', 0.0, attributes=om.PropertyAttributes(singleStep=0.01, decimals=3))
    self.params.addProperty('PreGrasp Angle', 0, attributes=om.PropertyAttributes(singleStep=10))
    self.params.addProperty('Turn Angle', 0, attributes=om.PropertyAttributes(singleStep=10))
    # self.params.addProperty('Steering Wheel Radius (meters)', 0.1873, attributes=om.PropertyAttributes(singleStep=0.01))
    # self.params.addProperty('Knot Points', 20, attributes=om.PropertyAttributes(singleStep=1))
    # self.params.addProperty('Gaze Constraint Tol', 0.3, attributes=om.PropertyAttributes(singleStep=0.1, decimals=2))
    self.params.addProperty('Position Constraint Tol', 0.0, attributes=om.PropertyAttributes(singleStep=0.01, decimals=2))
    self.params.addProperty('Quat Constraint Tol', 0.0, attributes=om.PropertyAttributes(singleStep=0.01, decimals=2))
    self.params.addProperty('Grasp Location', 0, attributes=om.PropertyAttributes(enumNames=['Center','Rim']))
    self.params.addProperty('Seed with current posture', 0, attributes=om.PropertyAttributes(enumNames=['False','True']))
    self.params.addProperty('Speed', 0.75, attributes=om.PropertyAttributes(singleStep=0.1, decimals=2))
    # self.params.addProperty('Throttle Idle Angle Slack', 10, attributes=om.PropertyAttributes(singleStep=1))
    self.params.addProperty('Coarse Grained Throttle Travel', 100, attributes=om.PropertyAttributes(singleStep=10))
    self.params.addProperty('Fine Grained Throttle Travel', 30, attributes=om.PropertyAttributes(singleStep=1))
    self.params.addProperty('Throttle Streaming', False)
    self.params.addProperty('Steering Streaming', False)
    self.params.addProperty('Bar Grasp/Retract Depth', 0.1, attributes=om.PropertyAttributes(singleStep=0.01, decimals=2))
    self.params.addProperty('Pedal Foot Location', 1, attributes=om.PropertyAttributes(enumNames=['Standard','Knee In']))
    self.params.addProperty('Steering Wheel Angle when Grasped', 0, attributes=om.PropertyAttributes(singleStep=10))
    self.params.addProperty('Turning Radius', 9.5, attributes=om.PropertyAttributes(singleStep=0.01, decimals=2))
    self.params.addProperty('Wheel Separation', 1.4, attributes=om.PropertyAttributes(singleStep=0.01, decimals=2))
    self.params.addProperty('Trajectory Segments', 25, attributes=om.PropertyAttributes(singleStep=1, decimals=0))
    self.params.addProperty('Trajectory X Offset', 0.0, attributes=om.PropertyAttributes(singleStep=0.01, decimals=2))
    self.params.addProperty('Trajectory Y Offset', 0.30, attributes=om.PropertyAttributes(singleStep=0.01, decimals=2))
    self.params.addProperty('Trajectory Angle Offset', 0.0, attributes=om.PropertyAttributes(singleStep=1, decimals=0))
    self.params.addProperty('Show Trajectory', False)
    self.params.addProperty('Show Driving/Regrasp Tasks',0, attributes=om.PropertyAttributes(enumNames=['Ingress','Regrasp', 'Egress']))
    self._syncProperties()
def _syncProperties(self):
    """Copy the panel property values onto self and the driving planner.

    Several formerly-exposed properties are now hard-coded (see the
    commented lines and the fixed assignments below).
    """
    self.preGraspDepth = self.params.getProperty('PreGrasp/Retract Depth')
    self.touchDepth = self.params.getProperty('Touch Depth')
    self.preGraspAngle = self.params.getProperty('PreGrasp Angle')
    self.turnAngle = self.params.getProperty('Turn Angle')
    self.speed = self.params.getProperty('Speed')
    # hard-coded values replacing retired panel properties
    self.turnRadius = 0.18 #self.params.getProperty('Steering Wheel Radius (meters)')
    self.knotPoints = 20
    self.gazeTol = 0.3
    self.drivingPlanner.positionTol = 0.0
    self.drivingPlanner.quatTol = 0.0
    self.graspLocation = 'center'
    self.drivingPlanner.seedWithCurrent = self.params.getProperty('Seed with current posture')
    # self.drivingPlanner.throttleIdleAngleSlack = self.params.getProperty('Throttle Idle Angle Slack')
    self.drivingPlanner.fineGrainedThrottleTravel = self.params.getProperty('Fine Grained Throttle Travel')
    self.drivingPlanner.coarseGrainedThrottleTravel = self.params.getProperty('Coarse Grained Throttle Travel')
    self.drivingPlanner.throttleStreaming = self.params.getProperty('Throttle Streaming')
    self.drivingPlanner.steeringStreaming = self.params.getProperty('Steering Streaming')
    self.barGraspDepth = self.params.getProperty('Bar Grasp/Retract Depth')
    self.drivingPlanner.maxTurningRadius = self.params.getProperty('Turning Radius')
    self.drivingPlanner.userSpecifiedGraspWheelAngleInDegrees = self.params.getProperty('Steering Wheel Angle when Grasped')
    self.drivingPlanner.trajSegments = self.params.getProperty('Trajectory Segments')
    self.drivingPlanner.wheelDistance = self.params.getProperty('Wheel Separation')
    self.drivingPlanner.trajectoryX = self.params.getProperty('Trajectory X Offset')
    self.drivingPlanner.trajectoryY = self.params.getProperty('Trajectory Y Offset')
    self.drivingPlanner.trajectoryAngle = self.params.getProperty('Trajectory Angle Offset')
    self.drivingPlanner.kneeInPedal = self.params.getProperty('Pedal Foot Location')
    self.taskToShow = self.params.getProperty('Show Driving/Regrasp Tasks')
    self.drivingPlanner.applyProperties()
def onSteeringCommand(self, msg):
    """Track the commanded steering angle (degrees) from delta-steering messages."""
    if msg.type != msg.TYPE_DRIVE_DELTA_STEERING:
        return
    self.drivingPlanner.steeringAngleDegrees = math.degrees(msg.steering_angle)
def onStart(self):
    """Entry-point task: sync the wheel affordance and report readiness."""
    self.onUpdateWheelLocation()
    print('Driving Planner Ready')

def onUpdateWheelLocation(self):
    """Push the 'Steering Wheel' affordance frame into the driving planner."""
    f = om.findObjectByName('Steering Wheel').getChildFrame().transform
    xyzquat = transformUtils.poseFromTransform(f)
    # flatten the (xyz, quaternion) pair into a single 7-vector
    xyzquat = np.concatenate(xyzquat)
    self.drivingPlanner.updateWheelTransform(xyzquat)
def onPlanSafe(self):
    """Plan the 'safe' posture via the driving planner."""
    self.drivingPlanner.planSafe()

def onPlanPreGrasp(self, depth=None):
    """Plan the pre-grasp reach toward the wheel.

    NOTE(review): unlike the sibling wrappers this does not call
    _syncProperties() first, and the `depth` argument is ignored in favor
    of self.preGraspDepth -- confirm both are intentional.
    """
    self.drivingPlanner.planPreGrasp(depth=self.preGraspDepth, speed=self.speed, angle=self.preGraspAngle,
                                     graspLocation=self.graspLocation, turnRadius=self.turnRadius)

def onPlanTouch(self):
    """Sync properties, then plan the touch motion onto the wheel."""
    self._syncProperties()
    self.drivingPlanner.planTouch(depth=self.touchDepth, speed=self.speed)

def onPlanRetract(self):
    """Sync properties, then plan the hand retract from the wheel."""
    self._syncProperties()
    self.drivingPlanner.planRetract(depth=self.preGraspDepth, speed=self.speed)

def onPlanTurn(self):
    """Sync properties, then plan a wheel turn by the configured angle."""
    self._syncProperties()
    self.drivingPlanner.planTurn(angle=self.turnAngle, speed=self.speed)

def onPlanSteeringWheelTurn(self):
    """Sync properties, then plan a continuous steering wheel turn."""
    self._syncProperties()
    self.drivingPlanner.planSteeringWheelTurn(speed=self.speed, turnRadius=self.turnRadius, knotPoints=self.knotPoints, gazeTol=self.gazeTol)
def onPropertyChanged(self, propertySet, propertyName):
    """React to panel property edits: sync values, then toggle timers/views."""
    taskToShowOld = self.taskToShow
    self._syncProperties()
    # rebuild the task tree when the selected task set changed
    if not taskToShowOld == self.taskToShow:
        self.addTasks()
    if propertyName == 'Throttle Streaming':
        # start/stop the periodic throttle command publisher
        if self.params.getProperty(propertyName):
            self.drivingPlanner.throttleCommandTimer.start()
        else:
            self.drivingPlanner.throttleCommandTimer.stop()
    elif propertyName == 'Steering Streaming':
        # start/stop the periodic steering command publisher
        if self.params.getProperty(propertyName):
            self.drivingPlanner.steeringCommandTimer.start()
        else:
            self.drivingPlanner.steeringCommandTimer.stop()
    elif propertyName == 'Show Trajectory':
        if self.params.getProperty(propertyName):
            # redraw timer plus both camera-overlay updaters
            self.timer.start()
            self.affordanceUpdater.timer.start()
            self.affordanceUpdaterLeft.timer.start()
        else:
            # stop drawing and remove the trajectory from views and model
            self.timer.stop()
            self.affordanceUpdater.cleanUp()
            self.affordanceUpdaterLeft.cleanUp()
            self.affordanceUpdater.extraObjects = []
            self.affordanceUpdaterLeft.extraObjects = []
            om.removeFromObjectModel(om.findObjectByName('driving trajectory'))
def onPlanBarRetract(self):
    """Plan retracting the right hand from the grab bar."""
    self.drivingPlanner.planBarRetract(depth=self.barGraspDepth, useLineConstraint=True)

def onPlanBarGrasp(self):
    """Plan the right-hand reach onto the grab bar."""
    self.drivingPlanner.planBarGrasp(depth=self.barGraspDepth, useLineConstraint=True)
def setParamsPreGrasp1(self):
    """Depth preset for the first (far) pre-grasp approach."""
    self.params.setProperty('PreGrasp/Retract Depth', 0.22)

def setParamsPreGrasp2(self):
    """Depth preset for the second (close) pre-grasp approach."""
    self.params.setProperty('PreGrasp/Retract Depth', 0.12)

def setParamsWheelRetract(self):
    """Depth preset for retracting the hand from the wheel."""
    self.params.setProperty('PreGrasp/Retract Depth', 0.3)

def setParamsBarRetract(self):
    """Depth preset for retracting the hand from the bar."""
    self.params.setProperty('Bar Grasp/Retract Depth', 0.3)

def setParamsBarGrasp(self):
    """Depth preset for grasping the bar (negative: slightly past contact)."""
    self.params.setProperty('Bar Grasp/Retract Depth', -0.015)

def startSteering(self):
    """Enable streaming of steering commands."""
    self.params.setProperty('Steering Streaming', 1)

def stopSteering(self):
    """Disable streaming of steering commands."""
    self.params.setProperty('Steering Streaming', 0)

def stopStreaming(self):
    """Disable both steering and throttle streaming."""
    self.params.setProperty('Steering Streaming', 0)
    self.params.setProperty('Throttle Streaming', 0)
def addTasks(self):
    """Rebuild the task tree for the currently selected task set.

    self.taskToShow selects the set: 0 = Ingress, 1 = Regrasp, 2 = Egress
    (the 'Show Driving/Regrasp Tasks' enum property).
    """
    self.taskTree.removeAllTasks()
    if self.taskToShow == 0:
        self.addIngressTasks()
    elif self.taskToShow == 1:
        self.addRegraspTasks()
    # FIX: the original condition was `self.addEgressTasks() == 2`, which
    # built the egress tasks as a side effect of evaluating the condition
    # and then compared the method's None return value to 2.  Test the
    # selector value instead and call the builder in the branch body.
    elif self.taskToShow == 2:
        self.addEgressTasks()
def addIngressTasks(self):
    """Build the task tree for entering the vehicle and getting ready to drive."""
    # some helpers
    self.folder = None
    def addTask(task, parent=None):
        parent = parent or self.folder
        self.taskTree.onAddTask(task, copy=False, parent=parent)
    def addFunc(func, name, parent=None):
        addTask(rt.CallbackTask(callback=func, name=name), parent=parent)
    def addFolder(name, parent=None):
        self.folder = self.taskTree.addGroup(name, parent=parent)
        return self.folder
    def addManipTaskMatlab(name, planFunc, userPrompt=False, parentFolder=None):
        # plan -> approve/commit -> wait sequence (no automatic plan check)
        prevFolder = self.folder
        addFolder(name, prevFolder)
        addFunc(planFunc, 'plan')
        addTask(rt.UserPromptTask(name='approve manip plan', message='Please approve and commit manipulation plan.'))
        addTask(rt.UserPromptTask(name='wait for plan execution', message='Continue when plan finishes.'))
    def addManipTask(name, planFunc, userPrompt=False):
        # plan -> (auto check or user approve) -> execute -> wait sequence
        prevFolder = self.folder
        addFolder(name, prevFolder)
        addFunc(planFunc, 'plan')
        if not userPrompt:
            addTask(rt.CheckPlanInfo(name='check manip plan info'))
        else:
            addTask(rt.UserPromptTask(name='approve manip plan', message='Please approve manipulation plan.'))
        addFunc(dp.commitManipPlan, name='execute manip plan')
        addTask(rt.UserPromptTask(name='wait for plan execution', message='Continue when plan finishes.'))
    dp = self.drivingPlanner
    prep = addFolder('Prep')
    addTask(rt.UserPromptTask(name="confirm user mode", message="Please go to User mode"))
    addTask(rt.UserPromptTask(name="Confirm pressure", message='Confirm that pressure is set for ingress (2000 psi)'))
    addTask(rt.UserPromptTask(name="start streaming", message="Please start streaming"))
    addManipTask('car entry posture', self.drivingPlanner.planCarEntryPose, userPrompt=True)
    # addManipTask leaves self.folder on its own subfolder; pop back out
    self.folder = prep
    addTask(rt.SetNeckPitch(name='set neck position', angle=30))
    addFunc(self.drivingPlanner.captureLeftFootToRightFootTransform, 'capture lfoot to rfoot transform')
    addTask(rt.UserPromptTask(name="spawn polaris model", message="launch egress planner and spawn polaris model"))
    addFunc(self.onStart, 'update wheel location')
    graspWheel = addFolder('Grasp Steering Wheel')
    addTask(rt.OpenHand(name='open left hand', side='Left'))
    addFunc(self.setParamsPreGrasp1, 'set params')
    addManipTask('Pre Grasp 1', self.onPlanPreGrasp, userPrompt=True)
    self.folder = graspWheel
    addTask(rt.UserPromptTask(name="check alignment", message="Please ask field team for hand location relative to wheel, adjust wheel affordance if necessary"))
    addFunc(self.setParamsPreGrasp2, 'set params')
    addManipTask('Pre Grasp 2', self.onPlanPreGrasp, userPrompt=True)
    self.folder = graspWheel
    addTask(rt.UserPromptTask(name="check alignment", message="Please make any manual adjustments if necessary"))
    addTask(rt.CloseHand(name='close left hand', side='Left'))
    addTask(rt.UserPromptTask(name="set true steering wheel angle", message="Set true steering wheel angle in spin box"))
    addFunc(self.drivingPlanner.setSteeringWheelAndWristGraspAngles, 'capture true wheel angle and current wrist angle')
    graspBar = addFolder('Grasp Bar')
    addTask(rt.OpenHand(name='open right hand', side='Right'))
    addFunc(self.setParamsBarGrasp, 'set params')
    addManipTask('Bar Grasp', self.onPlanBarGrasp, userPrompt=True)
    self.folder = graspBar
    addTask(rt.UserPromptTask(name="check alignment and depth", message="Please check alignment and depth, make any manual adjustments"))
    addTask(rt.CloseHand(name='close Right hand', side='Right'))
    footToDriving = addFolder('Foot to Driving Pose')
    addManipTask('Foot Up', self.drivingPlanner.planLegUp, userPrompt=True)
    self.folder = footToDriving
    addManipTask('Swing leg in', self.drivingPlanner.planLegSwingIn , userPrompt=True)
    self.folder = footToDriving
    addManipTask('Foot On Pedal', self.drivingPlanner.planLegPedal, userPrompt=True)
    driving = addFolder('Driving')
    addTask(rt.UserPromptTask(name="launch drivers", message="Please launch throttle and steering drivers"))
    addTask(rt.UserPromptTask(name="switch to regrasp tasks", message="Switch to regrasp task set"))
def addEgressTasks(self):
    """Build the task tree for releasing the controls and exiting the vehicle."""
    # some helpers
    self.folder = None
    def addTask(task, parent=None):
        parent = parent or self.folder
        self.taskTree.onAddTask(task, copy=False, parent=parent)
    def addFunc(func, name, parent=None):
        addTask(rt.CallbackTask(callback=func, name=name), parent=parent)
    def addFolder(name, parent=None):
        self.folder = self.taskTree.addGroup(name, parent=parent)
        return self.folder
    def addManipTask(name, planFunc, userPrompt=False):
        # plan -> (auto check or user approve) -> execute -> wait sequence
        prevFolder = self.folder
        addFolder(name, prevFolder)
        addFunc(planFunc, 'plan')
        if not userPrompt:
            addTask(rt.CheckPlanInfo(name='check manip plan info'))
        else:
            addTask(rt.UserPromptTask(name='approve manip plan', message='Please approve manipulation plan.'))
        addFunc(dp.commitManipPlan, name='execute manip plan')
        addTask(rt.UserPromptTask(name='wait for plan execution', message='Continue when plan finishes.'))
    dp = self.drivingPlanner
    addFolder('Stop throttle and steering')
    addFunc(self.stopStreaming, 'stop steering and throttle streaming')
    addTask(rt.UserPromptTask(name="Confirm steering and throttle streaming is off", message='Confirm steering and throttle streaming is off, move sliders and wheel to check'))
    # footToEgress = addFolder('Foot to Egress Pose')
    self.folder = None
    addManipTask('Foot to Egress Start', self.drivingPlanner.planLegEgressFull, userPrompt=True)
    # addManipTask('Foot Off Pedal', self.drivingPlanner.planLegAbovePedal, userPrompt=True)
    # self.folder = footToEgress
    # addManipTask('Swing leg out', self.drivingPlanner.planLegSwingOut , userPrompt=True)
    # self.folder = footToEgress
    # addManipTask('Foot Down', self.drivingPlanner.planLegEgressStart, userPrompt=True)
    addFunc(self.onUpdateWheelLocation, 'Update wheel location')
    ungraspWheel = addFolder('Ungrasp Steering Wheel')
    addTask(rt.UserPromptTask(name="Confirm pressure", message='Confirm that pressure is set for prep-for-egress (2200 psi)'))
    addTask(rt.OpenHand(name='open left hand', side='Left'))
    addTask(rt.UserPromptTask(name="confirm hand is open", message="Confirm the left hand has opened"))
    addFunc(self.setParamsWheelRetract, 'set params')
    addManipTask('Retract hand', self.onPlanRetract, userPrompt=True)
    self.folder = ungraspWheel
    addTask(rt.CloseHand(name='close left hand', side='Left'))
    ungraspBar = addFolder('Ungrasp Bar')
    addTask(rt.OpenHand(name='open right hand', side='Right'))
    addTask(rt.UserPromptTask(name="confirm hand is open", message="Confirm the right hand has opened"))
    addFunc(self.setParamsBarRetract, 'set params')
    addManipTask('Retract hand', self.onPlanBarRetract, userPrompt=True)
    self.folder = ungraspBar
    addTask(rt.CloseHand(name='close Right hand', side='Right'))
    # armsToEgressStart = addFolder('Arms to Egress Position')
    self.folder = None
    addManipTask('Arms to Egress', self.drivingPlanner.planArmsEgress, userPrompt=True)
    # addManipTask('Arms To Egress Prep', self.drivingPlanner.planArmsEgressPrep, userPrompt=True)
    # self.folder = armsToEgressStart
    # addManipTask('Arms To Egress Start', self.drivingPlanner.planArmsEgressStart, userPrompt=True)
    prep = addFolder('Stop Streaming')
    addTask(rt.UserPromptTask(name='stop streaming base side', message='stop streaming base side'))
def addRegraspTasks(self):
    """Build the task tree for regrasping the steering wheel mid-drive."""
    self.folder = None
    def addTask(task, parent=None):
        parent = parent or self.folder
        self.taskTree.onAddTask(task, copy=False, parent=parent)
    def addFunc(func, name, parent=None):
        addTask(rt.CallbackTask(callback=func, name=name), parent=parent)
    def addFolder(name, parent=None):
        self.folder = self.taskTree.addGroup(name, parent=parent)
        return self.folder
    def addManipTask(name, planFunc, userPrompt=False):
        # plan -> (auto check or user approve) -> execute -> wait sequence
        prevFolder = self.folder
        addFolder(name, prevFolder)
        addFunc(planFunc, 'plan')
        if not userPrompt:
            addTask(rt.CheckPlanInfo(name='check manip plan info'))
        else:
            addTask(rt.UserPromptTask(name='approve manip plan', message='Please approve manipulation plan.'))
        addFunc(dp.commitManipPlan, name='execute manip plan')
        addTask(rt.UserPromptTask(name='wait for plan execution', message='Continue when plan finishes.'))
    dp = self.drivingPlanner
    regrasp = addFolder('Regrasp')
    addFunc(self.stopSteering, 'stop steering commands')
    addTask(rt.UserPromptTask(name="high pressure", message="set pump to 2400 psi"))
    addFunc(self.onUpdateWheelLocation, 'update wheel location')
    addFunc(self.drivingPlanner.captureHandPose, 'capture hand pose')
    addTask(rt.UserPromptTask(name="approve open left hand", message="Check ok to open left hand"))
    addTask(rt.OpenHand(name='open left hand', side='Left'))
    addTask(rt.UserPromptTask(name="confirm hand is open", message="Confirm the left hand has opened"))
    addFunc(self.setParamsWheelRetract, 'set params')
    addManipTask('Retract hand', self.onPlanRetract, userPrompt=True)
    self.folder = regrasp
    addManipTask('Plan Regrasp', self.drivingPlanner.planSteeringWheelReGrasp, userPrompt=True)
    self.folder = regrasp
    addTask(rt.UserPromptTask(name="approve close left hand", message="Check ok to close left hand"))
    addTask(rt.CloseHand(name='close left hand', side='Left'))
    addFunc(self.drivingPlanner.updateGraspOffsets, 'update steering wheel grasp offsets')
    addTask(rt.UserPromptTask(name="driving pressure", message="set pump to 1500 psi"))
    addTask(rt.UserPromptTask(name="reset steering wheel", message="Set the steering wheel to approximate lwy angle"))
    addFunc(self.startSteering, 'start steering commands')
def updateAndDrawTrajectory(self):
    """Redraw the predicted wheel trajectories; returns the vis object or None.

    Timer callback (wired in __init__); skipped unless 'Show Trajectory'
    is enabled and a steering wheel affordance exists.
    """
    if not self.params.getProperty('Show Trajectory') or om.findObjectByName('Steering Wheel') is None:
        return None
    steeringAngleDegrees = np.rad2deg(self.drivingPlanner.getSteeringWheelAngle())
    leftTraj, rightTraj = self.drivingPlanner.computeDrivingTrajectories(steeringAngleDegrees, self.drivingPlanner.maxTurningRadius, self.drivingPlanner.trajSegments + 1)
    d = DebugData()
    for traj in [leftTraj, rightTraj]:
        traj = self.drivingPlanner.transformDrivingTrajectory(traj)
        numTrajPoints = len(traj)
        # color each sphere by interpolating the R/G channels along the path
        for i in xrange(numTrajPoints):
            rgb = [(numTrajPoints - i) / float(numTrajPoints), 1 - (numTrajPoints - i) / float(numTrajPoints), 1]
            d.addSphere(traj[i], 0.05, rgb, resolution=12)
    obj = vis.updatePolyData(d.getPolyData(), 'driving trajectory', colorByName='RGB255', parent='planning')
    # mirror the drawn trajectory into both camera overlays
    for updater in [self.affordanceUpdater, self.affordanceUpdaterLeft]:
        updater.extraObjects = [obj]
    return obj
|
gizatt/director
|
src/python/ddapp/drivingplanner.py
|
Python
|
bsd-3-clause
| 68,779
|
[
"VTK"
] |
e1aae6ab9523ad98dfca7f204ba2695d6e919294d4d70a017ec045fcc44dbc04
|
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source
# & Institut Laue - Langevin
# SPDX-License-Identifier: GPL-3.0+
# pylint: disable=too-many-branches
from __future__ import (absolute_import, division, print_function)
import os
import tempfile
from collections import namedtuple
from contextlib import contextmanager
import numpy as np
from mantid.py3compat.enum import Enum
from mantid import config as mantid_config
from mantid.api import (DataProcessorAlgorithm, AlgorithmFactory, FileProperty,
WorkspaceProperty, FileAction, PropertyMode, mtd,
AnalysisDataService, Progress)
from mantid.simpleapi import (DeleteWorkspace, LoadEventNexus, SetGoniometer,
SetUB, ModeratorTzeroLinear, SaveNexus,
ConvertToMD, LoadMask, MaskDetectors, LoadNexus,
MDNormSCDPreprocessIncoherent, MDNormSCD,
MultiplyMD, CreateSingleValuedWorkspace,
ConvertUnits, CropWorkspace, DivideMD, MinusMD,
RenameWorkspace, ConvertToMDMinMaxGlobal,
ClearMaskFlag, ScaleX, Plus)
from mantid.kernel import (Direction, FloatArrayProperty,
FloatArrayLengthValidator, logger)
class VDAS(Enum):
    """Specifies the version of the Data Acquisition System (DAS)"""
    v1900_2018 = 0  # Up to Dec 31 2018
    v2019_2100 = 1  # From Jan 01 2019
@contextmanager
def pyexec_setup(new_options):
    """
    Backup keys of mantid.config and clean up temporary files and workspaces
    upon algorithm completion or exception raised.
    Workspaces with names beginning with '_t_' are assumed temporary.

    Parameters
    ----------
    new_options: dict
        Dictionary of mantid configuration options to be modified.

    Yields
    ------
    namedtuple
        'temp_objects' with two lists, ``files`` and ``workspaces``; the
        caller appends objects that must be deleted on exit.
    """
    # Hold in this tuple all temporary objects to be removed after completion
    temp_objects = namedtuple('temp_objects', 'files workspaces')
    temps = temp_objects(list(), list())
    previous_config = dict()
    for key, value in new_options.items():
        previous_config[key] = mantid_config[key]  # remember original value
        mantid_config[key] = value
    try:
        yield temps
    finally:
        # reinstate the mantid options
        for key, value in previous_config.items():
            mantid_config[key] = value
        # delete temporary files
        for file_name in temps.files:
            os.remove(file_name)
        # remove any workspace added to temps.workspaces or whose name begins
        # with "_t_"
        to_be_removed = set()
        for name in AnalysisDataService.getObjectNames():
            if '_t_' == name[0:3]:
                to_be_removed.add(name)
        for workspace in temps.workspaces:
            if isinstance(workspace, str):
                to_be_removed.add(workspace)
            else:
                to_be_removed.add(workspace.name())
        for name in to_be_removed:
            DeleteWorkspace(name)
class BASISCrystalDiffraction(DataProcessorAlgorithm):
    """Multiple-file BASIS crystal reduction for diffraction detectors."""

    # Default detector mask (diffraction configuration)
    _mask_file = '/SNS/BSS/shared/autoreduce/new_masks_08_12_2015/'\
        'BASIS_Mask_default_diff.xml'
    # Nominal solid-angle and integrated-flux workspaces, used when no
    # vanadium runs are supplied
    _solid_angle_ws_ = '/SNS/BSS/shared/autoreduce/solid_angle_diff.nxs'
    _flux_ws_ = '/SNS/BSS/shared/autoreduce/int_flux.nxs'
    # Valid wavelength band per reflection, keyed by reflection name
    # (values presumably in Angstroms -- see _calculate_wavelength_band)
    _wavelength_bands = {'311': [3.07, 3.60], '111': [6.05, 6.60]}
    # Detector banks holding the diffraction tubes
    _diff_bank_numbers = list(range(5, 14))
    # Coefficients for the ModeratorTzeroLinear delayed-emission correction
    _tzero = dict(gradient=11.967, intercept=-5.0)
def __init__(self):
    """Initialize all state populated later during PyExec."""
    DataProcessorAlgorithm.__init__(self)
    self._wavelength_band = None  # wavelength band [min, max], Angstroms
    self._short_inst = "BSS"  # short instrument name used in file lookup
    self._long_inst = "BASIS"
    self._temps = None  # temp_objects namedtuple managed by pyexec_setup
    self._bkg = None  # Events workspace for background runs
    self._bkg_scale = None  # scale applied to background before subtraction
    self._vanadium_files = None  # pre-processed vanadium event files
    self._momentum_range = None  # sorted [min, max] incoming momentum
    self._t_mask = None  # temporary mask workspace
    self._n_bins = None  # binning of the output HKL slice
@staticmethod
def category():
    """Mantid algorithm category."""
    return "Diffraction\\Reduction"

@staticmethod
def version():
    """Algorithm version."""
    return 1

@staticmethod
def summary():
    """One-line description shown in the algorithm dialog."""
    return 'Multiple-file BASIS crystal reduction for diffraction ' \
           'detectors.'

@staticmethod
def seeAlso():
    """Related algorithms."""
    return ['AlignDetectors', 'DiffractionFocussing', 'SNSPowderReduction']
@staticmethod
def _run_list(runs):
    """
    Expand a run-number specification into individual run numbers.

    Parameters
    ----------
    runs: str
        Comma-separated run numbers; an 'a-b' token denotes the
        inclusive range a..b. Spaces are ignored.

    Returns
    -------
    list
        Run numbers as strings.
    """
    expanded = []
    for token in runs.replace(' ', '').split(','):
        if '-' in token:
            first, last = (int(part) for part in token.split('-'))
            expanded.extend(str(number) for number in range(first, last + 1))
        else:
            expanded.append(token)
    return expanded
@staticmethod
def add_previous_pulse(w):
    """
    Duplicate the events but shift them by one pulse, then add to
    input workspace.

    Used by callers to correct the old-DAS shift of fast neutrons
    (see GitHub issue 23855).

    Parameters
    ----------
    w: Mantid.EventsWorkspace

    Returns
    -------
    Mantid.EventsWorkspace
    """
    pulse_width = 1.e6/60  # in micro-seconds (60 Hz source)
    # copy shifted one pulse earlier, then added onto the original
    _t_w = ScaleX(w, Factor=-pulse_width, Operation='Add')
    _t_w = Plus(w, _t_w, OutputWorkspace=w.name())
    return _t_w
def PyInit(self):
    """Declare input/output properties and organize them into dialog groups."""
    # Input validators
    array_length_three = FloatArrayLengthValidator(3)
    # Properties
    self.declareProperty('RunNumbers', '', 'Sample run numbers')
    self.declareProperty(FileProperty(name='MaskFile',
                                      defaultValue=self._mask_file,
                                      action=FileAction.OptionalLoad,
                                      extensions=['.xml']),
                         doc='See documentation for latest mask files.')
    self.declareProperty(WorkspaceProperty('OutputWorkspace', '',
                                           optional=PropertyMode.Mandatory,
                                           direction=Direction.Output),
                         doc='Output Workspace. If background is ' +
                             'subtracted, _data and _background ' +
                             'workspaces will also be generated')
    #
    # Background for the sample runs
    #
    background_title = 'Background runs'
    self.declareProperty('BackgroundRuns', '', 'Background run numbers')
    self.setPropertyGroup('BackgroundRuns', background_title)
    self.declareProperty("BackgroundScale", 1.0,
                         doc='The background will be scaled by this ' +
                             'number before being subtracted.')
    self.setPropertyGroup('BackgroundScale', background_title)
    #
    # Vanadium
    #
    vanadium_title = 'Vanadium runs'
    self.declareProperty('VanadiumRuns', '', 'Vanadium run numbers')
    self.setPropertyGroup('VanadiumRuns', vanadium_title)
    #
    # Single Crystal Diffraction
    #
    crystal_diffraction_title = 'Single Crystal Diffraction'
    self.declareProperty('PsiAngleLog', 'SE50Rot',
                         direction=Direction.Input,
                         doc='log entry storing rotation of the sample'
                             'around the vertical axis')
    self.declareProperty('PsiOffset', 0.0,
                         direction=Direction.Input,
                         doc='Add this quantity to PsiAngleLog')
    self.declareProperty(FloatArrayProperty('LatticeSizes', [0, 0, 0],
                                            array_length_three,
                                            direction=Direction.Input),
                         doc='three item comma-separated list "a, b, c"')
    self.declareProperty(FloatArrayProperty('LatticeAngles',
                                            [90.0, 90.0, 90.0],
                                            array_length_three,
                                            direction=Direction.Input),
                         doc='three item comma-separated ' +
                             'list "alpha, beta, gamma"')
    # Reciprocal vector to be aligned with incoming beam
    self.declareProperty(FloatArrayProperty('VectorU', [1, 0, 0],
                                            array_length_three,
                                            direction=Direction.Input),
                         doc='three item, comma-separated, HKL indexes'
                             'of the diffracting plane')
    # Reciprocal vector orthogonal to VectorU and in-plane with
    # incoming beam
    self.declareProperty(FloatArrayProperty('VectorV', [0, 1, 0],
                                            array_length_three,
                                            direction=Direction.Input),
                         doc='three item, comma-separated, HKL indexes'
                             'of the direction perpendicular to VectorV'
                             'and the vertical axis')
    # Abscissa view
    self.declareProperty(FloatArrayProperty('Uproj', [1, 0, 0],
                                            array_length_three,
                                            direction=Direction.Input),
                         doc='three item comma-separated Abscissa view'
                             'of the diffraction pattern')
    # Ordinate view
    self.declareProperty(FloatArrayProperty('Vproj', [0, 1, 0],
                                            array_length_three,
                                            direction=Direction.Input),
                         doc='three item comma-separated Ordinate view'
                             'of the diffraction pattern')
    # Hidden axis
    self.declareProperty(FloatArrayProperty('Wproj', [0, 0, 1],
                                            array_length_three,
                                            direction=Direction.Input),
                         doc='Hidden axis view')
    # Binning in reciprocal slice
    self.declareProperty('NBins', 400, direction=Direction.Input,
                         doc='number of bins in the HKL slice')
    for a_property in ('PsiAngleLog', 'PsiOffset',
                       'LatticeSizes', 'LatticeAngles', 'VectorU',
                       'VectorV', 'Uproj', 'Vproj', 'Wproj', 'NBins'):
        self.setPropertyGroup(a_property, crystal_diffraction_title)
def PyExec(self):
    """Execution entry point: configure, pre-process background and
    vanadium runs, then determine the diffraction pattern."""
    # Facility and database configuration
    config_new_options = {'default.facility': 'SNS',
                          'default.instrument': 'BASIS',
                          'datasearch.searcharchive': 'On'}
    # (implement with ContextDecorator after python2 is deprecated)
    with pyexec_setup(config_new_options) as self._temps:
        # Load the mask to a temporary workspace
        self._t_mask = LoadMask(Instrument='BASIS',
                                InputFile=self.getProperty('MaskFile').
                                value,
                                OutputWorkspace='_t_mask')
        #
        # Find the version of the Data Acquisition System
        #
        self._find_das_version()
        #
        # Find valid incoming momentum range
        #
        self._calculate_wavelength_band()
        self._momentum_range = np.sort(2 * np.pi / self._wavelength_band)
        #
        # Pre-process the background runs
        #
        if self.getProperty('BackgroundRuns').value:
            bkg_run_numbers = self._run_list(
                self.getProperty('BackgroundRuns').value)
            background_reporter = Progress(self, start=0.0, end=1.0,
                                           nreports=len(bkg_run_numbers))
            for i, run in enumerate(bkg_run_numbers):
                if self._bkg is None:
                    # first background run initializes the accumulator
                    self._bkg = self._mask_t0_crop(run, '_bkg')
                    self._temps.workspaces.append('_bkg')
                else:
                    _ws = self._mask_t0_crop(run, '_ws')
                    self._bkg += _ws
                    if '_ws' not in self._temps.workspaces:
                        self._temps.workspaces.append('_ws')
                message = 'Pre-processing background: {} of {}'.\
                    format(i+1, len(bkg_run_numbers))
                background_reporter.report(message)
            SetGoniometer(self._bkg, Axis0='0,0,1,0,1')
            self._bkg_scale = self.getProperty('BackgroundScale').value
            background_reporter.report(len(bkg_run_numbers), 'Done')
        # Pre-process the vanadium run(s) by removing the delayed
        # emission time from the moderator and then saving to file(s)
        if self.getProperty('VanadiumRuns').value:
            run_numbers = self._run_list(
                self.getProperty('VanadiumRuns').value)
            vanadium_reporter = Progress(self, start=0.0, end=1.0,
                                         nreports=len(run_numbers))
            self._vanadium_files = list()
            for i, run in enumerate(run_numbers):
                self._vanadium_files.append(self._save_t0(run))
                message = 'Pre-processing vanadium: {} of {}'. \
                    format(i+1, len(run_numbers))
                vanadium_reporter.report(message)
            vanadium_reporter.report(len(run_numbers), 'Done')
        # Determination of single crystal diffraction
        self._determine_single_crystal_diffraction()
def _determine_single_crystal_diffraction(self):
    """
    All work related to the determination of the diffraction pattern
    """
    a, b, c = self.getProperty('LatticeSizes').value
    alpha, beta, gamma = self.getProperty('LatticeAngles').value
    u = self.getProperty('VectorU').value
    v = self.getProperty('VectorV').value
    uproj = self.getProperty('Uproj').value
    vproj = self.getProperty('Vproj').value
    wproj = self.getProperty('Wproj').value
    n_bins = self.getProperty('NBins').value
    self._n_bins = (n_bins, n_bins, 1)  # 2D slice: single bin on third axis
    axis0 = '{},0,1,0,1'.format(self.getProperty('PsiAngleLog').value)
    axis1 = '{},0,1,0,1'.format(self.getProperty('PsiOffset').value)
    # Options for SetUB independent of run
    ub_args = dict(a=a, b=b, c=c,
                   alpha=alpha, beta=beta, gamma=gamma,
                   u=u, v=v)
    min_values = None
    # Options for algorithm ConvertToMD independent of run
    convert_to_md_kwargs = dict(QDimensions='Q3D',
                                dEAnalysisMode='Elastic', Q3DFrames='HKL',
                                QConversionScales='HKL',
                                Uproj=uproj, Vproj=vproj, Wproj=wproj)
    md_norm_scd_kwargs = None  # Options for algorithm MDNormSCD
    # Find solid angle and flux
    if self._vanadium_files:
        kwargs = dict(Filename='+'.join(self._vanadium_files),
                      MaskFile=self.getProperty("MaskFile").value,
                      MomentumMin=self._momentum_range[0],
                      MomentumMax=self._momentum_range[1])
        _t_solid_angle, _t_int_flux = \
            MDNormSCDPreprocessIncoherent(**kwargs)
    else:
        # fall back to the canned solid-angle / flux workspaces
        _t_solid_angle = self.nominal_solid_angle('_t_solid_angle')
        _t_int_flux = self.nominal_integrated_flux('_t_int_flux')
    # Process a sample at a time
    run_numbers = self._run_list(self.getProperty("RunNumbers").value)
    diffraction_reporter = Progress(self, start=0.0, end=1.0,
                                    nreports=len(run_numbers))
    for i_run, run in enumerate(run_numbers):
        _t_sample = self._mask_t0_crop(run, '_t_sample')
        # Set Goniometer and UB matrix
        SetGoniometer(_t_sample, Axis0=axis0, Axis1=axis1)
        SetUB(_t_sample, **ub_args)
        if self._bkg:
            # keep the background goniometer in sync with the sample run
            self._bkg.run().getGoniometer().\
                setR(_t_sample.run().getGoniometer().getR())
            SetUB(self._bkg, **ub_args)
        # Determine limits for momentum transfer in HKL space. Needs to be
        # done only once. We use the first run.
        if min_values is None:
            kwargs = dict(QDimensions='Q3D',
                          dEAnalysisMode='Elastic',
                          Q3DFrames='HKL')
            min_values, max_values = ConvertToMDMinMaxGlobal(_t_sample,
                                                             **kwargs)
            convert_to_md_kwargs.update({'MinValues': min_values,
                                         'MaxValues': max_values})
        # Convert to MD
        _t_md = ConvertToMD(_t_sample, OutputWorkspace='_t_md',
                            **convert_to_md_kwargs)
        if self._bkg:
            _t_bkg_md = ConvertToMD(self._bkg, OutputWorkspace='_t_bkg_md',
                                    **convert_to_md_kwargs)
        # Determine aligned dimensions. Need to be done only once
        if md_norm_scd_kwargs is None:
            aligned = list()
            for i_dim in range(3):
                kwargs = {'name': _t_md.getDimension(i_dim).name,
                          'min': min_values[i_dim],
                          'max': max_values[i_dim],
                          'n_bins': self._n_bins[i_dim]}
                aligned.append(
                    '{name},{min},{max},{n_bins}'.format(**kwargs))
            md_norm_scd_kwargs = dict(AlignedDim0=aligned[0],
                                      AlignedDim1=aligned[1],
                                      AlignedDim2=aligned[2],
                                      FluxWorkspace=_t_int_flux,
                                      SolidAngleWorkspace=_t_solid_angle,
                                      SkipSafetyCheck=True)
        # Normalize sample by solid angle and integrated flux;
        # Accumulate runs into the temporary workspaces
        MDNormSCD(_t_md,
                  OutputWorkspace='_t_data',
                  OutputNormalizationWorkspace='_t_norm',
                  TemporaryDataWorkspace='_t_data' if
                  mtd.doesExist('_t_data') else None,
                  TemporaryNormalizationWorkspace='_t_norm' if
                  mtd.doesExist('_t_norm') else None,
                  **md_norm_scd_kwargs)
        if self._bkg:
            MDNormSCD(_t_bkg_md,
                      OutputWorkspace='_t_bkg_data',
                      OutputNormalizationWorkspace='_t_bkg_norm',
                      TemporaryDataWorkspace='_t_bkg_data' if
                      mtd.doesExist('_t_bkg_data') else None,
                      TemporaryNormalizationWorkspace='_t_bkg_norm'
                      if mtd.doesExist('_t_bkg_norm') else None,
                      **md_norm_scd_kwargs)
        message = 'Processing sample {} of {}'.\
            format(i_run+1, len(run_numbers))
        diffraction_reporter.report(message)
    self._temps.workspaces.append('PreprocessedDetectorsWS')  # to remove
    # Iteration over the sample runs is done.
    # Division by vanadium, subtract background, and rename workspaces
    name = self.getPropertyValue("OutputWorkspace")
    _t_data = DivideMD(LHSWorkspace='_t_data', RHSWorkspace='_t_norm')
    if self._bkg:
        _t_bkg_data = DivideMD(LHSWorkspace='_t_bkg_data',
                               RHSWorkspace='_t_bkg_norm')
        _t_scale = CreateSingleValuedWorkspace(DataValue=self._bkg_scale)
        _t_bkg_data = MultiplyMD(_t_bkg_data, _t_scale)
        ws = MinusMD(_t_data, _t_bkg_data)
        RenameWorkspace(_t_data, OutputWorkspace=name + '_dat')
        RenameWorkspace(_t_bkg_data, OutputWorkspace=name + '_bkg')
    else:
        ws = _t_data
    RenameWorkspace(ws, OutputWorkspace=name)
    self.setProperty("OutputWorkspace", ws)
    diffraction_reporter.report(len(run_numbers), 'Done')
def _save_t0(self, run_number, name='_t_ws'):
    """
    Create temporary events file with delayed emission time from
    moderator removed
    :param run_number: run number
    :param name: name for the output workspace
    :return: file name of event file with events treated with algorithm
    ModeratorTzeroLinear.
    """
    ws = self._load_single_run(run_number, name)
    ws = ModeratorTzeroLinear(InputWorkspace=ws.name(),
                              Gradient=self._tzero['gradient'],
                              Intercept=self._tzero['intercept'],
                              OutputWorkspace=ws.name())
    # Correct old DAS shift of fast neutrons. See GitHub issue 23855
    if self._das_version == VDAS.v1900_2018:
        ws = self.add_previous_pulse(ws)
    file_name = self._spawn_tempnexus()
    SaveNexus(ws, file_name)
    return file_name
def _mask_t0_crop(self, run_number, name):
    """
    Load a run into a workspace with:
    1. Masked detectors
    2. Delayed emission time from moderator removed
    3. Conversion of units to momentum
    4. Remove events outside the valid momentum range
    :param run_number: BASIS run number
    :param name: name for the output workspace
    :return: workspace object
    """
    ws = self._load_single_run(run_number, name)
    MaskDetectors(ws, MaskedWorkspace=self._t_mask)
    ws = ModeratorTzeroLinear(InputWorkspace=ws.name(),
                              Gradient=self._tzero['gradient'],
                              Intercept=self._tzero['intercept'],
                              OutputWorkspace=ws.name())
    # Correct old DAS shift of fast neutrons. See GitHub issue 23855
    if self._das_version == VDAS.v1900_2018:
        ws = self.add_previous_pulse(ws)
    ws = ConvertUnits(ws, Target='Momentum', OutputWorkspace=ws.name())
    # keep only events inside the valid incoming momentum band
    ws = CropWorkspace(ws,
                       OutputWorkspace=ws.name(),
                       XMin=self._momentum_range[0],
                       XMax=self._momentum_range[1])
    return ws
def _load_single_run(self, run, name):
    """
    Find and load events from the diffraction tubes.

    Run number 90000 discriminates between the old and new DAS
    (see _find_das_version).

    Parameters
    ----------
    run: str
        Run number
    name: str
        Name of the output EventsWorkspace

    Returns
    -------
    EventsWorkspace
    """
    banks = ','.join(['bank{}'.format(i) for i in self._diff_bank_numbers])
    # loader options differ by DAS version: old files keep diffraction
    # events in a dedicated NXentry, new files use regular bank names
    particular = {VDAS.v1900_2018: dict(NXentryName='entry-diff'),
                  VDAS.v2019_2100: dict(BankName=banks)}
    identifier = "{0}_{1}".format(self._short_inst, str(run))
    kwargs = dict(Filename=identifier, SingleBankPixelsOnly=False,
                  OutputWorkspace=name)
    kwargs.update(particular[self._das_version])
    return LoadEventNexus(**kwargs)
def _spawn_tempnexus(self):
    """
    Create an empty temporary .nxs file in the default save directory and
    flag it for removal upon algorithm completion.
    :return: (str) absolute path to the temporary file.
    """
    # delete=False keeps the file after the handle closes; pyexec_setup
    # removes it when the algorithm finishes
    with tempfile.NamedTemporaryFile(
            prefix='BASISCrystalDiffraction_',
            suffix='.nxs',
            dir=mantid_config['defaultsave.directory'],
            delete=False) as handle:
        temp_path = handle.name
    self._temps.files.append(temp_path)  # flag for removal
    return temp_path
def nominal_solid_angle(self, name):
    """
    Generate an isotropic solid angle
    :param name: Name of the output workspace
    :return: reference to solid angle workspace
    """
    ws = LoadNexus(Filename=self._solid_angle_ws_, OutputWorkspace=name)
    ClearMaskFlag(ws)
    MaskDetectors(ws, MaskedWorkspace=self._t_mask)
    for i in range(ws.getNumberHistograms()):
        # unit solid angle for live detectors, zero for masked ones
        ws.dataY(i)[0] = 0.0 if ws.getDetector(i).isMasked() else 1.0
        # single bin spanning the whole valid momentum range
        ws.setX(i, self._momentum_range)
    return ws
def nominal_integrated_flux(self, name):
    """
    Generate a flux independent of momentum
    :param name: Name of the output workspace
    :return: reference to flux workspace
    """
    ws = LoadNexus(Filename=self._flux_ws_, OutputWorkspace=name)
    ClearMaskFlag(ws)
    # apply the same detector mask used for the data
    MaskDetectors(ws, MaskedWorkspace=self._t_mask)
    return ws
def _find_das_version(self):
    """Infer the DAS version from the first sample run number and store it
    in self._das_version."""
    boundary_run = 90000  # from VDAS.v1900_2018 to VDAS.v2019_2100
    first_run = int(self._run_list(self.getProperty('RunNumbers').value)[0])
    self._das_version = (VDAS.v1900_2018 if first_run < boundary_run
                         else VDAS.v2019_2100)
    logger.information('DAS version is ' + str(self._das_version))
def _calculate_wavelength_band(self):
    """
    Select the wavelength band examining the logs of the first sample.

    Reads the mean 'LambdaRequest' log of the first run and matches it
    against the known reflection bands (self._wavelength_bands). On
    success, self._wavelength_band is set to a two-element numpy array
    [lambda_min, lambda_max].

    Raises
    ------
    RuntimeError
        If the requested wavelength falls in no known band (previously
        this failed silently, leaving _wavelength_band as None and
        crashing later in PyExec when computing the momentum range).
    """
    runs = self.getProperty('RunNumbers').value
    run = self._run_list(runs)[0]
    _t_w = self._load_single_run(run, '_t_w')
    wavelength = np.mean(_t_w.getRun().getProperty('LambdaRequest').value)
    # was logger.error('DEBUG wavelength = ...'): leftover debugging
    # emitted at the wrong severity level
    logger.debug('wavelength = ' + str(wavelength))
    for reflection, band in self._wavelength_bands.items():
        if band[0] <= wavelength <= band[1]:
            self._wavelength_band = np.array(band)
            break
    else:
        raise RuntimeError(
            'Requested wavelength {} matches no known band'.format(wavelength))
# Register algorithm with Mantid so the framework can discover and run it.
AlgorithmFactory.subscribe(BASISCrystalDiffraction)
|
mganeva/mantid
|
Framework/PythonInterface/plugins/algorithms/BASISCrystalDiffraction.py
|
Python
|
gpl-3.0
| 26,717
|
[
"CRYSTAL"
] |
c627a69a01bf31699d96001a05b104e6c313291afcdf31040a36b6eb6fce49c6
|
#!/usr/bin/python
import matplotlib.pyplot as plt
import matplotlib.widgets as widgets
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import e_field_gen as e_field
import odeint_solve as ode
import sys as sys
import matplotlib.cm as col
# Physical constants (SI units)
ELEC_MASS = 9.10938356E-31  # electron mass, kg
ELEC_CHARGE = -1.60217662E-19  # electron charge, C
FUND_FREQ = 3.7474057E14  # fundamental laser frequency, Hz (~800 nm)
SP_LIGHT = 3E8  # speed of light, m/s
# Laser pulse / focus parameters
PL_FWHM = 25E-15  # pulse duration (FWHM), s
FOCUS_RADIUS = 30E-6  # focal spot radius, m
PULSE_ENERGY = 0.6E-3  # pulse energy, J
EPSILON_o = 8.85418782E-12  # vacuum permittivity, F/m
TIME_GRID = 200  # number of time samples -- unused in this script; TODO confirm
INTENSITY = 1.88*(PULSE_ENERGY/(FOCUS_RADIUS**2*PL_FWHM))/np.pi  # gaussian peak intensity, W/m^2
FIELD_AMP = np.sqrt(2*INTENSITY/(EPSILON_o*SP_LIGHT))  # peak electric field, V/m
FIELD_TOLERANCE = FIELD_AMP*1E-1  # threshold at 10% of the peak field
FIELD_AMP_ION = np.sqrt(2E14/(EPSILON_o*SP_LIGHT))  # field at 2e14 W/m^2 (presumably ionization threshold -- TODO confirm)
PONDER = (FIELD_AMP**2)*(ELEC_CHARGE**2)/(4*ELEC_MASS*(2*np.pi*FUND_FREQ)**2)  # ponderomotive energy, J
def plot(paths, time_grid, field):
    """Redraw electron trajectories and the scaled driving field on ax1.

    paths: sequence of position traces, one per trajectory
    time_grid: common time axis for all traces
    field: field functions; field[0] evaluates the y-component
    """
    ax1.clear()
    colormap = col.copper
    n_paths = float(len(paths))
    for index, path in enumerate(paths):
        ax1.plot(time_grid, path, color=colormap(index / n_paths))
    # overlay the driving field, scaled into the plot's position range
    scaled_field = 0.2E-7 * field[0](time_grid) / FIELD_AMP
    ax1.plot(time_grid, scaled_field, 'b')
    ax1.set_ylim(-0.1E-7, 0.8E-7)
    ax1.set_xlim(0, 0 + 20 / (FUND_FREQ))
    fig1.canvas.draw_idle()
def update(val):
    """Slider/radio callback: rebuild the three-color field from the current
    slider values, re-solve the electron path and re-plot.

    `val` (the changed widget's value) is ignored; all sliders are re-read.
    """
    qwp_1 = slider_1.val
    hwp_2 = slider_2.val
    qwp_2 = slider_3.val
    hwp_3 = slider_4.val
    qwp_3 = slider_5.val
    delay_1 = slider_6.val
    delay_2 = slider_7.val
    ampl_1 = slider_8.val
    ampl_2 = slider_9.val
    ampl_3 = slider_10.val
    closeness = slider_13.val
    # three-color field with waveplate angles/delays from the sliders;
    # delays are in units of the fundamental period
    a = e_field.e_field_gen(3, False, ampl_1*FIELD_AMP, ampl_2*FIELD_AMP,
                            ampl_3*FIELD_AMP, 0,
                            delay_1/FUND_FREQ, delay_2/FUND_FREQ,
                            FUND_FREQ, 2*FUND_FREQ, 3*FUND_FREQ,
                            [[qwp_1], [hwp_2, qwp_2], [hwp_3, qwp_3]],
                            PL_FWHM, PL_FWHM, PL_FWHM,
                            b1='q', b2='hq', b3='hq')
    t = np.linspace(-5*PL_FWHM, 5*PL_FWHM, 50)
    y_field = a[0](t)
    z_field = a[1](t)
    tot = np.sqrt(y_field**2 + z_field**2)
    # ionization-threshold filtering currently disabled: use the full grid
    times = t#[j for i,j in zip(tot,t) if i > FIELD_AMP_ION]
    # solve the path over two fundamental periods starting at times[0]
    b = ode.solve_path(a, times[0], times[0] + 2/(FUND_FREQ), True, False, closeness*1E-9)
    print(times[0], times[0] + 2/(FUND_FREQ), "!!!")
    # plot(b,a)
    plot(b[0][:,0], b[1], a)
    # plot(a)
if __name__ == '__main__':
    # Figure with a single axes for the trajectory/field display
    fig1 = plt.figure(1)
    ax1 = plt.axes([0.05, 0.15, 0.9, 0.80])
    #fig2 = plt.figure(2)
    #ax2 = plt.axes([0.05, 0.15, 0.9, 0.80], projection='3d')
    # Slider axes: waveplate angles (left column), delays/amplitudes (right
    # column), auxiliary controls (top row)
    ax_slider_1 = plt.axes([0.1, 0.01, 0.2, 0.02])
    ax_slider_2 = plt.axes([0.1, 0.04, 0.2, 0.02])
    ax_slider_3 = plt.axes([0.1, 0.07, 0.2, 0.02])
    ax_slider_4 = plt.axes([0.1, 0.1, 0.2, 0.02])
    ax_slider_5 = plt.axes([0.1, 0.13, 0.2, 0.02])
    ax_slider_6 = plt.axes([0.5, 0.01, 0.2, 0.02])
    ax_slider_7 = plt.axes([0.5, 0.04, 0.2, 0.02])
    ax_slider_8 = plt.axes([0.5, 0.07, 0.2, 0.02])
    ax_slider_9 = plt.axes([0.5, 0.1, 0.2, 0.02])
    ax_slider_10 = plt.axes([0.5, 0.13, 0.2, 0.02])
    rax = plt.axes([0.0, 0.8, 0.1, 0.15])
    ax_slider_11 = plt.axes([0.2, 0.97, 0.1, 0.02])
    ax_slider_12 = plt.axes([0.6, 0.97, 0.1, 0.02])
    ax_slider_13 = plt.axes([0.8, 0.97, 0.2, 0.02])
    radio = widgets.RadioButtons(rax, ('CW', 'Pulsed'))
    # waveplate angles in degrees
    slider_1 = widgets.Slider(ax_slider_1, 'qwp_1', 0., 360)
    slider_2 = widgets.Slider(ax_slider_2, 'hwp_2', 0., 360)
    slider_3 = widgets.Slider(ax_slider_3, 'qwp_2', 0., 360)
    slider_4 = widgets.Slider(ax_slider_4, 'hwp_3', 0., 360)
    slider_5 = widgets.Slider(ax_slider_5, 'qwp_3', 0., 360)
    # delays in fundamental periods, amplitudes as fractions of FIELD_AMP
    slider_6 = widgets.Slider(ax_slider_6, 'delay_2', -2, 2)
    slider_7 = widgets.Slider(ax_slider_7, 'delay_3', -2, 2)
    slider_8 = widgets.Slider(ax_slider_8, 'ampl_1', 0, 1)
    slider_9 = widgets.Slider(ax_slider_9, 'ampl_2', 0, 1)
    slider_10 = widgets.Slider(ax_slider_10, 'ampl_3', 0, 1)
    slider_11 = widgets.Slider(ax_slider_11, 'x-size', 0, 4)
    slider_12 = widgets.Slider(ax_slider_12, 'x-start', -2, 2)
    slider_13 = widgets.Slider(ax_slider_13, 'close', 0, 1)
    #start
    # Initial draw using default slider values (single-color field; this
    # largely duplicates update() with most parameters zeroed)
    qwp_1 = slider_1.val
    hwp_2 = slider_2.val
    qwp_2 = slider_3.val
    hwp_3 = slider_4.val
    qwp_3 = slider_5.val
    delay_1 = slider_6.val
    delay_2 = slider_7.val
    ampl_1 = slider_8.val
    ampl_2 = slider_9.val
    ampl_3 = slider_10.val
    closeness = slider_13.val
    a = e_field.e_field_gen(3, False, ampl_1*FIELD_AMP, 0*FIELD_AMP,
                            0*FIELD_AMP, 0,
                            0/FUND_FREQ, delay_2/FUND_FREQ,
                            FUND_FREQ, 2*FUND_FREQ, 3*FUND_FREQ,
                            [[0], [0, 0], [0, 0]],
                            PL_FWHM, PL_FWHM, PL_FWHM,
                            b1='q', b2='hq', b3='hq')
    t = np.linspace(-5*PL_FWHM, 5*PL_FWHM, 50)
    y_field = a[0](t)
    z_field = a[1](t)
    tot = np.sqrt(y_field**2 + z_field**2)
    times = t#[j for i,j in zip(tot,t) if i > FIELD_AMP_ION]
    b = ode.solve_path(a, 0, 0 + 1/(FUND_FREQ), True, False, closeness*1E-9)
    #print(b)
    # plot(b,a)
    plot(b[0][:,0], b[1], a)
    # plot(a)
    #end
    # Re-draw whenever any control changes
    slider_1.on_changed(update)
    slider_2.on_changed(update)
    slider_3.on_changed(update)
    slider_4.on_changed(update)
    slider_5.on_changed(update)
    slider_6.on_changed(update)
    slider_7.on_changed(update)
    slider_8.on_changed(update)
    slider_9.on_changed(update)
    slider_10.on_changed(update)
    slider_11.on_changed(update)
    slider_12.on_changed(update)
    slider_13.on_changed(update)
    radio.on_clicked(update)
    plt.show()
|
KavuriG/classical-calc-three-color
|
plot-all-paths/tester.py
|
Python
|
gpl-3.0
| 5,731
|
[
"Gaussian"
] |
60c879df66ebecdaede8ca267507859f58f259b92753c0b41906cf49aefc35f9
|
import sys
import pytest
from gm_base.model_data import DataNode, Loader, NotificationHandler
from gm_base.model_data.yaml.resolver import resolve_scalar_tag
from ModelEditor.meconfig import MEConfig as cfg
import testing.ModelEditor.mock.mock_config as mockcfg
from gm_base.geomop_util import Position
# TODO: original test seems to setup QApplication, but it should not depend on Qt.
def test_parse(request=None):
    """End-to-end YAML loading checks: scalar typing, node spans, path
    lookup, tag parsing, anchors/references and parser errors."""
    error_handler = NotificationHandler()
    mockcfg.set_empty_config()
    if request is not None:
        # clean up the mocked config when run under pytest
        def fin_test_config():
            mockcfg.clean_config()
        request.addfinalizer(fin_test_config)
    loader = Loader(error_handler)
    # parse mapping, scalar
    document = (
        "format: ascii\n"
        "file: dual_sorp.vtk"
    )
    root = loader.load(document)
    assert root.children[0].value == 'ascii'
    # parse sequence, scalar
    document = (
        "- ascii\n"
        "- utf-8"
    )
    root = loader.load(document)
    assert root.children[1].value == 'utf-8'
    # test complex structure
    mockcfg.load_complex_structure_to_config()
    # test values - are scalars converted to the correct type?
    assert cfg.root.children[0].children[0].children[0].value is None
    assert cfg.root.children[1].children[1].children[0].value is True
    assert cfg.root.children[0].children[1].children[1].value == 0.5
    assert (cfg.root.children[1].children[1].children[1].children[0].children[1]
            .value) == 'ALL'
    assert (cfg.root.children[1].children[1].children[1].children[1].children[0]
            .value) == 0
    # test node spans - try to get node at certain positions
    assert cfg.root.get_node_at_position(Position(5, 5)) == (
        cfg.root.children[0].children[0].children[0])
    assert cfg.root.get_node_at_position(Position(5, 9)) == (
        cfg.root.children[0].children[0].children[0])
    assert cfg.root.get_node_at_position(Position(13, 18)) == (
        cfg.root.children[1].children[1]
        .children[0])
    assert cfg.root.get_node_at_position(Position(15, 22)) == (
        cfg.root.children[1].children[1]
        .children[1].children[0].children[0])
    assert cfg.root.get_node_at_position(Position(15, 33)) == (
        cfg.root.children[1].children[1]
        .children[1].children[0].children[1])
    # test absolute_path, get_node_at_path
    assert cfg.root.get_node_at_path('/') == cfg.root
    input_fields_node = cfg.root.get_node_at_path('/problem/primary_equation/input_fields')
    assert input_fields_node == cfg.root.children[1].children[1].children[1]
    assert input_fields_node.get_node_at_path('.') == input_fields_node
    assert (input_fields_node.get_node_at_path('./0/r_set') ==
            input_fields_node.children[0].children[1])
    assert (input_fields_node.get_node_at_path('/problem/primary_equation/input_fields/0/r_set') ==
            input_fields_node.children[0].children[1])
    assert input_fields_node.get_node_at_path('../../..') == cfg.root
    with pytest.raises(LookupError):
        cfg.root.get_node_at_path('/invalid/path')
    # test parser error: mixing a mapping entry with a sequence entry
    document = (
        "format: ascii\n"
        "- file: dual_sorp.vtk"
    )
    loader.load(document)
    assert len(loader.notification_handler.notifications) == 1
    # test tag parsing
    document = (
        "problem: !SequentialCoupling\n"
        " test: 1"
    )
    root = loader.load(document)
    assert root.children[0].type.value == 'SequentialCoupling'
    assert root.get_node_at_position(Position(1, 11)).type.value == 'SequentialCoupling'
    mockcfg.load_valid_structure_to_config()
    # test get_node_at_path
    assert cfg.root.get_node_at_path('/') == cfg.root
    assert (cfg.root.get_node_at_path('/problem/mesh/mesh_file').value ==
            'input/dual_por.msh')
    assert (cfg.root.children[0].children[0].get_node_at_path(
        '../primary_equation/balance').value is True)
    # test tag
    assert cfg.root.children[0].type.value == 'SequentialCoupling'
    assert cfg.root.children[0].type.span.start.line == 6
    assert cfg.root.children[0].type.span.start.column == 11
    assert cfg.root.children[0].type.span.end.line == 6
    assert cfg.root.children[0].type.span.end.column == 29
    # test ref
    input_fields = cfg.root.children[0].children[1].children[1]
    assert input_fields.children[0].children[0].value == 0
    assert input_fields.children[2].children[0].value == 0
    # test empty abstract record
    node = cfg.root.get_node_at_path('/problem/primary_equation/solver')
    assert node.implementation == DataNode.Implementation.mapping
    assert node.type.value == 'Petsc'
    # test ref errors: *x and *y are undefined anchors -> two notifications
    document = (
        "- &r text\n"
        "- *x\n"
        "- *r\n"
        "- *y"
    )
    loader.notification_handler.clear()
    root = loader.load(document)
    assert len(loader.notification_handler.notifications) == 2
def test_resolver():
    """Scalar tag resolution: int, float, bool and null literals."""
    cases = [
        ('13', 'tag:yaml.org,2002:int'),
        ('-13', 'tag:yaml.org,2002:int'),
        ('13.2', 'tag:yaml.org,2002:float'),
        ('-13.1e-13', 'tag:yaml.org,2002:float'),
        ('true', 'tag:yaml.org,2002:bool'),
        ('', 'tag:yaml.org,2002:null'),
    ]
    for value, expected_tag in cases:
        assert resolve_scalar_tag(value) == expected_tag
def profile_parsing():
    """Ad-hoc micro-benchmark of document loading and node lookup.

    Not run by pytest; invoke manually. Prints average per-call times.
    """
    import timeit
    # setup code executed once per timeit run (not timed)
    setup = (
        "from data.yaml import Loader\n"
        "with open('data/examples/config_simple.yaml') as file:\n"
        " document = file.read()\n"
        "loader = Loader()\n"
    )
    number = 100
    total_time = timeit.timeit('root = loader.load(document)',
                               setup=setup,
                               number=number)
    print("loading document takes ~{0:.3f}ms".format(total_time * 1000 / number))
    # extend the setup with a parsed document for the lookup benchmark
    setup += (
        "root = loader.load(document)\n"
        "from data.data_node import Position\n"
        "position = Position(40, 35)\n"
    )
    number = 10000
    total_time = timeit.timeit('root.get_node_at_position(position)',
                               setup=setup,
                               number=number)
    print("finding node takes ~{0:.3f}ms".format(total_time * 1000 / number))
# Allow running this module directly, outside the pytest runner.
if __name__ == '__main__':
    test_parse()
|
GeoMop/GeoMop
|
testing/ModelEditor/data/test_data_node.py
|
Python
|
gpl-3.0
| 6,377
|
[
"VTK"
] |
5cca155062ea5a7955f1f42e6ed6370b43ef85ecb309b65c211c7b8c75b143a5
|
from bayes_opt import BayesianOptimization
# Example of how to use this bayesian optimization package.
# Let's find the maximum of a simple quadratic function of two variables.
# We create the bayes_opt object and pass the function to be maximized
# together with the parameters names and their bounds.
bo = BayesianOptimization(lambda x, y: -x**2 - (y - 1)**2 + 1,
                          {'x': (-4, 4), 'y': (-3, 3)})
# One of the things we can do with this object is pass points
# which we want the algorithm to probe. A dictionary with the
# parameters names and a list of values to include in the search
# must be given.
bo.explore({'x': [-1, 3], 'y': [-2, 2]})
# Additionally, if we have any prior knowledge of the behaviour of
# the target function (even if not totally accurate) we can also
# tell that to the optimizer.
# Here we pass a dictionary with target values as keys of another
# dictionary with parameters names and their corresponding value.
bo.initialize({-2: {'x': 1, 'y': 0}, -1.251: {'x': 1, 'y': 1.5}})
# Once we are satisfied with the initialization conditions
# we let the algorithm do its magic by calling the maximize()
# method.
bo.maximize(init_points=5, n_iter=15, kappa=3.29)
# The output values can be accessed with self.res
print(bo.res['max'])
# If we are not satisfied with the current results we can pick up from
# where we left off, maybe pass some more exploration points to the
# algorithm, change any parameters we may choose, and then let it run
# again.
bo.explore({'x': [0.6], 'y': [-0.23]})
# Making changes to the Gaussian process can impact the algorithm
# dramatically.
gp_params = {'corr': 'absolute_exponential',
             'nugget': 1e-5}
# Run it again with a different acquisition function ('ei' is presumably
# expected improvement -- confirm against the bayes_opt documentation).
bo.maximize(n_iter=5, acq='ei', **gp_params)
# Finally, we take a look at the final results.
print(bo.res['max'])
print(bo.res['all'])
|
ysasaki6023/NeuralNetworkStudy
|
examples/usage.py
|
Python
|
mit
| 1,879
|
[
"Gaussian"
] |
62009f5811cfdca0279f0c29254f0f43e8c2c4ffcae75b200e370d66a944fb1a
|
# -*- coding: utf-8 -*-
# Dioptas - GUI program for fast processing of 2D X-ray diffraction data
# Principal author: Clemens Prescher (clemens.prescher@gmail.com)
# Copyright (C) 2014-2019 GSECARS, University of Chicago, USA
# Copyright (C) 2015-2018 Institute for Geology and Mineralogy, University of Cologne, Germany
# Copyright (C) 2019 DESY, Hamburg, Germany
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest
import os
import numpy as np
from numpy.testing import assert_array_almost_equal
from ...model.PatternModel import Pattern, PatternModel
from ...model.util.PeakShapes import gaussian
unittest_path = os.path.dirname(__file__)
data_path = os.path.join(unittest_path, '../data')
class PatternModelTest(unittest.TestCase):
    """Unit tests for PatternModel: setting, loading and auto background
    subtraction of patterns."""

    def setUp(self):
        # A sampled sine curve serves as the reference pattern.
        self.x = np.linspace(0.1, 15, 100)
        self.y = np.sin(self.x)
        self.pattern = Pattern(self.x, self.y)
        self.pattern_model = PatternModel()

    def test_set_pattern(self):
        """A pattern set on the model is returned unchanged, name included."""
        self.pattern_model.set_pattern(self.x, self.y, 'hoho')
        result = self.pattern_model.get_pattern()
        assert_array_almost_equal(result.x, self.x)
        assert_array_almost_equal(result.y, self.y)
        self.assertEqual(result.name, 'hoho')

    def test_load_pattern(self):
        """Loading a pattern file replaces the current data and name."""
        self.pattern_model.load_pattern(os.path.join(data_path, 'pattern_001.xy'))
        loaded = self.pattern_model.get_pattern()
        self.assertEqual(loaded.name, 'pattern_001')
        # The file holds a different number of points than the fixture arrays.
        self.assertNotEqual(len(self.x), len(loaded.x))
        self.assertNotEqual(len(self.y), len(loaded.y))

    def test_auto_background_subtraction(self):
        """Auto background subtraction recovers the peaks on a linear background."""
        x = np.linspace(0, 24, 2500)
        # Synthetic signal: three Gaussian peaks...
        y = np.zeros(x.shape)
        peak_parameters = [
            [10, 3, 0.1],
            [12, 4, 0.1],
            [12, 6, 0.1],
        ]
        for params in peak_parameters:
            y += gaussian(x, *params)
        # ...sitting on a linear background.
        y_bkg = x * 0.4 + 5.0
        self.pattern_model.set_pattern(x, y + y_bkg)
        self.pattern_model.set_auto_background_subtraction([2, 50, 50])
        x_spec, y_spec = self.pattern_model.pattern.data
        # After subtraction only the peaks should remain.
        self.assertAlmostEqual(np.sum(y_spec - y), 0)
# Allow running this test module directly, outside of a test runner.
if __name__ == '__main__':
    unittest.main()
|
erangre/Dioptas
|
dioptas/tests/unit_tests/test_PatternModel.py
|
Python
|
gpl-3.0
| 2,962
|
[
"Gaussian"
] |
6f9412252bf60cb1e028473861c1a66c0076d88ef4066591fd0789ff5a1153b2
|
import numpy as np
import imblearn.over_sampling
import sklearn.preprocessing
import astropy.convolution
START = 6519
END = 6732
def air2vacuum(air_waves):
    '''Convert air wavelengths (Angstrom) to vacuum wavelengths.

    Uses the refractive-index formula from
    http://www.astro.uu.se/valdwiki/Air-to-vacuum%20conversion

    :param air_waves: array-like of air wavelengths
    :returns: numpy float array of vacuum wavelengths, same shape as input
    '''
    # Explicit float conversion: the original zeros_like() output inherited
    # the input dtype, silently truncating results for integer input.
    air_waves = np.asarray(air_waves, dtype=float)
    # Vectorized over the whole array instead of a per-element Python loop.
    s2 = ((10 ** 4) / air_waves) ** 2
    n = (1 + 0.00008336624212083
         + 0.02408926869968 / (130.1065924522 - s2)
         + 0.0001599740894897 / (38.92568793293 - s2))
    return air_waves * n
def convolve_spectrum(fluxes, stddev=7):
    '''Smooth a spectrum by convolving it with a 1D Gaussian kernel.

    :param fluxes: flux values of the spectrum
    :param stddev: standard deviation of the Gaussian kernel (in samples)
    :returns: the convolved flux array
    '''
    gauss = astropy.convolution.Gaussian1DKernel(stddev=stddev)
    smoothed = astropy.convolution.convolve(fluxes, gauss, boundary='extend')
    return smoothed
def resample_spectrum(waves, fluxes, space=None):
    '''Linearly resample a spectrum onto a common wavelength grid.

    :param waves: sample wavelengths (must be increasing for np.interp)
    :param fluxes: flux values at ``waves``
    :param space: target wavelength grid; defaults to 140 points
        spanning [START, END]
    :returns: fluxes interpolated onto ``space``
    '''
    # Build the default grid per call instead of evaluating it once at
    # import time (avoids the mutable-default-argument pitfall).
    if space is None:
        space = np.linspace(START, END, 140)
    return np.interp(space, waves, fluxes)
def smote_over_sample(X, y, *, n_classes=3):
    '''Oversample the dataset
    so that all classes have the same number of samples.'''
    # Work on copies so the caller's arrays are never modified in place.
    X_ = np.copy(X)
    y_ = np.copy(y)
    smote = imblearn.over_sampling.SMOTE()
    # NOTE(review): SMOTE.fit_sample was renamed fit_resample in
    # imbalanced-learn >= 0.4 — confirm the pinned version still exposes
    # fit_sample.
    # One pass per minority class; n_classes - 1 passes are assumed
    # sufficient to equalize all class counts.
    for _ in range(n_classes - 1):
        X_, y_ = smote.fit_sample(X_, y_)
    return X_, y_
def scale_samples(X):
    '''Scale each sample to have zero mean and unit variance.'''
    # axis=1 standardizes along each row, i.e. per sample rather than
    # per feature column.
    return sklearn.preprocessing.scale(X, axis=1)
def scale_features(X_train, X_validation, scaler=None):
    '''Fit scaler on X_train and transform both X_train and X_validation.

    :param X_train: training feature matrix (fit + transform)
    :param X_validation: validation feature matrix (transform only)
    :param scaler: optional pre-constructed scikit-learn scaler; when
        omitted a fresh ``StandardScaler`` is created per call
    :returns: tuple ``(X_train_scaled, X_validation_scaled)``
    '''
    # Create a fresh scaler per call: the original default was a single
    # StandardScaler instance shared across all calls (mutable default
    # argument), whose fitted state leaked between invocations.
    if scaler is None:
        scaler = sklearn.preprocessing.StandardScaler()
    X_tr = scaler.fit_transform(X_train)
    X_val = scaler.transform(X_validation)
    return X_tr, X_val
|
podondra/bt-spectraldl
|
notebooks/spectraldl/preprocessing.py
|
Python
|
gpl-3.0
| 1,684
|
[
"Gaussian"
] |
7ae9ee2525650fbba0af0e52f45a6b85b64e9117b71f5e109a6f3f1fe7d421f8
|
"""
Acceptance tests for Studio's Settings Details pages
"""
from datetime import datetime, timedelta
from unittest import skip
from .base_studio_test import StudioCourseTest
from ...fixtures.config import ConfigModelFixture
from ...fixtures.course import CourseFixture
from ...pages.studio.settings import SettingsPage
from ...pages.studio.overview import CourseOutlinePage
from ...tests.studio.base_studio_test import StudioCourseTest
from ..helpers import (
generate_course_key,
select_option_by_value,
is_option_value_selected,
element_has_text,
)
class StudioSettingsDetailsTest(StudioCourseTest):
    """Shared fixture for Studio Schedule & Details page tests.

    Opens the settings page before every test so subclasses can interact
    with it immediately.
    """
    def setUp(self, is_staff=True):
        super(StudioSettingsDetailsTest, self).setUp(is_staff=is_staff)
        course = self.course_info
        self.settings_detail = SettingsPage(
            self.browser, course['org'], course['number'], course['run']
        )
        # Every test starts from a freshly loaded settings page.
        self.settings_detail.visit()
        self.assertTrue(self.settings_detail.is_browser_on_page())
class SettingsMilestonesTest(StudioSettingsDetailsTest):
    """
    Tests for milestones feature in Studio's settings tab
    (pre-requisite courses and entrance exams).
    """
    def test_page_has_prerequisite_field(self):
        """
        Test to make sure page has pre-requisite course field if milestones app is enabled.
        """
        self.assertTrue(self.settings_detail.pre_requisite_course_options)
    def test_prerequisite_course_save_successfully(self):
        """
        Scenario: Selecting course from Pre-Requisite course drop down save the selected course as pre-requisite
        course.
        Given that I am on the Schedule & Details page on studio
        When I select an item in pre-requisite course drop down and click Save Changes button
        Then My selected item should be saved as pre-requisite course
        And My selected item should be selected after refreshing the page.'
        """
        course_number = self.unique_id
        # Create a second course to act as the prerequisite.
        CourseFixture(
            org='test_org',
            number=course_number,
            run='test_run',
            display_name='Test Course' + course_number
        ).install()
        pre_requisite_course_key = generate_course_key(
            org='test_org',
            number=course_number,
            run='test_run'
        )
        # NOTE(review): unicode() makes this Python-2-only — confirm before
        # running under Python 3.
        pre_requisite_course_id = unicode(pre_requisite_course_key)
        # Refresh the page to load the new course fixture and populate the prerequisite course dropdown
        # Then select the prerequisite course and save the changes
        self.settings_detail.refresh_page()
        self.settings_detail.wait_for_prerequisite_course_options()
        select_option_by_value(
            browser_query=self.settings_detail.pre_requisite_course_options,
            value=pre_requisite_course_id
        )
        self.settings_detail.save_changes()
        self.assertEqual(
            'Your changes have been saved.',
            self.settings_detail.alert_confirmation_title.text
        )
        # Refresh the page again and confirm the prerequisite course selection is properly reflected
        self.settings_detail.refresh_page()
        self.settings_detail.wait_for_prerequisite_course_options()
        self.assertTrue(is_option_value_selected(
            browser_query=self.settings_detail.pre_requisite_course_options,
            value=pre_requisite_course_id
        ))
        # Set the prerequisite course back to None and save the changes
        select_option_by_value(
            browser_query=self.settings_detail.pre_requisite_course_options,
            value=''
        )
        self.settings_detail.save_changes()
        self.assertEqual(
            'Your changes have been saved.',
            self.settings_detail.alert_confirmation_title.text
        )
        # Refresh the page again to confirm the None selection is properly reflected
        self.settings_detail.refresh_page()
        self.settings_detail.wait_for_prerequisite_course_options()
        self.assertTrue(is_option_value_selected(
            browser_query=self.settings_detail.pre_requisite_course_options,
            value=''
        ))
        # Re-pick the prerequisite course and confirm no errors are thrown (covers a discovered bug)
        select_option_by_value(
            browser_query=self.settings_detail.pre_requisite_course_options,
            value=pre_requisite_course_id
        )
        self.settings_detail.save_changes()
        self.assertEqual(
            'Your changes have been saved.',
            self.settings_detail.alert_confirmation_title.text
        )
        # Refresh the page again to confirm the prerequisite course selection is properly reflected
        self.settings_detail.refresh_page()
        self.settings_detail.wait_for_prerequisite_course_options()
        dropdown_status = is_option_value_selected(
            browser_query=self.settings_detail.pre_requisite_course_options,
            value=pre_requisite_course_id
        )
        self.assertTrue(dropdown_status)
    def test_page_has_enable_entrance_exam_field(self):
        """
        Test to make sure page has 'enable entrance exam' field.
        """
        self.assertTrue(self.settings_detail.entrance_exam_field)
    @skip('Passes in devstack, passes individually in Jenkins, fails in suite in Jenkins.')
    def test_enable_entrance_exam_for_course(self):
        """
        Test that entrance exam should be created after checking the 'enable entrance exam' checkbox.
        And also that the entrance exam is destroyed after deselecting the checkbox.
        """
        self.settings_detail.require_entrance_exam(required=True)
        self.settings_detail.save_changes()
        # getting the course outline page.
        course_outline_page = CourseOutlinePage(
            self.browser, self.course_info['org'], self.course_info['number'], self.course_info['run']
        )
        course_outline_page.visit()
        # title with text 'Entrance Exam' should be present on page.
        self.assertTrue(element_has_text(
            page=course_outline_page,
            css_selector='span.section-title',
            text='Entrance Exam'
        ))
        # Delete the currently created entrance exam.
        self.settings_detail.visit()
        self.settings_detail.require_entrance_exam(required=False)
        self.settings_detail.save_changes()
        course_outline_page.visit()
        self.assertFalse(element_has_text(
            page=course_outline_page,
            css_selector='span.section-title',
            text='Entrance Exam'
        ))
    def test_entrance_exam_has_unit_button(self):
        """
        Test that entrance exam should be created after checking the 'enable entrance exam' checkbox.
        And user has option to add units only instead of any Subsection.
        """
        self.settings_detail.require_entrance_exam(required=True)
        self.settings_detail.save_changes()
        # getting the course outline page.
        course_outline_page = CourseOutlinePage(
            self.browser, self.course_info['org'], self.course_info['number'], self.course_info['run']
        )
        course_outline_page.visit()
        course_outline_page.wait_for_ajax()
        # button with text 'New Unit' should be present.
        self.assertTrue(element_has_text(
            page=course_outline_page,
            css_selector='.add-item a.button-new',
            text='New Unit'
        ))
        # button with text 'New Subsection' should not be present.
        self.assertFalse(element_has_text(
            page=course_outline_page,
            css_selector='.add-item a.button-new',
            text='New Subsection'
        ))
class CoursePacingTest(StudioSettingsDetailsTest):
    """Tests for setting a course to self-paced."""
    def populate_course_fixture(self, __):
        # Enable the self-paced configuration flag before the course is created.
        ConfigModelFixture('/config/self_paced', {'enabled': True}).install()
        # Set the course start date to tomorrow in order to allow setting pacing
        self.course_fixture.add_course_details({'start_date': datetime.now() + timedelta(days=1)})
    def test_default_instructor_paced(self):
        """
        Test that the 'instructor paced' button is checked by default.
        """
        self.assertEqual(self.settings_detail.course_pacing, 'Instructor-Paced')
    def test_self_paced(self):
        """
        Test that the 'self-paced' button is checked for a self-paced
        course.
        """
        self.course_fixture.add_course_details({
            'self_paced': True
        })
        self.course_fixture.configure_course()
        # Reload so the page reflects the updated course details.
        self.settings_detail.refresh_page()
        self.assertEqual(self.settings_detail.course_pacing, 'Self-Paced')
    def test_set_self_paced(self):
        """
        Test that the self-paced option is persisted correctly.
        """
        self.settings_detail.course_pacing = 'Self-Paced'
        self.settings_detail.save_changes()
        self.settings_detail.refresh_page()
        self.assertEqual(self.settings_detail.course_pacing, 'Self-Paced')
    def test_toggle_pacing_after_course_start(self):
        """
        Test that course authors cannot toggle the pacing of their course
        while the course is running.
        """
        # Move the start date to now so the course counts as already running.
        self.course_fixture.add_course_details({'start_date': datetime.now()})
        self.course_fixture.configure_course()
        self.settings_detail.refresh_page()
        self.assertTrue(self.settings_detail.course_pacing_disabled())
        self.assertIn('Course pacing cannot be changed', self.settings_detail.course_pacing_disabled_text)
|
inares/edx-platform
|
common/test/acceptance/tests/studio/test_studio_settings_details.py
|
Python
|
agpl-3.0
| 9,782
|
[
"VisIt"
] |
8617ac0ab3679029c65b9bf7c4a8c5fcf1a7f3aefcc400f3a2204936575196b3
|
# Copyright 2013-2014 Mitchell Stanton-Cook Licensed under the
# Educational Community License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may
# obtain a copy of the License at
#
# http://www.osedu.org/licenses/ECL-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS"
# BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
SeqFindr BLAST methods
"""
import subprocess
import shutil
import os
import sys
from Bio.Blast import NCBIXML
from Bio.Blast.Applications import NcbiblastnCommandline
from Bio.Blast.Applications import NcbitblastnCommandline
from Bio.Blast.Applications import NcbitblastxCommandline
import SeqFindr.util
def make_BLAST_database(fasta_file):
    """
    Given a fasta_file, generate a nucleotide BLAST database

    Database will end up in DB/ of working directory or OUTPUT/DB if an
    output directory is given in the arguments

    :param fasta_file: full path to a fasta file
    :type fasta_file: string

    :rtype: the strain id **(must be delimited by '_')**
    """
    proc = subprocess.Popen(["makeblastdb", "-in", fasta_file, "-dbtype",
                             'nucl'], stdout=subprocess.PIPE)
    # communicate() both collects stdout and waits for the child process,
    # so makeblastdb is properly reaped (no zombie) and the index files
    # are guaranteed to exist before being moved below.
    stdout, _ = proc.communicate()
    sys.stderr.write(stdout)
    # makeblastdb writes its index files next to the input; relocate them
    # into the DBs/ directory.
    for file_ext in ['.nhr', '.nin', '.nsq']:
        path = fasta_file + file_ext
        shutil.move(path, os.path.join('DBs', os.path.basename(path)))
    sys.stderr.write(("Getting %s and assocaiated database files to the DBs "
                      "location\n") % (fasta_file))
    shutil.copy2(fasta_file, os.path.join('DBs', os.path.basename(fasta_file)))
    # Strain id is everything before the first underscore in the filename.
    return os.path.basename(fasta_file).split('_')[0]
def run_BLAST(query, database, args, cons_run):
    """
    Given a mfa of query sequences of interest & a database, search for them.

    Important to note:
        * Turns dust filter off,
        * Only a single target sequence (top hit),
        * Output in XML format as blast.xml.

    # TODO: Add evalue filtering ?
    # TODO: add task='blastn' to use blastn scoring ?

    .. warning:: default is megablast

    .. warning:: tblastx functionality has not been checked

    :param query: the fullpath to the vf.mfa
    :param database: the full path of the databse to search for the vf in
    :param args: the arguments parsed to argparse
    :param cons_run: part of a mapping consensus run

    :type query: string
    :type database: string
    :type args: argparse args (dictionary)
    :type cons_run: boolean

    :returns: the path of the blast.xml file
    """
    tmp1 = os.path.splitext(query.split('/')[-1])[0]
    tmp2 = os.path.splitext(database.split('/')[-1])[0]
    # Consensus runs get a distinct filename prefix so they never clobber
    # the regular BLAST output for the same query/database pair.
    if not cons_run:
        outfile = os.path.join("BLAST_results/",
                               "DB="+tmp1+"ID="+tmp2+"_blast.xml")
    else:
        outfile = os.path.join("BLAST_results/",
                               "cons_DB="+tmp1+"ID="+tmp2+"_blast.xml")
    protein = False
    # File type not specified, determine using util.is_protein()
    if args.reftype is None:
        if SeqFindr.util.is_protein(query) != -1:
            protein = True
            # Newline added for consistency with the other log messages
            # (was missing in this branch only).
            sys.stderr.write('%s is protein\n' % (query))
    elif args.reftype == 'prot':
        protein = True
        sys.stderr.write('%s is protein\n' % (query))
    run_command = ''
    if protein:
        sys.stderr.write('Using tblastn\n')
        run_command = NcbitblastnCommandline(query=query, seg='no',
                db=database, outfmt=5, num_threads=args.BLAST_THREADS,
                max_target_seqs=1, evalue=args.evalue, out=outfile)
    else:
        if args.tblastx:
            sys.stderr.write('Using tblastx\n')
            run_command = NcbitblastxCommandline(query=query, seg='no',
                    db=database, outfmt=5, num_threads=args.BLAST_THREADS,
                    max_target_seqs=1, evalue=args.evalue,
                    out=outfile)
        else:
            sys.stderr.write('Using blastn\n')
            # `not args.short` replaces the non-idiomatic `== False` test.
            if not args.short:
                run_command = NcbiblastnCommandline(query=query, dust='no',
                        db=database, outfmt=5,
                        num_threads=args.BLAST_THREADS,
                        max_target_seqs=1, evalue=args.evalue,
                        out=outfile)
            else:
                # Short-query mode: smaller word size and a permissive
                # evalue, per NCBI guidance for short sequences.
                sys.stderr.write('Optimising for short query sequences\n')
                run_command = NcbiblastnCommandline(query=query, dust='no',
                        db=database, outfmt=5, word_size=7,
                        num_threads=args.BLAST_THREADS, evalue=1000,
                        max_target_seqs=1, out=outfile)
    sys.stderr.write(str(run_command)+"\n")
    run_command()
    return os.path.join(os.getcwd(), outfile)
def parse_BLAST(blast_results, tol, careful):
    """
    Using NCBIXML parse the BLAST results, storing & returning good hits

    Here good hits are:
        * hsp.identities/float(record.query_length) >= tol

    Hits scoring in [tol - careful, tol) are presented to the user for
    interactive confirmation (the --careful option).

    :param blast_results: full path to a blast run output file (in XML format)
    :param tol: the cutoff threshold (see above for explanation)
    :param careful: widen the acceptance window downward by this amount,
        asking the user about each borderline hit
    :type blast_results: string
    :type tol: float
    :rtype: list of satisfying hit names
    """
    if os.path.isfile(os.path.expanduser(blast_results)):
        hits = []
        for record in NCBIXML.parse(open(blast_results)):
            for align in record.alignments:
                for hsp in align.hsps:
                    # Query id is formatted "<id>, <name>"; keep the name.
                    hit_name = record.query.split(',')[1].strip()
                    cutoff = hsp.identities/float(record.query_length)
                    if cutoff >= tol:
                        hits.append(hit_name.strip())
                    # New method for the --careful option
                    elif cutoff >= tol-careful:
                        print "Please confirm this hit:"
                        print "Name,SeqFindr score,Len(align),Len(query),Identities,Gaps"
                        print "%s,%f,%i,%i,%i,%i" % (hit_name, cutoff, hsp.align_length, record.query_length, hsp.identities, hsp.gaps)
                        accept = raw_input("Should this be considered a hit? (y/N)")
                        # Empty input or 'n' rejects; only an explicit 'y'
                        # accepts the borderline hit.
                        if accept == '':
                            pass
                        elif accept.lower() == 'n':
                            pass
                        elif accept.lower() == 'y':
                            hits.append(hit_name.strip())
                        else:
                            print "Input must be y, n or enter."
                            print "Assuming n"
                    else:
                        pass
    else:
        sys.stderr.write("BLAST results do not exist. Exiting.\n")
        sys.exit(1)
    return hits
|
BeatsonLab-MicrobialGenomics/SeqFindR
|
SeqFindr/blast.py
|
Python
|
apache-2.0
| 6,984
|
[
"BLAST"
] |
5a29540ab3c9bf29b5320369d6fe5d736afcacfb276fd5f7465ccf5df984f436
|
# -*- coding: utf-8 -*-
"""test email sending"""
from unittest import skipIf
from django.conf import settings
from django.test.utils import override_settings
from django.urls import reverse
from coop_cms.models import Newsletter
from coop_cms.settings import is_localized, is_multilang
from coop_cms.tests import BaseTestCase
from model_mommy import mommy
from .. import models
class ViewOnlineTest(BaseTestCase):
    """Tests for the 'view newsletter online' page: link rewriting into
    MagicLink objects and correct per-contact rendering."""
    def test_view_online(self):
        """A sent newsletter renders for the contact and every link is
        rewritten into a tracked MagicLink."""
        contact = mommy.make(models.Contact, email='toto@toto.fr')
        newsletter_data = {
            'subject': 'This is the subject',
            'content': '<h2>Hello #!-fullname-!#!</h2><p>Visit <a href="http://toto.fr">us</a></p>',
            'template': 'test/newsletter_contact.html'
        }
        newsletter = mommy.make(Newsletter, **newsletter_data)
        emailing = mommy.make(models.Emailing, newsletter=newsletter)
        emailing.sent_to.add(contact)
        emailing.save()
        url = reverse('newsletters:view_online', args=[emailing.id, contact.uuid])
        response = self.client.get(url)
        self.assertEqual(200, response.status_code)
        self.assertContains(response, contact.fullname)
        # One MagicLink per link: the template link and the content link.
        self.assertEqual(models.MagicLink.objects.count(), 2)
        magic_link0 = models.MagicLink.objects.all()[0]
        self.assertContains(response, reverse('newsletters:view_link', args=[magic_link0.uuid, contact.uuid]))
        self.assertEqual(magic_link0.url, '/this-link-without-prefix-in-template')
        magic_link1 = models.MagicLink.objects.all()[1]
        self.assertContains(response, reverse('newsletters:view_link', args=[magic_link1.uuid, contact.uuid]))
        self.assertEqual(magic_link1.url, 'http://toto.fr')
    @skipIf(not is_localized() or not is_multilang(), "not localized")
    def test_emailing_view_online_lang(self):
        """test view emailing in other lang"""
        contact = mommy.make(models.Contact, email='toto@toto.fr')
        newsletter_data = {
            'subject': 'This is the subject',
            'content': '<h2>Hello #!-fullname-!#!</h2><p>Visit <a href="http://toto.fr">us</a></p>',
            'template': 'test/newsletter_contact.html'
        }
        newsletter = mommy.make(Newsletter, **newsletter_data)
        emailing = mommy.make(models.Emailing, newsletter=newsletter)
        emailing.sent_to.add(contact)
        emailing.save()
        other_lang = settings.LANGUAGES[1][0]
        url = reverse('newsletters:view_online_lang', args=[emailing.id, contact.uuid, other_lang])
        response = self.client.get(url)
        # The lang-specific URL redirects to the language-prefixed view.
        self.assertEqual(302, response.status_code)
        # [3:] strips the current language prefix before re-prefixing.
        next_url = reverse('newsletters:view_online', args=[emailing.id, contact.uuid])[3:]
        redirect_url = other_lang + next_url
        self.assertTrue(response['Location'].find(redirect_url) >= 0)
    @override_settings(SECRET_KEY="super-héros")
    def test_view_online_utf_links(self):
        """Links containing non-ASCII characters are tracked correctly."""
        contact = mommy.make(models.Contact, email='toto@toto.fr', first_name='Emmet', last_name='Brown')
        newsletter_data = {
            'subject': 'This is the subject',
            'content': '<h2>Hello #!-fullname-!#!</h2><p>Visit <a href="http://toto.fr/à-bientôt">à bientôt</a></p>',
            'template': 'test/newsletter_contact.html'
        }
        newsletter = mommy.make(Newsletter, **newsletter_data)
        emailing = mommy.make(models.Emailing, newsletter=newsletter)
        emailing.sent_to.add(contact)
        emailing.save()
        url = reverse('newsletters:view_online', args=[emailing.id, contact.uuid])
        response = self.client.get(url)
        self.assertEqual(200, response.status_code)
        self.assertContains(response, contact.fullname)
        self.assertEqual(models.MagicLink.objects.count(), 2)
        magic_link0 = models.MagicLink.objects.all()[0]
        self.assertContains(response, reverse('newsletters:view_link', args=[magic_link0.uuid, contact.uuid]))
        self.assertEqual(magic_link0.url, '/this-link-without-prefix-in-template')
        magic_link1 = models.MagicLink.objects.all()[1]
        self.assertContains(response, reverse('newsletters:view_link', args=[magic_link1.uuid, contact.uuid]))
        self.assertEqual(magic_link1.url, 'http://toto.fr/à-bientôt')
    def test_view_long_links(self):
        """Links longer than the MagicLink url field (500 chars) are left
        untracked; shorter ones are rewritten as usual."""
        contact = mommy.make(models.Contact, email='toto@toto.fr', first_name='Emmet', last_name='Brown')
        short_link = "http://toto.fr/{0}".format("abcde" * 100)[:499]
        long_link = "http://toto.fr/{0}".format("abcde" * 100)  # >500 chars
        newsletter_data = {
            'subject': 'This is the subject',
            'content': '<p>Visit <a href="{0}">long link</a> <a href="{1}">long link</a></p>'.format(
                short_link, long_link
            ),
            'template': 'test/newsletter_no_link.html'
        }
        newsletter = mommy.make(Newsletter, **newsletter_data)
        emailing = mommy.make(models.Emailing, newsletter=newsletter)
        emailing.sent_to.add(contact)
        emailing.save()
        url = reverse('newsletters:view_online', args=[emailing.id, contact.uuid])
        response = self.client.get(url)
        self.assertEqual(200, response.status_code)
        self.assertEqual(models.MagicLink.objects.count(), 1)
        magic_link0 = models.MagicLink.objects.all()[0]
        self.assertContains(response, reverse('newsletters:view_link', args=[magic_link0.uuid, contact.uuid]))
        self.assertEqual(magic_link0.url, short_link)
        # The over-long link stays as-is in the rendered page.
        self.assertContains(response, long_link)
    def test_view_duplicate_links(self):
        """The same URL appearing twice produces a single MagicLink."""
        contact = mommy.make(models.Contact, email='toto@toto.fr', first_name='Emmet', last_name='Brown')
        short_link = "http://toto.fr/abcde/"
        newsletter_data = {
            'subject': 'This is the subject',
            'content': '<p>Visit <a href="{0}">link1</a> <a href="{1}">link2</a></p>'.format(
                short_link, short_link
            ),
            'template': 'test/newsletter_no_link.html'
        }
        newsletter = mommy.make(Newsletter, **newsletter_data)
        emailing = mommy.make(models.Emailing, newsletter=newsletter)
        emailing.sent_to.add(contact)
        emailing.save()
        url = reverse('newsletters:view_online', args=[emailing.id, contact.uuid])
        response = self.client.get(url)
        self.assertEqual(200, response.status_code)
        self.assertEqual(models.MagicLink.objects.count(), 1)
        magic_link0 = models.MagicLink.objects.all()[0]
        self.assertContains(response, reverse('newsletters:view_link', args=[magic_link0.uuid, contact.uuid]))
        self.assertEqual(magic_link0.url, short_link)
    def test_view_duplicate_emailing(self):
        """Two emailings of the same newsletter each get their own
        MagicLink for the same URL."""
        contact = mommy.make(models.Contact, email='toto@toto.fr', first_name='Emmet', last_name='Brown')
        short_link = "http://toto.fr/abcde/"
        newsletter_data = {
            'subject': 'This is the subject',
            'content': '<p>Visit <a href="{0}">link1</a></p>'.format(
                short_link
            ),
            'template': 'test/newsletter_no_link.html'
        }
        newsletter = mommy.make(Newsletter, **newsletter_data)
        emailing1 = mommy.make(models.Emailing, newsletter=newsletter)
        emailing1.sent_to.add(contact)
        emailing1.save()
        emailing2 = mommy.make(models.Emailing, newsletter=newsletter)
        emailing2.sent_to.add(contact)
        emailing2.save()
        for emailing in (emailing1, emailing2):
            url = reverse('newsletters:view_online', args=[emailing.id, contact.uuid])
            response = self.client.get(url)
            self.assertEqual(200, response.status_code)
            magic_links = models.MagicLink.objects.filter(emailing=emailing)
            self.assertEqual(magic_links.count(), 1)
            magic_link0 = magic_links[0]
            self.assertContains(response, reverse('newsletters:view_link', args=[magic_link0.uuid, contact.uuid]))
            self.assertEqual(magic_link0.url, short_link)
|
ljean/coop_cms
|
coop_cms/apps/newsletters/tests/test_view_online.py
|
Python
|
bsd-3-clause
| 8,054
|
[
"VisIt"
] |
84bbb3e3dd74835a6db6668836acbf3cbef0301d9493db99783437af8a6ebfcc
|
# This file is part of the myhdl library, a Python package for using
# Python as a Hardware Description Language.
#
# Copyright (C) 2003-2008 Jan Decaluwe
#
# The myhdl library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation; either version 2.1 of the
# License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
""" Module with the always function. """
from __future__ import absolute_import
import inspect
from types import FunctionType
from myhdl import InstanceError
from myhdl._util import _isGenFunc, _makeAST
from myhdl._Waiter import _inferWaiter
from myhdl._resolverefs import _AttrRefTransformer
from myhdl._visitors import _SigNameVisitor
class _error:
pass
_error.NrOfArgs = "decorated generator function should not have arguments"
_error.ArgType = "decorated object should be a generator function"
class _CallInfo(object):
def __init__(self, name, modctxt, symdict):
self.name = name
self.modctxt = modctxt
self.symdict = symdict
def _getCallInfo():
    """Get info on the caller of an Instantiator.

    An Instantiator should be used in a block context.
    This function gets the required info about the caller.
    It uses the frame stack:
    0: this function
    1: the instantiator decorator
    2: the block function that defines instances
    3: the caller of the block function, e.g. the BlockInstance.
    """
    # Local import — presumably to avoid a circular dependency with
    # myhdl._block; confirm before moving to module level.
    from myhdl import _block
    # Frame 2: the block function that defines the instances (see above).
    funcrec = inspect.stack()[2]
    name = funcrec[3]
    frame = funcrec[0]
    # Globals first, then locals, so local names shadow module globals.
    symdict = dict(frame.f_globals)
    symdict.update(frame.f_locals)
    modctxt = False
    # Frame 3: the caller of the block function; a 'self' that is a
    # _Block instance means we are in a block (module) context.
    callerrec = inspect.stack()[3]
    f_locals = callerrec[0].f_locals
    if 'self' in f_locals:
        modctxt = isinstance(f_locals['self'], _block._Block)
    return _CallInfo(name, modctxt, symdict)
def instance(genfunc):
    """Decorator turning a generator function into an Instantiator.

    Raises InstanceError when the decorated object is not a generator
    function or takes arguments.
    """
    callinfo = _getCallInfo()
    # Must be a plain function AND a generator function; short-circuit
    # keeps the _isGenFunc check from seeing non-function objects.
    if not (isinstance(genfunc, FunctionType) and _isGenFunc(genfunc)):
        raise InstanceError(_error.ArgType)
    if genfunc.__code__.co_argcount > 0:
        raise InstanceError(_error.NrOfArgs)
    return _Instantiator(genfunc, callinfo=callinfo)
class _Instantiator(object):
    """Wraps a generator function created via the @instance decorator and
    analyzes its AST for signal usage."""
    def __init__(self, genfunc, callinfo):
        self.callinfo = callinfo
        self.callername = callinfo.name
        self.modctxt = callinfo.modctxt
        self.genfunc = genfunc
        # Instantiate the generator immediately.
        self.gen = genfunc()
        # infer symdict: keep only caller symbols that are not shadowed by
        # the function's own local variable names.
        f = self.funcobj
        varnames = f.__code__.co_varnames
        symdict = {}
        for n, v in callinfo.symdict.items():
            if n not in varnames:
                symdict[n] = v
        self.symdict = symdict
        # print modname, genfunc.__name__
        tree = self.ast
        # print ast.dump(tree)
        # NOTE: visitor order matters — attribute references are resolved
        # before signal names are collected.
        v = _AttrRefTransformer(self)
        v.visit(tree)
        v = _SigNameVisitor(self.symdict)
        v.visit(tree)
        # Signal classification results gathered by the visitor.
        self.inputs = v.inputs
        self.outputs = v.outputs
        self.inouts = v.inouts
        self.embedded_func = v.embedded_func
        self.sigdict = v.sigdict
        self.losdict = v.losdict
    @property
    def name(self):
        # Name of the wrapped generator function.
        return self.funcobj.__name__
    @property
    def funcobj(self):
        return self.genfunc
    @property
    def waiter(self):
        # A fresh waiter wrapping the generator instance.
        return self._waiter()(self.gen)
    def _waiter(self):
        # Overridable hook: which waiter-inference strategy to use.
        return _inferWaiter
    @property
    def ast(self):
        # Re-parses the function source on every access.
        return _makeAST(self.funcobj)
|
DeadBugEngineering/myHDL_shenanigans
|
ssd1306_8x64bit_driver/myhdl_10dev/lib/python2.7/site-packages/myhdl-1.0.dev0-py2.7.egg/myhdl/_instance.py
|
Python
|
lgpl-2.1
| 3,993
|
[
"VisIt"
] |
960629d44c884b0c9580b1091304f4d25b64ceb2989acf520a34c567a361436b
|
# Copyright (C) 2002, Thomas Hamelryck (thamelry@binf.ku.dk)
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""
Base class for Residue, Chain, Model and Structure classes.
It is a simple container class, with list and dictionary like properties.
"""
from copy import copy
from Bio.PDB.PDBExceptions import PDBConstructionException
class Entity(object):
"""
Basic container object. Structure, Model, Chain and Residue
are subclasses of Entity. It deals with storage and lookup.
"""
def __init__(self, id):
self.id = id
self.full_id = None
self.parent = None
self.child_list = []
self.child_dict = {}
# Dictionary that keeps additional properties
self.xtra = {}
# Special methods
def __len__(self):
"Return the number of children."
return len(self.child_list)
def __getitem__(self, id):
"Return the child with given id."
return self.child_dict[id]
def __delitem__(self, id):
"Remove a child."
return self.detach_child(id)
def __contains__(self, id):
"True if there is a child element with the given id."
return (id in self.child_dict)
def __iter__(self):
"Iterate over children."
for child in self.child_list:
yield child
# Public methods
def get_level(self):
"""Return level in hierarchy.
A - atom
R - residue
C - chain
M - model
S - structure
"""
return self.level
def set_parent(self, entity):
"Set the parent Entity object."
self.parent = entity
def detach_parent(self):
"Detach the parent."
self.parent = None
def detach_child(self, id):
"Remove a child."
child = self.child_dict[id]
child.detach_parent()
del self.child_dict[id]
self.child_list.remove(child)
def add(self, entity):
"Add a child to the Entity."
entity_id = entity.get_id()
if self.has_id(entity_id):
raise PDBConstructionException(
"%s defined twice" % str(entity_id))
entity.set_parent(self)
self.child_list.append(entity)
self.child_dict[entity_id] = entity
def insert(self, pos, entity):
"Add a child to the Entity at a specified position."
entity_id = entity.get_id()
if self.has_id(entity_id):
raise PDBConstructionException(
"%s defined twice" % str(entity_id))
entity.set_parent(self)
self.child_list[pos:pos] = [entity]
self.child_dict[entity_id] = entity
def get_iterator(self):
"Return iterator over children."
for child in self.child_list:
yield child
def get_list(self):
"Return a copy of the list of children."
return copy(self.child_list)
def has_id(self, id):
"""True if a child with given id exists."""
return (id in self.child_dict)
def get_parent(self):
"Return the parent Entity object."
return self.parent
def get_id(self):
"Return the id."
return self.id
def get_full_id(self):
"""Return the full id.
The full id is a tuple containing all id's starting from
the top object (Structure) down to the current object. A full id for
a Residue object e.g. is something like:
("1abc", 0, "A", (" ", 10, "A"))
This corresponds to:
Structure with id "1abc"
Model with id 0
Chain with id "A"
Residue with id (" ", 10, "A")
The Residue id indicates that the residue is not a hetero-residue
(or a water) beacuse it has a blank hetero field, that its sequence
identifier is 10 and its insertion code "A".
"""
if self.full_id is None:
entity_id = self.get_id()
l = [entity_id]
parent = self.get_parent()
while not (parent is None):
entity_id = parent.get_id()
l.append(entity_id)
parent = parent.get_parent()
l.reverse()
self.full_id = tuple(l)
return self.full_id
def transform(self, rot, tran):
    """Apply a rotation and a translation to all atomic coordinates
    below this entity (recursively delegates to each child).

    Example:
        >>> rotation=rotmat(pi, Vector(1, 0, 0))
        >>> translation=array((0, 0, 1), 'f')
        >>> entity.transform(rotation, translation)

    @param rot: A right multiplying rotation matrix
    @type rot: 3x3 Numeric array
    @param tran: the translation vector
    @type tran: size 3 Numeric array
    """
    for child in self.get_list():
        child.transform(rot, tran)
def copy(self):
    """Return a copy of this entity: the entity itself and its ``xtra``
    dict are shallow-copied, the parent link is dropped, and every
    child is copied recursively and re-added."""
    dup = copy(self)
    dup.child_list = []
    dup.child_dict = {}
    dup.xtra = copy(self.xtra)
    dup.detach_parent()
    for child in self.child_list:
        dup.add(child.copy())
    return dup
class DisorderedEntityWrapper(object):
    """Group a number of equivalent Entities behind a single object.

    One child is "selected" at a time; unknown method/attribute lookups
    are forwarded to it. DisorderedResidue and DisorderedAtom are
    subclasses. E.g. a DisorderedAtom holds several Atom objects, each
    representing one position of a disordered atom in the structure.
    """
    def __init__(self, id):
        self.id = id
        self.child_dict = {}
        self.selected_child = None
        self.parent = None

    # --- special methods -------------------------------------------------

    def __getattr__(self, method):
        """Forward any unknown attribute lookup to the selected child."""
        if not hasattr(self, 'selected_child'):
            # Guard for unpickling: without this, attribute lookup during
            # __setstate__ recurses into __getattr__ forever.
            raise AttributeError
        return getattr(self.selected_child, method)

    def __getitem__(self, id):
        """Return the selected child's item with the given id."""
        return self.selected_child[id]

    # XXX Why doesn't this forward to selected_child?
    # (NB: setitem was here before getitem, iter, len, sub)
    def __setitem__(self, id, child):
        """Register *child* under *id* in this wrapper (not the child)."""
        self.child_dict[id] = child

    def __contains__(self, id):
        """Membership test, delegated to the selected child."""
        return id in self.selected_child

    def __iter__(self):
        """Iterate over the selected child."""
        return iter(self.selected_child)

    def __len__(self):
        """Length of the selected child."""
        return len(self.selected_child)

    def __sub__(self, other):
        """Subtraction, delegated to the selected child."""
        return self.selected_child - other

    # --- public methods ---------------------------------------------------

    def get_id(self):
        """Return the wrapper's id."""
        return self.id

    def disordered_has_id(self, id):
        """Return True when a wrapped object exists for this id."""
        return id in self.child_dict

    def detach_parent(self):
        """Clear the parent of the wrapper and of every wrapped child."""
        self.parent = None
        for wrapped in self.disordered_get_list():
            wrapped.detach_parent()

    def get_parent(self):
        """Return the parent."""
        return self.parent

    def set_parent(self, parent):
        """Set the parent on the wrapper and on every wrapped child."""
        self.parent = parent
        for wrapped in self.disordered_get_list():
            wrapped.set_parent(parent)

    def disordered_select(self, id):
        """Make the object with the given id the active child.

        Uncaught method calls are forwarded to the selected child.
        """
        self.selected_child = self.child_dict[id]

    def disordered_add(self, child):
        """Implemented by DisorderedAtom and DisorderedResidue."""
        raise NotImplementedError

    def is_disordered(self):
        """Return 2: this Entity is a collection of Entities."""
        return 2

    def disordered_get_id_list(self):
        """Return the wrapped ids, sorted alphabetically."""
        return sorted(self.child_dict)

    def disordered_get(self, id=None):
        """Return the child for *id*, or the selected child if id is None."""
        return self.selected_child if id is None else self.child_dict[id]

    def disordered_get_list(self):
        """Return a list of all wrapped children."""
        return list(self.child_dict.values())
|
poojavade/Genomics_Docker
|
Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/Bio/PDB/Entity.py
|
Python
|
apache-2.0
| 8,749
|
[
"Biopython"
] |
4b0464bdd09e629452790fd09804e22ab063e8a7368db470309eb94f699bf299
|
#!/usr/bin/env python2.7
# Copyright 2014 Virantha Ekanayake All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Scan to PDF.
Usage:
scanpdf [options] scan
scanpdf [options] pdf <pdffile>
scanpdf [options] scan pdf <pdffile>
Options:
-v --verbose Verbose logging
-d --debug Debug logging
--dpi=<dpi> DPI to scan in [default: 300]
--tmpdir=<dir> Temporary directory
--keep-tmpdir Whether to keep the tmp dir after scanning or not [default: False]
--face-up=<true/false> Face-up scanning [default: True]
--keep-blanks Don't check for and remove blank pages
--blank-threshold=<ths> Percentage of white to be marked as blank [default: 0.97]
--post-process Run unpaper to deskew/clean up
"""
import sys, os
import logging
import shutil
import re
from version import __version__
import docopt
import subprocess
import time
import glob
from itertools import combinations
class ScanPdf(object):
    """Drive a duplex ADF scanner and assemble the scanned pages into a PDF.

    Pipeline (see ``go``): scanadf -> crop/deskew -> color-vs-B&W detection
    -> optional blank-page removal -> optional unpaper post-processing ->
    per-page PDFs via ImageMagick -> merged PDF via Ghostscript.
    """
    def __init__ (self):
        """Initialize bookkeeping; real configuration happens in get_options."""
        self.config = None
        self.bw_pages = {} # Keep track of which pages were in B&W
    def cmd(self, cmd_list):
        """Run a shell command (string or list of fragments joined by spaces)
        and return its combined stdout/stderr; aborts the program via
        _error on a non-zero exit status."""
        if isinstance(cmd_list, list):
            cmd_list = ' '.join(cmd_list)
        logging.debug("Running cmd: %s" % cmd_list)
        try:
            out = subprocess.check_output(cmd_list, stderr=subprocess.STDOUT, shell=True)
            logging.debug(out)
            return out
        except subprocess.CalledProcessError as e:
            print (e.output)
            self._error("Could not run command %s" % cmd_list)
    def run_scan(self):
        """Invoke scanadf on the device named by $SCANBD_DEVICE, writing
        page_NNNN files into the temp dir."""
        device = os.environ['SCANBD_DEVICE']
        self.cmd('logger -t "scanbd: " "Begin of scan "')
        c = ['SANE_CONFIG_DIR=/etc/scanbd',
             'scanadf',
             '-d "%s"' % device,
             '--source "ADF Duplex"',
             '--mode Color',
             '--resolution %sdpi' % self.dpi,
             #'--y-resolution %sdpi' % self.dpi,
             '-o %s/page_%%04d' % self.tmp_dir,
             #'-y 876.695mm',
             #'--page-height 355.617mm',
             '--page-height 876.695',
             '-y 876.695',
             #'--buffermode On',
             '--brightness=25',
             '--emphasis=20',
             '--ald yes',
             ]
        self.cmd(c)
        self.cmd('logger -t "scanbd: " "End of scan "')
    def _error(self, msg):
        """Print an error message and exit the process with status -1."""
        print("ERROR: %s" % msg)
        sys.exit(-1)
    def _atoi(self,text):
        # Convert digit runs to int so sort keys compare numerically.
        return int(text) if text.isdigit() else text
    def _natural_keys(self, text):
        '''
        alist.sort(key=natural_keys) sorts in human order
        http://nedbatchelder.com/blog/200712/human_sorting.html
        (See Toothy's implementation in the comments)
        '''
        return [ self._atoi(c) for c in re.split('(\d+)', text) ]
    def get_pages(self):
        """Return the scanned page_* filenames in natural (human) order."""
        cwd = os.getcwd()
        os.chdir(self.tmp_dir)
        pages = glob.glob('./page_*')
        pages.sort(key = self._natural_keys)
        os.chdir(cwd)
        return pages
    def reorder_face_up(self, pages):
        """Reverse the page order (used for face-up duplex scanning)."""
        reorder = []  # NOTE(review): unused local
        assert len(pages) % 2 == 0, "Why is page count not even for duplexing??"
        logging.info("Reordering pages")
        pages.reverse()
        return pages
    def parse_dimensions(self, result):
        """Parse ImageMagick identify/convert-info output; return
        (width, height) in pixels, or (-1, -1) when no geometry is found."""
        first_line = str(result.splitlines()[0].strip())
        logging.debug(first_line)
        mCropDim = re.compile("""\s*(?P<filename>[\d\w\[_\/\\\.]+)\s+\w+\s+(?P<X>\d+)x(?P<Y>\d+)\s+""")
        # blank3.pnm PPM 1x1 1950x2716-1-1 8-bit sRGB 0.010u 0:00.009
        matchCropDim = mCropDim.search(first_line)
        if matchCropDim:
            x = int(matchCropDim.group('X'))
            y = int(matchCropDim.group('Y'))
        else:
            x = -1
            y = -1
        return x, y
    def get_dimensions(self, filename):
        """Return (width, height) of an image via ImageMagick identify."""
        c = 'identify %s' % filename
        result = self.cmd(c)
        return self.parse_dimensions(result)
    def is_blank(self, filename):
        """
        Returns true if image in filename is blank
        - Shave off one inch around edges
        - Blur and crop down as much as possible
        - If remaining page has a dimension smaller than 0.3" conclude it's blank
        """
        if not os.path.exists(filename):
            return True
        #c = 'convert %s -shave %sx%s -virtual-pixel White -blur 0x15 -fuzz 15%% -trim info:' % (filename, self.dpi, self.dpi)
        c = 'convert %s -shave %sx%s -density %s -adaptive-resize 65%% -virtual-pixel White -blur 0x15 -fuzz 15%% -trim info:' % (filename, self.dpi, self.dpi, int(self.dpi/2))
        result = self.cmd(c)
        x, y = self.parse_dimensions(result)
        if x>0 and y>0:
            logging.debug('Finding threshold for blanks')
            threshold = int(self.dpi)/2*0.3 # Threshold is 0.3 inches
            logging.debug('x=%s, y=%s, threshold=%s' % (x, y, threshold))
            if x < threshold or y < threshold:
                return True
            else:
                return False
        else:
            logging.debug('Could not find dimensions in output of imagemagick for cropping')
            return False
        # Old code, doesn't really work for pages with small amounts of text
        # c = 'identify -verbose %s' % filename
        # result = self.cmd(c)
        # mStdDev = re.compile("""\s*standard deviation:\s*\d+\.\d+\s*\((?P<percent>\d+\.\d+)\).*""")
        # for line in result.splitlines():
        #     match = mStdDev.search(str(line))
        #     if match:
        #         stdev = float(match.group('percent'))
        #         if stdev > 0.1:
        #             return False
        # return True
    def run_postprocess(self, page_files):
        """Run unpaper over each page file and return the new filenames.
        Every processed page is flagged True in self.bw_pages."""
        cwd = os.getcwd()
        os.chdir(self.tmp_dir)
        processed_pages = []
        self.bw_pages = {}
        for page in page_files:
            processed_page = '%s_unpaper' % page
            c = ['unpaper', page, processed_page]
            self.cmd(c)
            os.remove(page)
            processed_pages.append(processed_page)
            self.bw_pages[processed_page] = True
        os.chdir(cwd)
        return processed_pages
    def run_crop(self, page_files):
        """Deskew and trim each page with ImageMagick, padding back to the
        original size (white background) when it is known; return the
        cropped filenames."""
        cwd = os.getcwd()
        os.chdir(self.tmp_dir)
        crop_pages = []
        for i, page in enumerate(page_files):
            logging.debug("Cropping page %d" % i)
            crop_page = '%s.crop' % page
            shave_amt = int(int(self.dpi)*0.1)  # shave 0.1 inch off each edge
            c = ['convert',
                 '-deskew 80%',
                 '-shave %dx%d' % (shave_amt, shave_amt),
                 '-fuzz 20%',
                 '-trim',
                 '+repage',
                 ]
            # Get original dimensions
            x, y = self.get_dimensions(page)
            if x>0 and y>0:
                # IF we know the original dimensions, then just pad back to that with white background
                c.extend([ '-gravity center',
                           '-extent %sx%s' % (x, y),
                           '-background white',
                           ])
            c.extend([ ' %s ' % page,
                       crop_page,
                       ])
            self.cmd(c)
            crop_pages.append(crop_page)
            if not self.args['--keep-tmpdir']:
                os.remove(page)
        os.chdir(cwd)
        return crop_pages
    def run_convert(self, page_files):
        """Convert each page image to a single-page PDF (2-bit PNG route for
        B&W pages, JPEG-compressed for color), merge them with Ghostscript,
        and move the result to self.pdf_filename. Temp files are removed
        unless --keep-tmpdir was given."""
        cwd = os.getcwd()
        os.chdir(self.tmp_dir)
        pdf_basename = os.path.basename(self.pdf_filename)
        ps_filename = pdf_basename
        ps_filename = ps_filename.replace(".pdf", ".ps")
        # Convert each page to a ps
        for page in page_files:
            is_bw = self.bw_pages.get(page, False)
            if is_bw:
                c = ['convert',
                     page,
                     '-density %s' % self.dpi,
                     '-depth 2',
                     '-define png:compression-level=9',
                     '-define png:format=8',
                     '-define png:color-type=0',
                     '-define png:bit-depth=2',
                     'PNG:- | convert - -rotate 180',
                     '%s.pdf' % page,
                     ]
            else:
                c = ['convert',
                     '-density %s' % self.dpi,
                     '+page', # Make sure it doesn't crop to letter size
                     '-compress JPEG',
                     '-sampling-factor 4:2:0',
                     '-strip',
                     '-quality 85',
                     '-interlace JPEG',
                     '-colorspace RGB',
                     '-rotate 180',
                     page,
                     '%s.pdf' % page,
                     ]
            self.cmd(c)
        # Create a single ps file using gs
        c = ['gs',
             '-sDEVICE=pdfwrite',
             '-r%s' % self.dpi,
             '-dNOPAUSE',
             '-dBATCH',
             '-dSAFER',
             '-sOutputFile=%s' % pdf_basename,
             ' '.join(['%s.pdf' % p for p in page_files]),
             ]
        self.cmd(c)
        c = ['epstopdf',
             ps_filename,
             ]
        #self.cmd(c)
        #c = ['convert',
        #'-density %s' % self.dpi,
        #'+page', # Make sure it doesn't crop to letter size
        #'-compress JPEG',
        #'-sampling-factor 4:2:0',
        #'-strip',
        #'-quality 85',
        #'-interlace JPEG',
        #'-colorspace RGB',
        #'-rotate 180',
        #' '.join(page_files),
        #'%s' % pdf_basename,
        #]
        #self.cmd(c)
        #c = ['ps2pdf',
        #'-DPDFSETTINGS=/prepress',
        #ps_filename,
        #pdf_basename,
        #]
        # unneeded since we're going directly to pdf using imagemagick now
        #c = ['epstopdf',
        #ps_filename,
        #]
        #self.cmd(c)
        shutil.move(pdf_basename, self.pdf_filename)
        if not self.args['--keep-tmpdir']:
            for filename in page_files:
                os.remove(filename)
        # IF we did the scan, then remove the tmp dir too
        if self.args['scan'] and not self.args['--keep-tmpdir']:
            os.rmdir(self.tmp_dir)
        os.chdir(cwd)
    def convert_to_bw(self, pages):
        """For each page, detect whether it contains color; convert
        grayscale-looking pages to compact B&W files and record the
        decision in self.bw_pages.

        NOTE(review): _page_to_bw is given the joined tmp-dir path and
        returns it with a suffix, so new_pages mixes relative and absolute
        paths — verify downstream handling.
        """
        new_pages = []
        for i, page in enumerate(pages):
            filename = os.path.join(self.tmp_dir, page)
            logging.info("Checking if %s is bw..." % filename)
            if self._is_color(filename):
                new_pages.append(page)
                logging.info("No, %s is color..." % filename)
                self.bw_pages[page] = False
            else: # COnvert to BW
                bw_page = self._page_to_bw(filename)
                logging.info("Yes, %s converted to bw..." % filename)
                new_pages.append(bw_page)
                self.bw_pages[bw_page] = True
        return new_pages
    def _page_to_bw(self, page):
        """Convert a page to normalized grayscale via ImageMagick; return
        the new filename ('<page>_bw')."""
        out_page = "%s_bw" % page
        cwd = os.getcwd()
        os.chdir(self.tmp_dir)
        cmd = "convert %s +dither -density %s -colors 16 -colors 4 -colorspace gray -normalize %s_bw" % (page, self.dpi, page)
        out = self.cmd(cmd)
        # Remove the old file
        if not self.args['--keep-tmpdir']:
            os.remove(page)
        os.chdir(cwd)
        return out_page
    def _is_color(self, filename):
        """
        Decide whether an image contains color by histogramming it with
        ImageMagick:
        ::
            convert holi.pdf -colors 8 -depth 8 -format %c histogram:info:-
        This outputs something like the following:
        ::
            10831: ( 24, 26, 26,255) #181A1A srgba(24,26,26,1)
            4836: ( 55, 87, 79,255) #37574F srgba(55,87,79,1)
            6564: ( 77,138,121,255) #4D8A79 srgba(77,138,121,1)
            4997: ( 86, 96, 93,255) #56605D srgba(86,96,93,1)
            7005: ( 92,153,139,255) #5C998B srgba(92,153,139,1)
            2479: (143,118,123,255) #8F767B srgba(143,118,123,1)
            8870: (169,176,170,255) #A9B0AA srgba(169,176,170,1)
            442906: (254,254,254,255) #FEFEFE srgba(254,254,254,1)
            1053: ( 0, 0, 0,255) #000000 black
            484081: (255,255,255,255) #FFFFFF white
        A histogram bucket whose mean pairwise RGB difference exceeds 30
        marks the page as color.
        """
        cmd = "convert %s -density %s -adaptive-resize 35%% -colors 8 -depth 8 -format %%c histogram:info:-" % (filename, int(self.dpi/3))
        out = self.cmd(cmd)
        mLine = re.compile(r"""\s*(?P<count>\d+):\s*\(\s*(?P<R>\d+),\s*(?P<G>\d+),\s*(?P<B>\d+).+""")
        colors = []
        for line in out.splitlines():
            matchLine = mLine.search(str(line))
            if matchLine:
                logging.debug("Found RGB values")
                color = [int(x) for x in (matchLine.group('count'),
                                          matchLine.group('R'),
                                          matchLine.group('G'),
                                          matchLine.group('B'),
                                          )
                         ]
                colors.append(color)
        # sort
        colors.sort(reverse=True, key = lambda x: x[0])
        logging.debug(colors)
        is_color = False
        logging.debug(colors)
        for color in colors:
            # Calculate the mean differences between the RGB components
            # Shades of grey will be very close to zero in this metric...
            diff = float(sum([abs(color[2]-color[1]),
                              abs(color[3]-color[1]),
                              abs(color[3]-color[2]),
                              ]))/3
            if diff > 30:
                is_color = True
                logging.debug("Found color, diff is %s" % diff)
            else:
                logging.debug("No color, diff is %s" % diff)
        return is_color
    def get_options(self, argv):
        """
        Parse the command-line options and set the following object properties:
        :param argv: usually just sys.argv[1:]
        :returns: Nothing
        :ivar debug: Enable logging debug statements
        :ivar verbose: Enable verbose logging
        :ivar config: Dict of the config file
        """
        self.args = argv
        if argv['--verbose']:
            logging.basicConfig(level=logging.INFO, format='%(message)s')
        if argv['--debug']:
            logging.basicConfig(level=logging.DEBUG, format='%(message)s')
        if self.args['pdf']:
            self.pdf_filename = os.path.abspath(self.args['<pdffile>'])
        self.dpi = int(self.args['--dpi'])
        output_dir = time.strftime('%Y%m%d_%H%M%S', time.localtime())
        if argv['--tmpdir']:
            self.tmp_dir = argv['--tmpdir']
        else:
            self.tmp_dir = os.path.join('/tmp', output_dir)
        self.tmp_dir = os.path.abspath(self.tmp_dir)
        # Make the tmp dir only if we're scanning, o/w throw an error
        if argv['scan']:
            if os.path.exists(self.tmp_dir):
                self._error("Temporary output directory %s already exists!" % self.tmp_dir)
            else:
                os.makedirs(self.tmp_dir)
        else:
            if not os.path.exists(self.tmp_dir):
                self._error("Scan files directory %s does not exist!" % self.tmp_dir)
        # Blank checks
        self.keep_blanks = argv['--keep-blanks']
        self.blank_threshold = float(argv['--blank-threshold'])
        assert(self.blank_threshold >= 0 and self.blank_threshold <= 1.0)
        self.post_process = argv['--post-process']
    def go(self, argv):
        """
        The main entry point into ScanPdf
        #. Get the options
        #. Create the temp dir
        #. Run scanadf
        """
        # Read the command line options
        self.get_options(argv)
        logging.info("Temp dir: %s" % self.tmp_dir)
        if self.args['scan']:
            self.run_scan()
        if self.args['pdf']:
            # Now, convert the files to ps
            pages = self.get_pages()
            logging.debug( pages )
            if self.args['--face-up']:
                pages = self.reorder_face_up(pages)
                logging.debug( pages )
            # Crop the pages
            pages = self.run_crop(pages)
            # Now, check if color or bw
            pages = self.convert_to_bw(pages)
            logging.debug(pages)
            # Run blanks
            if not self.keep_blanks:
                no_blank_pages = []
                for i,page in enumerate(pages):
                    filename = os.path.join(self.tmp_dir, page)
                    logging.info("Checking if %s is blank..." % filename)
                    if not self.is_blank(filename):
                        no_blank_pages.append(page)
                    else:
                        logging.info(" page %s is blank, removing..." % i)
                        os.remove(filename)
                pages = no_blank_pages
                logging.debug( pages )
            if self.post_process:
                pages = self.run_postprocess(pages)
            self.run_convert(pages)
def main():
    """CLI entry point: parse arguments with docopt and run the scanner."""
    options = docopt.docopt(__doc__, version='Scan PDF %s' % __version__ )
    runner = ScanPdf()
    print(options)
    runner.go(options)
if __name__ == '__main__':
    main()
|
virantha/scanpdf
|
scanpdf/scanpdf.py
|
Python
|
apache-2.0
| 18,680
|
[
"ADF"
] |
64c33ce6b1372fe8f32f8d9da9e37fdf690f0b8fb2a5d785ebef6b3ec157d199
|
from flask import flash
from flask.ext.login import current_user
from octopus.models import CaseStaffMap, User
__author__ = 'MartinoW'
def create_query(args, q):
    """Narrow query *q* by the 'user_id' request argument, if present.

    'me' resolves to the logged-in user; otherwise the value must parse
    as an integer id. Returns a (valid, query) pair; on a bad id a flash
    message is emitted and valid is False.
    """
    conditions = []
    joins = set()
    valid = True
    user_id = args.get('user_id')
    if user_id:
        if user_id == "me":
            user = User.get_by_id(current_user.id)
        else:
            try:
                user = User.get_by_id(int(user_id))
            except ValueError:
                flash('Invalid User Id Entered')
                valid = False
        if valid:
            q = q.join(CaseStaffMap, User).filter(
                CaseStaffMap.user_id == user.id)
    return valid, q
|
quaintm/octopus
|
octopus/case/utils.py
|
Python
|
bsd-3-clause
| 629
|
[
"Octopus"
] |
c0d4ccc730839d2afc9dd1f300e2986ce21431ddd44feda5936108d13c4ebc1f
|
import re
from cfnviz.model import Parameter
from cfnviz.model import Mapping
from cfnviz.model import Condition
from cfnviz.model import Output
from cfnviz.model import Attribute
from cfnviz.model import Edge
from cfnviz.model import Model
def visit(value, fn, context=None):
    """Depth-first traversal of a nested dict/list structure.

    Calls ``fn(node, context)`` for every node (including containers).
    ``context`` carries traversal state:
      - "path": list of dict keys from the root to the current node
      - "parent": nearest enclosing dict key (None at the root)
      - "name": the dict key of the current node

    :param value: the structure to walk (dict, list, or scalar)
    :param fn: callback invoked for every node
    :param context: optional pre-seeded context dict; a fresh one is
        created per top-level call when omitted
    """
    if context is None:
        # Fixed: the old mutable default ``context={"path": []}`` was
        # shared across unrelated top-level calls, leaking "name"/"parent"
        # state (and any partial "path" after an exception) between them.
        context = {"path": []}
    path = context.get("path")
    context["parent"] = path[-1] if path else None
    fn(value, context)
    if type(value) is list:
        for child in value:
            visit(child, fn, context)
    elif type(value) is dict:
        # .items() instead of Python-2-only .iteritems(); valid on both.
        for name, child in value.items():
            context["name"] = name
            context["path"].append(name)
            visit(child, fn, context)
            context["path"].pop()
    else:
        return
def str_list_values(str):
    """Parse a bracketed, comma-separated list out of *str*.

    E.g. "!FindInMap [ Foo, Bar ]" -> ["Foo", "Bar"]; returns [] when
    no bracketed section is present.
    """
    matches = re.findall(".*\[(.*?)\].*", str)
    if not matches:
        return []
    return [piece.strip() for piece in matches[0].strip().split(",")]
# Patterns for path elements that are CloudFormation syntax rather than
# user-chosen names (intrinsic functions, Ref, YAML tags, "Value").
invalid_name_patterns = ("^Fn::.*", "Ref", "![a-zA-Z]+ .*", "Value")
def find_first_valid_name(path):
    """Walk *path* from the end and return the first element that does
    not look like CloudFormation syntax; None when every element is
    invalid (or path is empty)."""
    for candidate in reversed(path):
        if not any(re.match(pattern, candidate)
                   for pattern in invalid_name_patterns):
            return candidate
def collect_references(str):
    """Extract the names inside ``${...}`` placeholders from *str*,
    in order of appearance (Fn::Sub-style substitution syntax)."""
    refs = []
    buf = ""
    inside = False
    prev = None
    for ch in str:
        if inside:
            if ch == "}":
                refs.append(buf)
                buf = ""
                inside = False
            else:
                buf += ch
        elif prev == "$" and ch == "{":
            inside = True
        prev = ch
    return refs
class ModelFactory(object):
    """Build a Model (description, parameters, mappings, conditions,
    outputs, resources and edges) from a parsed CloudFormation document."""
    def __init__(self, document):
        self.document = document
    def __call__(self):
        # TODO document needs file name
        self.model = Model(description=self.document.get("Description"))
        self.collect_parameters()
        self.collect_mappings()
        self.collect_conditions()
        self.collect_outputs()
        self.collect_resources()
        self.collect_edges()
        return self.model
    def collect_edges(self):
        """For every attribute reference "X.Y", add an edge to resource X
        when X names another resource in the model."""
        for resource in self.model.resources.values():
            for attribute in resource.attributes.values():
                for reference in attribute.refers_to:
                    dest = reference.split(".")[0]
                    if dest in self.model.resources:
                        edge = Edge(resource.name, dest)
                        self.model.resource_edges.add(edge)
    def collect_parameters(self):
        # NOTE(review): .iteritems() is Python 2 only — this module will
        # not run on Python 3 as written.
        if self.document.get("Parameters"):
            self.model.parameters = [Parameter(name, doc["Type"]) for name, doc
                                     in self.document["Parameters"]
                                     .iteritems()]
    def collect_mappings(self):
        if self.document.get("Mappings"):
            model = self.model
            def parse_mapping(value, context):
                # Stateful visitor: opens a Mapping on the first node of a
                # subtree, accumulates dict keys while containers are seen,
                # and on reaching a leaf stores the dotted path as the key
                # and the leaf as the value, then resets for the next one.
                if not context.get("parsing"):
                    context["mapping"] = Mapping([], None)
                    context["parsing"] = True
                elif isinstance(value, (tuple, list, set, dict)):
                    context["mapping"].key.append(context["name"])
                else:
                    context["mapping"].key = ".".join(context["path"])
                    context["mapping"].value = value
                    model.mappings.append(context["mapping"])
                    context["parsing"] = False
            visit(self.document["Mappings"], parse_mapping)
    def collect_conditions(self):
        # Conditions are recorded by name only.
        if self.document.get("Conditions"):
            self.model.conditions = [Condition(name) for name
                                     in self.document["Conditions"].keys()]
    def collect_outputs(self):
        # Outputs are recorded by name only.
        if self.document.get("Outputs"):
            self.model.outputs = [Output(name) for name
                                  in self.document["Outputs"].keys()]
    @property
    def collect_resources(self):
        # NOTE(review): unusual — this is a *property* whose value is a
        # ResourceFactory, so `self.collect_resources()` in __call__ first
        # evaluates the property and then invokes the factory's __call__.
        # Confirm the @property decorator is intentional.
        return ResourceFactory(self.model, self.document.get('Resources'))
class ResourceFactory(object):
    """Populate ``model.resources`` (names, types and attribute
    references) from the "Resources" section of a CloudFormation
    document.

    Resolves Ref, Fn::GetAtt, FindInMap and Sub style references so
    that ModelFactory.collect_edges can later build the dependency
    graph.
    """
    def __init__(self, model, document):
        self.model = model
        self.document = document
    def __call__(self):
        # Nothing to do for templates without a Resources section.
        if not self.document:
            return
        model = self.model
        def collector(value, context):
            # Visitor invoked for every node of the Resources subtree;
            # "resource"/"attr" ride along in the context between calls.
            path = context.get("path")
            name = find_first_valid_name(path)
            parent = context.get("parent")
            resource = context.get("resource")
            attr = context.get("attr")
            try:
                if len(path) == 1:
                    # Top-level key: a resource declaration.
                    model.resources[name].name = name
                    model.resources[name].type = value.get("Type")
                    context["resource"] = model.resources[name]
                elif "Ref" in path:
                    attr = resource.attributes[name]
                    attr.name = name
                    attr.refers_to.add(value)
                elif parent == "Fn::GetAtt" and type(value) is list:
                    attr = resource.attributes[name]
                    attr.name = name
                    attr.refers_to.add(".".join(value))
                elif type(value) is str and "FindInMap" in value:
                    # TODO this is currently confuckled because pyaml is
                    # decoding elements of !FindInMap [ Foo, Bar ] as
                    # ScalarNodes. Need to fix eventually.
                    map_path = str_list_values(value)
                    if map_path:
                        attr = resource.attributes[name]
                        attr.name = name
                        attr.refers_to.add(".".join(map_path))
                elif type(value) is str and "GetAtt" in value:
                    attr = resource.attributes[name]
                    attr.name = name
                    attr.refers_to.add(value.split()[-1])
                elif type(value) is str and "Sub" in value:
                    references = collect_references(value)
                    if references:
                        attr = resource.attributes[name]
                        attr.name = name
                        attr.refers_to.update(references)
                else:
                    attr = context["attr"] = Attribute(name, [])
            except Exception as e:
                # Fixed: this handler previously printed the error and then
                # dropped into pdb.set_trace(), which hangs any
                # non-interactive run. A malformed node should not halt
                # parsing; report it and continue with the template.
                print(e)
        visit(self.document, collector, {"path": []})
|
devquixote/cfnviz
|
cfnviz/factories.py
|
Python
|
mit
| 6,899
|
[
"VisIt"
] |
33a0e8319691651f06f271638f5c9c2afae088c4d170a63e7963c7b6c7b5274a
|
"""
====================================
Outlier detection on a real data set
====================================
This example illustrates the need for robust covariance estimation
on a real data set. It is useful both for outlier detection and for
a better understanding of the data structure.
We selected two sets of two variables from the Boston housing data set
as an illustration of what kind of analysis can be done with several
outlier detection tools. For the purpose of visualization, we are working
with two-dimensional examples, but one should be aware that things are
not so trivial in high-dimension, as it will be pointed out.
In both examples below, the main result is that the empirical covariance
estimate, as a non-robust one, is highly influenced by the heterogeneous
structure of the observations. Although the robust covariance estimate is
able to focus on the main mode of the data distribution, it sticks to the
assumption that the data should be Gaussian distributed, yielding some biased
estimation of the data structure, but yet accurate to some extent.
The One-Class SVM does not assume any parametric form of the data distribution
and can therefore model the complex shape of the data much better.
First example
-------------
The first example illustrates how the Minimum Covariance Determinant
robust estimator can help concentrate on a relevant cluster when outlying
points exist. Here the empirical covariance estimation is skewed by points
outside of the main cluster. Of course, some screening tools would have pointed
out the presence of two clusters (Support Vector Machines, Gaussian Mixture
Models, univariate outlier detection, ...). But had it been a high-dimensional
example, none of these could be applied that easily.
"""
print(__doc__)
# Author: Virgile Fritsch <virgile.fritsch@inria.fr>
# License: BSD 3 clause
import numpy as np
from sklearn.covariance import EllipticEnvelope
from sklearn.svm import OneClassSVM
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn.datasets import load_wine

# Define "classifiers" to be used
classifiers = {
    "Empirical Covariance": EllipticEnvelope(support_fraction=1.,
                                             contamination=0.25),
    "Robust Covariance (Minimum Covariance Determinant)":
    EllipticEnvelope(contamination=0.25),
    "OCSVM": OneClassSVM(nu=0.25, gamma=0.35)}
colors = ['m', 'g', 'b']  # one contour color per classifier
legend1 = {}
legend2 = {}

# Get data
X1 = load_wine()['data'][:, [1, 2]]  # two clusters

# Learn a frontier for outlier detection with several classifiers
xx1, yy1 = np.meshgrid(np.linspace(0, 6, 500), np.linspace(1, 4.5, 500))
for i, (clf_name, clf) in enumerate(classifiers.items()):
    plt.figure(1)
    clf.fit(X1)
    # Evaluate the decision function on the grid; the 0-level contour
    # drawn below is the learned inlier/outlier frontier.
    Z1 = clf.decision_function(np.c_[xx1.ravel(), yy1.ravel()])
    Z1 = Z1.reshape(xx1.shape)
    legend1[clf_name] = plt.contour(
        xx1, yy1, Z1, levels=[0], linewidths=2, colors=colors[i])
legend1_values_list = list(legend1.values())
legend1_keys_list = list(legend1.keys())

# Plot the results (= shape of the data points cloud)
plt.figure(1)  # two clusters
plt.title("Outlier detection on a real data set (wine recognition)")
plt.scatter(X1[:, 0], X1[:, 1], color='black')
bbox_args = dict(boxstyle="round", fc="0.8")
arrow_args = dict(arrowstyle="->")
plt.annotate("outlying points", xy=(4, 2),
             xycoords="data", textcoords="data",
             xytext=(3, 1.25), bbox=bbox_args, arrowprops=arrow_args)
plt.xlim((xx1.min(), xx1.max()))
plt.ylim((yy1.min(), yy1.max()))
plt.legend((legend1_values_list[0].collections[0],
            legend1_values_list[1].collections[0],
            legend1_values_list[2].collections[0]),
           (legend1_keys_list[0], legend1_keys_list[1], legend1_keys_list[2]),
           loc="upper center",
           prop=matplotlib.font_manager.FontProperties(size=11))
plt.ylabel("ash")
plt.xlabel("malic_acid")
plt.show()

# %%
# Second example
# --------------
# The second example shows the ability of the Minimum Covariance Determinant
# robust estimator of covariance to concentrate on the main mode of the data
# distribution: the location seems to be well estimated, although the
# covariance is hard to estimate due to the banana-shaped distribution. Anyway,
# we can get rid of some outlying observations. The One-Class SVM is able to
# capture the real data structure, but the difficulty is to adjust its kernel
# bandwidth parameter so as to obtain a good compromise between the shape of
# the data scatter matrix and the risk of over-fitting the data.

# Get data
X2 = load_wine()['data'][:, [6, 9]]  # "banana"-shaped

# Learn a frontier for outlier detection with several classifiers
xx2, yy2 = np.meshgrid(np.linspace(-1, 5.5, 500), np.linspace(-2.5, 19, 500))
for i, (clf_name, clf) in enumerate(classifiers.items()):
    plt.figure(2)
    clf.fit(X2)
    Z2 = clf.decision_function(np.c_[xx2.ravel(), yy2.ravel()])
    Z2 = Z2.reshape(xx2.shape)
    legend2[clf_name] = plt.contour(
        xx2, yy2, Z2, levels=[0], linewidths=2, colors=colors[i])
legend2_values_list = list(legend2.values())
legend2_keys_list = list(legend2.keys())

# Plot the results (= shape of the data points cloud)
plt.figure(2)  # "banana" shape
plt.title("Outlier detection on a real data set (wine recognition)")
plt.scatter(X2[:, 0], X2[:, 1], color='black')
plt.xlim((xx2.min(), xx2.max()))
plt.ylim((yy2.min(), yy2.max()))
plt.legend((legend2_values_list[0].collections[0],
            legend2_values_list[1].collections[0],
            legend2_values_list[2].collections[0]),
           (legend2_keys_list[0], legend2_keys_list[1], legend2_keys_list[2]),
           loc="upper center",
           prop=matplotlib.font_manager.FontProperties(size=11))
plt.ylabel("color_intensity")
plt.xlabel("flavanoids")
plt.show()
|
glemaitre/scikit-learn
|
examples/applications/plot_outlier_detection_wine.py
|
Python
|
bsd-3-clause
| 5,819
|
[
"Gaussian"
] |
3715b450f94b3b9dacd369b16b8c4272ac492e60b34869cc741bf03d161ffed4
|
#
# Copyright (c) 2015 nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/scancode-toolkit/
# The ScanCode software is licensed under the Apache License version 2.0.
# Data generated with ScanCode require an acknowledgment.
# ScanCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with ScanCode or any ScanCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# ScanCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# ScanCode is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/scancode-toolkit/ for support and download.
# Package version string.
__version__ = '1.4.0'
|
ened/scancode-toolkit
|
src/scancode/__init__.py
|
Python
|
apache-2.0
| 1,380
|
[
"VisIt"
] |
e15257c0c52cee9e12bb94ba9e7e91fe41b1d92c4b2162e81817439df28d0d49
|
import tensorflow as tf
import numpy as np
from skimage.segmentation import felzenszwalb as skimage_felzenszwalb
# Default Felzenszwalb parameters (see skimage.segmentation.felzenszwalb):
SCALE = 1.0     # higher values favor larger clusters
SIGMA = 0.0     # width of the Gaussian smoothing kernel used in preprocessing
MIN_SIZE = 20   # minimum component size, enforced by postprocessing
def felzenszwalb(image, scale=SCALE, sigma=SIGMA, min_size=MIN_SIZE):
    """Wrap skimage's Felzenszwalb graph-based segmentation as a TF op.

    Args:
        image: The input image tensor.
        scale: Float; larger values favor larger clusters (optional).
        sigma: Width of the Gaussian pre-smoothing kernel (optional).
        min_size: Minimum component size, enforced using postprocessing
            (optional).

    Returns:
        An int32 tensor of per-pixel segment labels.
    """
    cast_image = tf.cast(image, tf.uint8)

    def _segment(img):
        # Runs eagerly inside the py_func; skimage wants a numpy array.
        labels = skimage_felzenszwalb(img, scale, sigma, min_size)
        return labels.astype(np.int32)

    return tf.py_func(_segment, [cast_image], tf.int32, stateful=False,
                      name='felzenszwalb')
def felzenszwalb_generator(scale=SCALE, sigma=SIGMA, min_size=MIN_SIZE):
    """Return a one-argument segmentation function with the given
    Felzenszwalb parameters bound.

    Args:
        scale: Float; larger values favor larger clusters (optional).
        sigma: Width of the Gaussian pre-smoothing kernel (optional).
        min_size: Minimum component size, enforced using postprocessing
            (optional).

    Returns:
        A callable taking a single input image.
    """
    def _segmenter(image):
        return felzenszwalb(image, scale, sigma, min_size)
    return _segmenter
def felzenszwalb_json_generator(config):
    """Build a Felzenszwalb segmentation function from a config mapping.

    Args:
        config: A mapping with optional 'scale', 'sigma' and 'min_size'
            keys; module defaults fill in anything missing.

    Returns:
        A callable taking a single input image.
    """
    scale = config.get('scale', SCALE)
    sigma = config.get('sigma', SIGMA)
    min_size = config.get('min_size', MIN_SIZE)
    return felzenszwalb_generator(scale, sigma, min_size)
|
rusty1s/graph-based-image-classification
|
segmentation/algorithm/felzenszwalb.py
|
Python
|
mit
| 2,086
|
[
"Gaussian"
] |
65265c802c2e233255c5cd34aa6716ec29d9d6e3cf31290d970cbdd70868cc36
|
#!/usr/local/bin/env python
#=============================================================================================
# MODULE DOCSTRING
#=============================================================================================
"""
Test a variety of custom integrators.
DESCRIPTION
TODO
COPYRIGHT AND LICENSE
@author John D. Chodera <jchodera@gmail.com>
All code in this repository is released under the GNU General Public License.
This program is free software: you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation, either version 3 of the License, or (at your option) any later
version.
This program is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
this program. If not, see <http://www.gnu.org/licenses/>.
"""
#=============================================================================================
# GLOBAL IMPORTS
#=============================================================================================
import sys
import math
import doctest
import numpy
import time
import simtk.unit as units
import simtk.openmm as openmm
import test_systems as testsystems
#=============================================================================================
# CONSTANTS
#=============================================================================================
kB = units.BOLTZMANN_CONSTANT_kB * units.AVOGADRO_CONSTANT_NA
#=============================================================================================
# INTEGRATORS
#=============================================================================================
def VelocityVerletIntegrator(timestep):
    """
    Construct a velocity Verlet integrator.

    ARGUMENTS

    timestep (numpy.unit.Quantity compatible with femtoseconds) - the integration timestep

    RETURNS

    integrator (simtk.openmm.CustomIntegrator) - a velocity Verlet integrator

    NOTES

    This code is verbatim from Peter Eastman's example.
    The (x-x1)/dt term corrects the velocities for any displacement applied by
    the position-constraint solver.
    """
    integrator = openmm.CustomIntegrator(timestep)

    integrator.addPerDofVariable("x1", 0)  # position before constraints are applied

    integrator.addUpdateContextState()
    # First velocity half-kick, then a full position step.
    integrator.addComputePerDof("v", "v+0.5*dt*f/m")
    integrator.addComputePerDof("x", "x+dt*v")
    integrator.addComputePerDof("x1", "x")
    integrator.addConstrainPositions()
    # Second half-kick, plus velocity correction for the constraint displacement.
    integrator.addComputePerDof("v", "v+0.5*dt*f/m+(x-x1)/dt")
    integrator.addConstrainVelocities()

    return integrator
def AndersenVelocityVerletIntegrator(timestep, friction, temperature):
    """
    Construct a velocity Verlet integrator that redraws all velocities from the
    Maxwell-Boltzmann distribution at the start of every step.

    ARGUMENTS

    timestep (numpy.unit.Quantity compatible with femtoseconds) - the integration timestep
    friction (collision rate) - NOTE(review): 'friction' is never referenced in
        this body; every step fully rethermalizes the velocities -- confirm intent.
    temperature (numpy.unit.Quantity compatible with kelvin) - the temperature
        used for the Maxwell-Boltzmann velocity draw

    RETURNS

    integrator (simtk.openmm.CustomIntegrator) - the integrator
    """
    integrator = openmm.CustomIntegrator(timestep)

    #
    # Integrator setup.
    #
    kT = kB * temperature
    integrator.addGlobalVariable("kT", kT) # thermal energy
    integrator.addPerDofVariable("sigma_v", 0) # velocity distribution stddev for Maxwell-Boltzmann (set later)
    integrator.addPerDofVariable("x1", 0) # for constraints

    #
    # Update velocities from Maxwell-Boltzmann distribution.
    #
    integrator.addComputePerDof("sigma_v", "sqrt(kT/m)")
    integrator.addComputePerDof("v", "sigma_v*gaussian")

    #
    # Velocity Verlet
    #
    integrator.addUpdateContextState()
    integrator.addComputePerDof("v", "v+0.5*dt*f/m")
    integrator.addComputePerDof("x", "x+dt*v")
    integrator.addComputePerDof("x1", "x")
    integrator.addConstrainPositions()
    # Half-kick with velocity correction for the constraint displacement.
    integrator.addComputePerDof("v", "v+0.5*dt*f/m+(x-x1)/dt")
    integrator.addConstrainVelocities()

    return integrator
def MetropolisMonteCarloIntegrator(timestep, temperature=298.0*units.kelvin, sigma=0.01*units.angstroms):
    """
    Create a simple Metropolis Monte Carlo integrator that uses Gaussian displacement trials.

    ARGUMENTS

    timestep (numpy.unit.Quantity compatible with femtoseconds) - the integration timestep
    temperature (numpy.unit.Quantity compatible with kelvin) - the temperature
    sigma (numpy.unit.Quantity compatible with nanometers) - the displacement standard deviation for each degree of freedom

    RETURNS

    integrator (simtk.openmm.CustomIntegrator) - a Metropolis Monte Carlo integrator

    WARNING

    This integrator does not respect constraints.

    NOTES

    Velocities are drawn from a Maxwell-Boltzmann distribution each timestep to generate correct (x,v) statistics.
    Additional global variables 'ntrials' and 'naccept' keep track of how many trials have been attempted and accepted, respectively.
    """
    integrator = openmm.CustomIntegrator(timestep)

    kT = kB * temperature

    # Bookkeeping and workspace variables.
    integrator.addGlobalVariable("naccept", 0) # number accepted
    integrator.addGlobalVariable("ntrials", 0) # number of Metropolization trials
    integrator.addGlobalVariable("kT", kT) # thermal energy
    integrator.addPerDofVariable("sigma_x", sigma) # perturbation size
    integrator.addPerDofVariable("sigma_v", 0) # velocity distribution stddev for Maxwell-Boltzmann (set later)
    integrator.addPerDofVariable("xold", 0) # old positions
    integrator.addGlobalVariable("Eold", 0) # old energy
    integrator.addGlobalVariable("Enew", 0) # new energy
    integrator.addGlobalVariable("accept", 0) # accept or reject

    #
    # Context state update.
    #
    integrator.addUpdateContextState();

    #
    # Update velocities from Maxwell-Boltzmann distribution.
    #
    integrator.addComputePerDof("sigma_v", "sqrt(kT/m)")
    integrator.addComputePerDof("v", "sigma_v*gaussian")
    integrator.addConstrainVelocities();

    #
    # propagation steps
    #
    # Store old positions and energy.
    integrator.addComputePerDof("xold", "x")
    integrator.addComputeGlobal("Eold", "energy")
    # Gaussian particle displacements.
    integrator.addComputePerDof("x", "x + sigma_x*gaussian")
    # Accept or reject with Metropolis criteria.
    integrator.addComputeGlobal("accept", "step(exp(-(energy-Eold)/kT) - uniform)")
    integrator.addComputePerDof("x", "(1-accept)*xold + x*accept")
    # Accumulate acceptance statistics.
    integrator.addComputeGlobal("naccept", "naccept + accept")
    integrator.addComputeGlobal("ntrials", "ntrials + 1")

    return integrator
def HMCIntegrator(timestep, temperature=298.0*units.kelvin, nsteps=10):
    """
    Create a hybrid Monte Carlo (HMC) integrator.

    ARGUMENTS

    timestep (numpy.unit.Quantity compatible with femtoseconds) - the integration timestep
    temperature (numpy.unit.Quantity compatible with kelvin) - the temperature
    nsteps (int) - the number of velocity Verlet steps to take per HMC trial

    RETURNS

    integrator (simtk.openmm.CustomIntegrator) - a hybrid Monte Carlo integrator

    WARNING

    Because 'nsteps' sets the number of steps taken, a call to integrator.step(1) actually takes 'nsteps' steps.

    NOTES

    The velocity is drawn from a Maxwell-Boltzmann distribution, then 'nsteps' steps are taken,
    and the new configuration is either accepted or rejected.
    Additional global variables 'ntrials' and 'naccept' keep track of how many trials have been attempted and
    accepted, respectively.

    TODO

    Currently, the simulation timestep is only advanced by 'timestep' each step, rather than timestep*nsteps.  Fix this.
    """
    kT = kB * temperature

    integrator = openmm.CustomIntegrator(timestep)

    # Bookkeeping and workspace variables.
    integrator.addGlobalVariable("naccept", 0) # number accepted
    integrator.addGlobalVariable("ntrials", 0) # number of Metropolization trials
    integrator.addGlobalVariable("kT", kB*temperature) # thermal energy (same value as local kT above)
    integrator.addPerDofVariable("sigma", 0) # Maxwell-Boltzmann velocity stddev (set below)
    integrator.addGlobalVariable("ke", 0) # kinetic energy
    integrator.addPerDofVariable("xold", 0) # old positions
    integrator.addGlobalVariable("Eold", 0) # old energy
    integrator.addGlobalVariable("Enew", 0) # new energy
    integrator.addGlobalVariable("accept", 0) # accept or reject
    integrator.addPerDofVariable("x1", 0) # for constraints

    #
    # Pre-computation.
    # This only needs to be done once, but it needs to be done for each degree of freedom.
    # Could move this to initialization?
    #
    integrator.addComputePerDof("sigma", "sqrt(kT/m)")

    #
    # Allow Context updating here.
    #
    integrator.addUpdateContextState();

    #
    # Draw new velocity.
    #
    integrator.addComputePerDof("v", "sigma*gaussian")
    integrator.addConstrainVelocities();

    #
    # Store old position and energy.
    #
    integrator.addComputeSum("ke", "0.5*m*v*v")
    integrator.addComputeGlobal("Eold", "ke + energy")
    integrator.addComputePerDof("xold", "x")

    #
    # Inner symplectic steps using velocity Verlet.
    # Each loop iteration appends one full velocity Verlet step to the program.
    #
    for step in range(nsteps):
        integrator.addUpdateContextState()
        integrator.addComputePerDof("v", "v+0.5*dt*f/m")
        integrator.addComputePerDof("x", "x+dt*v")
        integrator.addComputePerDof("x1", "x")
        integrator.addConstrainPositions()
        integrator.addComputePerDof("v", "v+0.5*dt*f/m+(x-x1)/dt")
        integrator.addConstrainVelocities()

    #
    # Accept/reject step.
    #
    integrator.addComputeSum("ke", "0.5*m*v*v")
    integrator.addComputeGlobal("Enew", "ke + energy")
    integrator.addComputeGlobal("accept", "step(exp(-(Enew-Eold)/kT) - uniform)")
    integrator.addComputePerDof("x", "x*accept + xold*(1-accept)")

    #
    # Accumulate statistics.
    #
    integrator.addComputeGlobal("naccept", "naccept + accept")
    integrator.addComputeGlobal("ntrials", "ntrials + 1")

    return integrator
def GHMCIntegrator(timestep, temperature=298.0*units.kelvin, gamma=50.0/units.picoseconds):
    """
    Create a generalized hybrid Monte Carlo (GHMC) integrator.

    ARGUMENTS

    timestep (numpy.unit.Quantity compatible with femtoseconds) - the integration timestep
    temperature (numpy.unit.Quantity compatible with kelvin) - the temperature
    gamma (numpy.unit.Quantity compatible with 1/picoseconds) - the collision rate

    RETURNS

    integrator (simtk.openmm.CustomIntegrator) - a GHMC integrator

    NOTES

    This integrator is equivalent to a Langevin integrator in the velocity Verlet discretization with a
    Metropolization step to ensure sampling from the appropriate distribution.
    Additional global variables 'ntrials' and 'naccept' keep track of how many trials have been attempted and
    accepted, respectively.

    TODO

    Move initialization of 'sigma' to setting the per-particle variables.
    """
    kT = kB * temperature

    integrator = openmm.CustomIntegrator(timestep)

    # Bookkeeping and workspace variables.
    integrator.addGlobalVariable("kT", kB*temperature) # thermal energy
    integrator.addGlobalVariable("b", numpy.exp(-gamma*timestep)) # velocity mixing parameter
    integrator.addPerDofVariable("sigma", 0) # Maxwell-Boltzmann velocity stddev (set below)
    integrator.addGlobalVariable("ke", 0) # kinetic energy
    integrator.addPerDofVariable("vold", 0) # old velocities
    integrator.addPerDofVariable("xold", 0) # old positions
    integrator.addGlobalVariable("Eold", 0) # old energy
    integrator.addGlobalVariable("Enew", 0) # new energy
    integrator.addGlobalVariable("accept", 0) # accept or reject
    integrator.addGlobalVariable("naccept", 0) # number accepted
    integrator.addGlobalVariable("ntrials", 0) # number of Metropolization trials
    integrator.addPerDofVariable("x1", 0) # position before application of constraints

    #
    # Pre-computation.
    # This only needs to be done once, but it needs to be done for each degree of freedom.
    # Could move this to initialization?
    #
    integrator.addComputePerDof("sigma", "sqrt(kT/m)")

    #
    # Allow context updating here.
    #
    integrator.addUpdateContextState();

    #
    # Velocity perturbation.
    #
    integrator.addComputePerDof("v", "sqrt(b)*v + sqrt(1-b)*sigma*gaussian")
    integrator.addConstrainVelocities();

    #
    # Metropolized symplectic step.
    #
    integrator.addComputeSum("ke", "0.5*m*v*v")
    integrator.addComputeGlobal("Eold", "ke + energy")
    integrator.addComputePerDof("xold", "x")
    integrator.addComputePerDof("vold", "v")
    integrator.addComputePerDof("v", "v + 0.5*dt*f/m")
    integrator.addComputePerDof("x", "x + v*dt")
    integrator.addComputePerDof("x1", "x")
    integrator.addConstrainPositions();
    integrator.addComputePerDof("v", "v + 0.5*dt*f/m + (x-x1)/dt")
    integrator.addConstrainVelocities();
    integrator.addComputeSum("ke", "0.5*m*v*v")
    integrator.addComputeGlobal("Enew", "ke + energy")
    integrator.addComputeGlobal("accept", "step(exp(-(Enew-Eold)/kT) - uniform)")
    integrator.addComputePerDof("x", "x*accept + xold*(1-accept)")
    # On rejection the old velocity is negated (note the minus sign).
    integrator.addComputePerDof("v", "v*accept - vold*(1-accept)")

    #
    # Velocity randomization
    #
    integrator.addComputePerDof("v", "sqrt(b)*v + sqrt(1-b)*sigma*gaussian")
    integrator.addConstrainVelocities();

    #
    # Accumulate statistics.
    #
    integrator.addComputeGlobal("naccept", "naccept + accept")
    integrator.addComputeGlobal("ntrials", "ntrials + 1")

    return integrator
def VVVRIntegrator(timestep, temperature=298.0*units.kelvin, gamma=50.0/units.picoseconds):
    """
    Create a velocity verlet with velocity randomization (VVVR) integrator.

    ARGUMENTS

    timestep (numpy.unit.Quantity compatible with femtoseconds) - the integration timestep
    temperature (numpy.unit.Quantity compatible with kelvin) - the temperature
    gamma (numpy.unit.Quantity compatible with 1/picoseconds) - the collision rate

    RETURNS

    integrator (simtk.openmm.CustomIntegrator) - a VVVR integrator

    NOTES

    This integrator is equivalent to a Langevin integrator in the velocity Verlet discretization with a
    timestep correction to ensure that the field-free diffusion constant is timestep invariant.
    The global 'pseudowork' keeps track of the pseudowork accumulated during integration, and can be
    used to correct the sampled statistics or in a Metropolization scheme.

    TODO

    Move initialization of 'sigma' to setting the per-particle variables.
    We can ditch pseudowork and instead use total energy difference - heat.
    """
    kT = kB * temperature

    integrator = openmm.CustomIntegrator(timestep)

    # Bookkeeping and workspace variables.
    integrator.addGlobalVariable("kT", kT) # thermal energy
    integrator.addGlobalVariable("b", numpy.exp(-gamma*timestep)) # velocity mixing parameter
    integrator.addPerDofVariable("sigma", 0) # Maxwell-Boltzmann velocity stddev (set below)
    integrator.addGlobalVariable("ke_old", 0) # kinetic energy
    integrator.addGlobalVariable("ke_new", 0) # kinetic energy
    integrator.addGlobalVariable("ke", 0) # kinetic energy
    integrator.addGlobalVariable("Eold", 0) # old energy
    integrator.addGlobalVariable("Enew", 0) # new energy
    integrator.addGlobalVariable("accept", 0) # accept or reject
    integrator.addGlobalVariable("naccept", 0) # number accepted
    integrator.addGlobalVariable("ntrials", 0) # number of Metropolization trials
    integrator.addPerDofVariable("x1", 0) # position before application of constraints
    integrator.addGlobalVariable("pseudowork", 0) # accumulated pseudowork
    integrator.addGlobalVariable("heat", 0) # accumulated heat

    #
    # Allow context updating here.
    #
    integrator.addUpdateContextState();

    #
    # Pre-computation.
    # This only needs to be done once, but it needs to be done for each degree of freedom.
    # Could move this to initialization?
    #
    integrator.addComputePerDof("sigma", "sqrt(kT/m)")

    #
    # Velocity perturbation.
    #
    integrator.addComputeSum("ke_old", "0.5*m*v*v")
    integrator.addComputePerDof("v", "sqrt(b)*v + sqrt(1-b)*sigma*gaussian")
    integrator.addConstrainVelocities();
    integrator.addComputeSum("ke_new", "0.5*m*v*v")
    integrator.addComputeGlobal("heat", "heat + (ke_new - ke_old)")

    #
    # Metropolized symplectic step.
    #
    integrator.addComputeSum("ke", "0.5*m*v*v")
    integrator.addComputeGlobal("Eold", "ke + energy")
    integrator.addComputePerDof("v", "v + 0.5*dt*f/m")
    integrator.addComputePerDof("x", "x + v*dt")
    integrator.addComputePerDof("x1", "x")
    integrator.addConstrainPositions();
    integrator.addComputePerDof("v", "v + 0.5*dt*f/m + (x-x1)/dt")
    integrator.addConstrainVelocities();
    integrator.addComputeSum("ke", "0.5*m*v*v")
    integrator.addComputeGlobal("Enew", "ke + energy")

    #
    # Accumulate statistics.
    #
    integrator.addComputeGlobal("pseudowork", "pseudowork + (Enew-Eold)") # accumulate pseudowork
    # naccept is incremented unconditionally; there is no rejection step in VVVR.
    integrator.addComputeGlobal("naccept", "naccept + 1")
    integrator.addComputeGlobal("ntrials", "ntrials + 1")

    #
    # Velocity randomization
    #
    integrator.addComputeSum("ke_old", "0.5*m*v*v")
    integrator.addComputePerDof("v", "sqrt(b)*v + sqrt(1-b)*sigma*gaussian")
    integrator.addConstrainVelocities();
    integrator.addComputeSum("ke_new", "0.5*m*v*v")
    integrator.addComputeGlobal("heat", "heat + (ke_new - ke_old)")

    return integrator
#=============================================================================================
# UTILITY SUBROUTINES
#=============================================================================================
def generateMaxwellBoltzmannVelocities(system, temperature):
    """Generate Maxwell-Boltzmann velocities.

    ARGUMENTS

    system (simtk.openmm.System) - the system for which velocities are to be assigned
    temperature (simtk.unit.Quantity of temperature) - the temperature at which velocities are to be assigned

    RETURNS

    velocities (simtk.unit.Quantity of numpy Nx3 array, units length/time) - particle velocities

    TODO

    This could be sped up by introducing vector operations.
    """
    # Get number of atoms
    natoms = system.getNumParticles()

    # Create storage for velocities.
    velocities = units.Quantity(numpy.zeros([natoms, 3], numpy.float32), units.nanometer / units.picosecond) # velocities[i,k] is the kth component of the velocity of atom i

    # Compute thermal energy from the specified temperature.
    # (Uses the module-level kB constant; the previous local recomputation of
    # kB and the unused 'beta' have been removed.)
    kT = kB * temperature # thermal energy

    # Assign velocities from the Maxwell-Boltzmann distribution.
    for atom_index in range(natoms):
        mass = system.getParticleMass(atom_index) # atomic mass
        sigma = units.sqrt(kT / mass) # standard deviation of velocity distribution for each coordinate for this atom
        for k in range(3):
            velocities[atom_index,k] = sigma * numpy.random.normal()

    # Return velocities
    return velocities
def computeHarmonicOscillatorExpectations(K, mass, temperature):
    """
    Compute mean and standard deviation of potential and kinetic energies for a
    3D harmonic oscillator.

    Numerical quadrature is used for the potential-energy moments.

    ARGUMENTS

    K - spring constant
    mass - mass of particle
    temperature - temperature

    RETURNS

    values (dict) - values['potential'|'kinetic']['mean'|'stddev']
    """
    # BUGFIX: scipy was used below but never imported anywhere in this module,
    # so calling this function raised NameError.  Import locally to keep scipy
    # an optional dependency for the rest of the module.
    import scipy.integrate

    values = dict()

    # Compute thermal energy and inverse temperature from specified temperature.
    kB = units.BOLTZMANN_CONSTANT_kB * units.AVOGADRO_CONSTANT_NA
    kT = kB * temperature # thermal energy
    beta = 1.0 / kT # inverse temperature

    # Compute standard deviation along one dimension.
    sigma = 1.0 / units.sqrt(beta * K)

    # Define limits of integration along r.
    r_min = 0.0 * units.nanometers # initial value for integration
    r_max = 10.0 * sigma # maximum radius to integrate to

    # Compute mean and std dev of potential energy via radial quadrature.
    V = lambda r : (K/2.0) * (r*units.nanometers)**2 / units.kilojoules_per_mole # potential in kJ/mol, where r in nm
    q = lambda r : 4.0 * math.pi * r**2 * math.exp(-beta * (K/2.0) * (r*units.nanometers)**2) # q(r), where r in nm
    (IqV2, dIqV2) = scipy.integrate.quad(lambda r : q(r) * V(r)**2, r_min / units.nanometers, r_max / units.nanometers)
    (IqV, dIqV)   = scipy.integrate.quad(lambda r : q(r) * V(r), r_min / units.nanometers, r_max / units.nanometers)
    (Iq, dIq)     = scipy.integrate.quad(lambda r : q(r), r_min / units.nanometers, r_max / units.nanometers)
    values['potential'] = dict()
    values['potential']['mean'] = (IqV / Iq) * units.kilojoules_per_mole
    # BUGFIX: previously stored E[V^2] (in kJ/mol) as the "stddev"; the standard
    # deviation is sqrt(E[V^2] - E[V]^2).
    values['potential']['stddev'] = math.sqrt(IqV2 / Iq - (IqV / Iq)**2) * units.kilojoules_per_mole

    # Compute mean and std dev of kinetic energy (exact for 3 degrees of freedom:
    # mean (3/2) kT, variance (3/2) (kT)^2).
    values['kinetic'] = dict()
    values['kinetic']['mean'] = (3./2.) * kT
    values['kinetic']['stddev'] = math.sqrt(3./2.) * kT

    return values
def statisticalInefficiency(A_n, B_n=None, fast=False, mintime=3):
    """
    Compute the (cross) statistical inefficiency of (two) timeseries.

    REQUIRED ARGUMENTS
      A_n (numpy array) - A_n[n] is nth value of timeseries A.  Length is deduced from vector.

    OPTIONAL ARGUMENTS
      B_n (numpy array) - B_n[n] is nth value of timeseries B.  Length is deduced from vector.
         If supplied, the cross-correlation of timeseries A and B will be estimated instead of the
         autocorrelation of timeseries A.
      fast (boolean) - if True, will use faster (but less accurate) method to estimate correlation
         time, described in Ref. [1] (default: False)
      mintime (int) - minimum amount of correlation function to compute (default: 3)
         The algorithm terminates after computing the correlation time out to mintime when the
         correlation function first goes negative.  Note that this time may need to be increased
         if there is a strong initial negative peak in the correlation function.

    RETURNS
      g is the estimated statistical inefficiency (equal to 1 + 2 tau, where tau is the correlation time).
      We enforce g >= 1.0.

    RAISES
      ValueError - if A_n and B_n have different shapes, or if the sample
      covariance is zero (statistical inefficiency undefined).

    NOTES
      The same timeseries can be used for both A_n and B_n to get the autocorrelation statistical inefficiency.
      The fast method described in Ref [1] is used to compute g.

    REFERENCES
      [1] J. D. Chodera, W. C. Swope, J. W. Pitera, C. Seok, and K. A. Dill. Use of the weighted
      histogram analysis method for the analysis of simulated and parallel tempering simulations.
      JCTC 3(1):26-41, 2007.

    EXAMPLES

    Compute statistical inefficiency of timeseries data with known correlation time.

    >>> import timeseries
    >>> A_n = timeseries.generateCorrelatedTimeseries(N=100000, tau=5.0)
    >>> g = statisticalInefficiency(A_n, fast=True)
    """
    # Create numpy copies of input arguments.
    A_n = numpy.array(A_n)
    if B_n is not None:
        B_n = numpy.array(B_n)
    else:
        B_n = numpy.array(A_n)

    # Get the length of the timeseries.
    N = A_n.size

    # Be sure A_n and B_n have the same dimensions.
    if(A_n.shape != B_n.shape):
        # BUGFIX: previously raised undefined name 'ParameterError' (NameError).
        raise ValueError('A_n and B_n must have same dimensions.')

    # Initialize statistical inefficiency estimate with uncorrelated value.
    g = 1.0

    # Compute mean of each timeseries.
    mu_A = A_n.mean()
    mu_B = B_n.mean()

    # Make temporary copies of fluctuation from mean.
    dA_n = A_n.astype(numpy.float64) - mu_A
    dB_n = B_n.astype(numpy.float64) - mu_B

    # Compute estimator of covariance of (A,B) using estimator that will ensure C(0) = 1.
    sigma2_AB = (dA_n * dB_n).mean() # standard estimator to ensure C(0) = 1

    # Trap the case where this covariance is zero, and we cannot proceed.
    if(sigma2_AB == 0):
        # BUGFIX: previously raised undefined name 'ParameterException' (NameError).
        raise ValueError('Sample covariance sigma_AB^2 = 0 -- cannot compute statistical inefficiency')

    # Accumulate the integrated correlation time by computing the normalized correlation time at
    # increasing values of t.  Stop accumulating if the correlation function goes negative, since
    # this is unlikely to occur unless the correlation function has decayed to the point where it
    # is dominated by noise and indistinguishable from zero.
    t = 1
    increment = 1
    while (t < N-1):
        # compute normalized fluctuation correlation function at time t
        C = sum( dA_n[0:(N-t)]*dB_n[t:N] + dB_n[0:(N-t)]*dA_n[t:N] ) / (2.0 * float(N-t) * sigma2_AB)

        # Terminate if the correlation function has crossed zero and we've computed the correlation
        # function at least out to 'mintime'.
        if (C <= 0.0) and (t > mintime):
            break

        # Accumulate contribution to the statistical inefficiency.
        g += 2.0 * C * (1.0 - float(t)/float(N)) * float(increment)

        # Increment t and the amount by which we increment t.
        t += increment

        # Increase the interval if "fast mode" is on.
        if fast: increment += 1

    # g must be at least unity
    if (g < 1.0): g = 1.0

    # Return the computed statistical inefficiency.
    return g
#=============================================================================================
# MAIN
#=============================================================================================

# NOTE: this script body runs at import time (there is no __main__ guard).

# Test integrator.
timestep = 1.0 * units.femtosecond
temperature = 298.0 * units.kelvin
kT = kB * temperature
friction = 20.0 / units.picosecond
nsteps = 1000 # MD steps per iteration
niterations = 100 # number of measurement iterations

# Select system:
testsystem = testsystems.MolecularIdealGas()
#testsystem = testsystems.AlanineDipeptideImplicit(flexibleConstraints=False, shake=True)
#testsystem = testsystems.LysozymeImplicit(flexibleConstraints=False, shake=True)
#testsystem = testsystems.HarmonicOscillator()
#testsystem = testsystems.HarmonicOscillatorArray(N=16)
#testsystem = testsystems.AlanineDipeptideExplicit(flexibleConstraints=False, shake=True)

# Retrieve system and positions.
[system, positions] = [testsystem.system, testsystem.positions]
velocities = generateMaxwellBoltzmannVelocities(system, temperature)
ndof = 3*system.getNumParticles() - system.getNumConstraints() # number of degrees of freedom

# Select integrator:
integrator = openmm.LangevinIntegrator(temperature, friction, timestep)
#integrator = AndersenVelocityVerletIntegrator(temperature, timestep)
#integrator = MetropolisMonteCarloIntegrator(timestep, temperature=temperature)
#integrator = HMCIntegrator(timestep, temperature=temperature)
#integrator = VVVRIntegrator(timestep, temperature=temperature)
#integrator = GHMCIntegrator(timestep, temperature=temperature)
#integrator = VelocityVerletIntegrator(timestep)
#integrator = openmm.VerletIntegrator(timestep)

# Create Context and set positions and velocities.
context = openmm.Context(system, integrator)
context.setPositions(positions)
context.setVelocities(velocities)
print context.getPlatform().getName()

# Minimize
#openmm.LocalEnergyMinimizer.minimize(context)

# Accumulate statistics.
x_n = numpy.zeros([niterations], numpy.float64) # x_n[i] is the x position of atom 1 after iteration i, in angstroms
potential_n = numpy.zeros([niterations], numpy.float64) # potential_n[i] is the potential energy after iteration i, in kT
kinetic_n = numpy.zeros([niterations], numpy.float64) # kinetic_n[i] is the kinetic energy after iteration i, in kT
temperature_n = numpy.zeros([niterations], numpy.float64) # temperature_n[i] is the instantaneous kinetic temperature from iteration i, in K
for iteration in range(niterations):
    print "iteration %d / %d : propagating for %d steps..." % (iteration, niterations, nsteps)

    # Record energies before propagation.
    state = context.getState(getEnergy=True)
    initial_potential_energy = state.getPotentialEnergy()
    initial_kinetic_energy = state.getKineticEnergy()
    initial_total_energy = initial_kinetic_energy + initial_potential_energy

    # Propagate and time the propagation.
    initial_time = time.time()
    integrator.step(nsteps)
    state = context.getState(getEnergy=True, getPositions=True)
    final_potential_energy = state.getPotentialEnergy()
    final_kinetic_energy = state.getKineticEnergy()
    final_total_energy = final_kinetic_energy + final_potential_energy
    final_time = time.time()
    elapsed_time = final_time - initial_time
    delta_total_energy = final_total_energy - initial_total_energy
    instantaneous_temperature = final_kinetic_energy * 2.0 / ndof / (units.BOLTZMANN_CONSTANT_kB * units.AVOGADRO_CONSTANT_NA)
    print "total energy: initial %8.1f kT | final %8.1f kT | delta = %8.3f kT | instantaneous temperature: %8.1f K | time %.3f s" % (initial_total_energy/kT, final_total_energy/kT, delta_total_energy/kT, instantaneous_temperature/units.kelvin, elapsed_time)

    #pseudowork = integrator.getGlobalVariable(0) * units.kilojoules_per_mole / kT
    #b = integrator.getGlobalVariable(2)
    #c = integrator.getGlobalVariable(3)
    #print (pseudowork, b, c)

    # global_variables = { integrator.getGlobalVariableName(index) : index for index in range(integrator.getNumGlobalVariables()) }
    # naccept = integrator.getGlobalVariable(global_variables['naccept'])
    # ntrials = integrator.getGlobalVariable(global_variables['ntrials'])
    # print "accepted %d / %d (%.3f %%)" % (naccept, ntrials, float(naccept)/float(ntrials)*100.0)

    # Accumulate statistics.
    x_n[iteration] = state.getPositions(asNumpy=True)[0,0] / units.angstroms
    potential_n[iteration] = final_potential_energy / kT
    kinetic_n[iteration] = final_kinetic_energy / kT
    temperature_n[iteration] = instantaneous_temperature / units.kelvin

# Compute expected statistics for harmonic oscillator.
# NOTE(review): these exact values only apply when a harmonic test system is
# selected above -- confirm when switching testsystem.
K = 100.0 * units.kilocalories_per_mole / units.angstroms**2
beta = 1.0 / kT
x_mean_exact = 0.0 # mean, in angstroms
x_std_exact = 1.0 / units.sqrt(beta * K) / units.angstroms # std dev, in angstroms

# Analyze statistics.
g = statisticalInefficiency(x_n)
Neff = niterations / g # number of effective samples

x_mean = x_n.mean()
dx_mean = x_n.std() / numpy.sqrt(Neff)
x_mean_error = x_mean - x_mean_exact

x_var = x_n.var()
dx_var = x_var * numpy.sqrt(2. / (Neff-1))

x_std = x_n.std()
dx_std = 0.5 * dx_var / x_std
x_std_error = x_std - x_std_exact

temperature_mean = temperature_n.mean()
dtemperature_mean = temperature_n.std() / numpy.sqrt(Neff)
temperature_error = temperature_mean - temperature/units.kelvin
nsigma = abs(temperature_error) / dtemperature_mean
nsigma_cutoff = 6.0 # significance threshold for flagging the temperature deviation

# TODO: Rework ugly statistics calculation and add nsigma deviation information.
print "positions"
print " mean observed %10.5f +- %10.5f expected %10.5f error %10.5f +- %10.5f" % (x_mean, dx_mean, x_mean_exact, x_mean_error, dx_mean)
print " std observed %10.5f +- %10.5f expected %10.5f error %10.5f +- %10.5f" % (x_std, dx_std, x_std_exact, x_std_error, dx_std)
print "temperature"
# Flag ("***") deviations larger than nsigma_cutoff standard errors.
if nsigma < nsigma_cutoff:
    print " mean observed %10.5f +- %10.5f expected %10.5f error %10.5f +- %10.5f (%.1f sigma)" % (temperature_mean, dtemperature_mean, temperature/units.kelvin, temperature_error, dtemperature_mean, nsigma)
else:
    print " mean observed %10.5f +- %10.5f expected %10.5f error %10.5f +- %10.5f (%.1f sigma) ***" % (temperature_mean, dtemperature_mean, temperature/units.kelvin, temperature_error, dtemperature_mean, nsigma)
|
choderalab/YankTools
|
testsystems/test_custom_integrators.py
|
Python
|
gpl-2.0
| 31,219
|
[
"Gaussian",
"OpenMM"
] |
29e3dce498ce1c3949bf339e2fa1765bd4ef0c26c8584713aa21f7fec9896750
|
"""
Example to generate a .fit, .mod and .dat file to feed in MrMoose for
demonstration. The model consists of a double power-law with a break frequency,
producing six data points from a source at z=4; the redshift will be fitted as a free parameter.
"""
import sys
# adding the path
sys.path.insert(0, '/Users/guillaume/Desktop/MrMoose/MrMoose/')
import utils.models as md
import numpy as np
import utils.mm_utilities as mm
import utils.read_files as rd
# Model parameters used to generate the synthetic SED.
norm = 1.0 # model normalisation (NOTE(review): the .mod prior bounds for $N$ are -25..-15 -- confirm scale)
nu_break = 9.0 # presumably log10 of the break frequency (the .mod bounds are 8..10) -- confirm
alpha1 = 2.
alpha2 = -1.5
nu = 10**np.linspace(6, 11, 10000) # frequency grid
redshift = 4.

# Noise-free model flux over the frequency grid.
fnu = md.double_sync_law(nu, [norm, nu_break, alpha1, alpha2], redshift)

# Synthetic observation setup: filters, per-filter S/N, positions, resolution.
filter_name = np.array(['74MHz(VLA)', '178MHz', '408MHz', '1.4GHz', '4.85GHz', '8.4GHz'])
sn_mod = [15., 15., 15., 15., 15., 15.]
RA_list = ['12h00m00s', ]*6
Dec_list = ['-40d00m00s', ]*6
res_list = [12., ]*6

# Per-filter outputs: noisy flux, flux error, and effective wavelength/frequency.
fnu_mod = np.zeros(filter_name.size)
fnu_err = np.zeros(filter_name.size)
lambda0 = np.zeros(filter_name.size)

# run through the filters
for i_filter, name_filter in enumerate(filter_name):
    # read the filter transmission
    nu_filter, trans_filter = rd.read_single_filter('../filters/'+name_filter+'.fil')
    # calculate the lambda0 (transmission-weighted average of the filter grid)
    lambda0[i_filter] = np.average(nu_filter, weights=trans_filter)
    # perform the integration
    tmp = mm.integrate_filter(nu, fnu, nu_filter, trans_filter)
    # add a gaussian noise (depending on the signal to noise defined previously)
    fnu_err[i_filter] = tmp/sn_mod[i_filter]
    fnu_mod[i_filter] = np.random.normal(tmp, fnu_err[i_filter])
# create the data file
with open('../data/fake_source_ex1cz.dat', 'w') as fake:
    # Header row naming the columns expected by MrMoose.
    fake.write("# filter RA Dec resolution lambda0 det_type flux flux_error arrangement component component_number \n")
    # All filters but the last: rows terminated with a newline.
    for i_filter in range(filter_name.size-1):
        fake.write('{:15} {:15} {:15} {:5.1f} {:10e} {:5} {:10e} {:10e} {:10} {:10} {:10} \n'.format(
            filter_name[i_filter], RA_list[i_filter], Dec_list[i_filter], res_list[i_filter],
            lambda0[i_filter], "d", fnu_mod[i_filter], fnu_err[i_filter], "1", "note", "0"))
    # Last filter: no trailing newline.
    # NOTE(review): the last row's component_number is "0," (trailing comma),
    # unlike "0" for the others -- confirm this is intentional.
    fake.write('{:15} {:15} {:15} {:5.1f} {:10e} {:5} {:10e} {:10e} {:10} {:10} {:10}'.format(
        filter_name[i_filter+1], RA_list[i_filter+1], Dec_list[i_filter+1], res_list[i_filter+1],
        lambda0[i_filter+1], "d", fnu_mod[i_filter+1], fnu_err[i_filter+1], "1", "note", "0,"))
# create the fit file
redshift_fit = -1. # NOTE(review): presumably a sentinel telling MrMoose to fit z freely -- confirm
with open('../fake_source_ex1cz.fit', 'w') as fake:
    fake.write('source_file: data/fake_source_ex1cz.dat \n')
    fake.write('model_file: models/fake_source_ex1cz.mod \n')
    fake.write('all_same_redshift: True \n')
    # BUGFIX: original line read "+str(redshift_fit)+,']" -- the stray comma
    # after '+' was a SyntaxError that prevented the script from running.
    fake.write('redshift: ['+str(redshift_fit)+'] \n')
    fake.write('nwalkers: 20 \n')
    fake.write('nsteps: 20 \n')
    fake.write('nsteps_cut: 18 \n')
    fake.write('percentiles: [10., 25., 50., 75., 90.] \n')
    fake.write('skip_imaging: True \n')
    fake.write('skip_fit: False \n')
    fake.write('skip_MCChains: False \n')
    fake.write('skip_triangle: False \n')
    fake.write('skip_SED: False \n')
    fake.write("unit_obs: 'Hz' \n")
    fake.write("unit_flux: 'Jy' \n")
# create the model file
with open('../models/fake_source_ex1cz.mod', 'w') as fake:
    # First line: model function name and number of free parameters;
    # following lines: one "name min max" prior range per parameter.
    fake.write('double_sync_law_z 5 \n')
    fake.write('$N$ -25 -15 \n')
    fake.write('$\\nu_{break}$ 8.0 10.0 \n')
    fake.write('$\\alpha_1$ 0.5 4.0 \n')
    fake.write('$\\alpha_2$ -3.0 0.0 \n')
    fake.write('$z$ 0 7.0 \n')
|
gdrouart/MrMoose
|
examples/example_1cz.py
|
Python
|
gpl-3.0
| 3,478
|
[
"Gaussian"
] |
75191677b57edfe0837bf45a588f7bc91c6926f8664ac0330da06afa2b725ca0
|
# Copyright (C) 2014 Pierre de Buyl
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
*********************************
espressopp.analysis.TotalVelocity
*********************************
.. function:: espressopp.analysis.TotalVelocity(system)
:param system: The system object.
:type system: espressopp.System
.. function:: espressopp.analysis.TotalVelocity.compute()
Compute the total velocity of the system.
:rtype: float
.. function:: espressopp.analysis.TotalVelocity.reset()
Subtract the total velocity of the system from every particle.
Example of resetting velocity
>>> total_velocity = espressopp.analysis.TotalVelocity(system)
>>> total_velocity.reset()
Example of attaching to integrator
>>> # This extension can be attached to integrator
>>> # and run `reset()` every `n-th` steps.
>>> total_velocity = espressopp.analysis.TotalVelocity(system)
>>> ext_remove_com = espressopp.analysis.ExtAnalyze(total_velocity, 10)
>>> integrator.addExtension(ext_remove_com)
"""
from espressopp.esutil import cxxinit
from espressopp import pmi
from espressopp.analysis.Observable import *
from _espressopp import analysis_TotalVelocity
class TotalVelocityLocal(ObservableLocal, analysis_TotalVelocity):
    # Worker-side wrapper around the C++ analysis_TotalVelocity observable.
    def __init__(self, system):
        # Initialize the C++ backend only on ranks that belong to the active
        # PMI CPU group (or on every rank when no PMI communicator is active).
        if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            cxxinit(self, analysis_TotalVelocity, system)
    def compute(self):
        # Delegate to the C++ implementation: total velocity of the system.
        return self.cxxclass.compute(self)
    def reset(self):
        # Delegate to the C++ implementation: subtract the total velocity
        # of the system from every particle.
        return self.cxxclass.reset(self)
if pmi.isController :
    class TotalVelocity(Observable):
        # Controller-side PMI proxy: forwards "compute" and "reset" calls to
        # the TotalVelocityLocal instances running on the workers.
        __metaclass__ = pmi.Proxy
        pmiproxydefs = dict(
            cls = 'espressopp.analysis.TotalVelocityLocal',
            pmicall = [ "compute", "reset" ],
            pmiproperty = ["v"]
        )
|
acfogarty/espressopp
|
src/analysis/TotalVelocity.py
|
Python
|
gpl-3.0
| 2,655
|
[
"ESPResSo"
] |
7f6d91ef253c7c2c8e5bb9041bb3a3544e08812e2e016657b4a48d425ea6bb45
|
#!/usr/bin/env python
# Copyright 2014-2019 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
'''
Analytical nuclear gradients
============================
Simple usage::
>>> from pyscf import gto, scf, grad
>>> mol = gto.M(atom='N 0 0 0; N 0 0 1', basis='ccpvdz')
>>> mf = scf.RHF(mol).run()
>>> grad.RHF(mf).kernel()
'''
from . import rhf
from . import dhf
from . import uhf
from . import rohf
from .rhf import Gradients as RHF
from .dhf import Gradients as DHF
from .uhf import Gradients as UHF
from .rohf import Gradients as ROHF
grad_nuc = rhf.grad_nuc
try:
    # Gradient modules for post-HF and DFT methods.  These pull in heavier
    # optional dependencies, so an ImportError here is tolerated and the
    # corresponding names are simply absent from the package namespace.
    from . import casci
    from . import casscf
    from . import ccsd
    #from . import ccsd_t
    from . import cisd
    from . import mp2
    from . import rks
    from . import roks
    from . import tdrhf
    from . import tdrks
    from . import tduhf
    from . import tduks
    from . import uccsd
    #from . import uccsd_t
    from . import ucisd
    from . import uks
    from . import ump2
    from .rks import Gradients as RKS
    from .uks import Gradients as UKS
    from .roks import Gradients as ROKS
except ImportError:
    pass
|
gkc1000/pyscf
|
pyscf/grad/__init__.py
|
Python
|
apache-2.0
| 1,723
|
[
"PySCF"
] |
c5dcaf61e68a8744a4db538eaa84b34e1aa22cc55ae5abc37058005992bb4d80
|
import cdms2
import os, sys
from harmonic_util import harmonic
# setting the absolute path of the previous directory
# getting the this py module path by __file__ variable
# pass that __file__ to the os.path.dirname, returns the path of this module
__curDir__ = os.path.dirname(__file__)
previousDir = os.path.abspath(os.path.join(__curDir__, '../..'))
# adding the previous path to python path
sys.path.append(previousDir)
from diag_setup.globalconfig import processfilesPath, climatologies
import diag_setup.netcdf_settings
def createHarmonic(inpath, outpath, pfile):
    """
    createHarmonic : This function will create harmonic climatologies.

    inpath : absolute path of climatologies
    outpath : absolute path where harmonic climatologies will be stored
    pfile : climatology partial file name (suffix used to select files)

    Written By : Arulalan.T
    Date : 26.10.2014
    """
    ncfiles = [f for f in os.listdir(inpath) if f.endswith(pfile)]
    for fname in ncfiles:
        # variable name is the leading component of the file name
        varName = fname.split('.')[0]
        # get climatology data
        infilepath = os.path.join(inpath, fname)
        inf = cdms2.open(infilepath)
        cdata = inf(varName)
        # apply harmonic over the above climatology data:
        # sum of mean and first three harmonics of the climatology
        hdata = harmonic(cdata, k=3, time_type='daily', phase_shift=15)
        # make memory free
        del cdata
        # BUG FIX: list.insert() mutates in place and returns None, so the
        # original "'.'.join(outfname.insert(-1, 'harmonic'))" raised
        # TypeError.  Insert the marker first, then join.
        parts = fname.split('.')
        parts.insert(-1, 'harmonic')
        outfname = '.'.join(parts)
        outfpath = os.path.join(outpath, outfname)
        # BUG FIX: cdms2.open() defaults to read mode; open the output file
        # explicitly in write mode so outf.write() can succeed.
        outf = cdms2.open(outfpath, 'w')
        outf.write(hdata)
        outf.close()
        inf.close()
        # make memory free
        del hdata
    # end of for fname in ncfiles:
# end of def createHarmonic(inpath, outpath, pfile):
if __name__ == '__main__':
for climatology in climatologies:
if climatology.dfile and climatolgy.name.lower() == 'miso':
# daily climatolgy path
climatologyPath = os.path.join(climatology.path, 'Daily')
# calling below fn to create daily harmonic climatology
createHarmonic(climatologyPath, climatologyPath,
climatology.dfile)
else:
print "In configure.txt climatolgyname is not 'miso' or \
climpartialdayfile not mentioned. \
So can not compute daily harmonic climatolgies."
# end of if climatology.dfile and ...:
# end of for climatology in climatologies:
print "Done! Creation of Daily harmonic climatolgies netCdf Files"
# end of if __name__ == '__main__':
|
arulalant/mmDiagnosis
|
diagnosis1/miso/harmonic/cal_harmonic_climatology.py
|
Python
|
gpl-3.0
| 2,875
|
[
"NetCDF"
] |
bd18fcf2f15f6cfc977682a44e08031f3915525172fc2d89ac13e4706b1864fb
|
import numpy as np
from ase import Atoms, Atom
# Smoke test for basic Atoms container operations: construction,
# concatenation, appending, fancy indexing and deletion.
a = Atoms([Atom('Cu')])
a.positions[:] += 1.0
print(a.get_positions(), a.positions)
# '+' and '+=' concatenate Atoms objects (and accept single Atom / empty Atoms).
a=a+a
a+=a
a.append(Atom('C'))
a += Atoms([])
a += Atom('H', magmom=1)
print(a.get_initial_magnetic_moments())
print(a[0].number)
# Fancy indexing: index lists, boolean masks and slices all yield Atoms.
print(a[[0,1]].get_atomic_numbers())
print(a[np.array([1,1,0,0,1], bool)].get_atomic_numbers())
print(a[::2].get_atomic_numbers())
print(a.get_chemical_symbols())
# Deletion by single index and by slice.
del a[2]
print(a.get_chemical_symbols())
del a[-2:]
print(a.get_chemical_symbols())
|
suttond/MODOI
|
ase/test/build.py
|
Python
|
lgpl-3.0
| 509
|
[
"ASE"
] |
c5833bc9d4fc17590caa668371c04b0473e405f39b556471a9f754cb0ddf00c3
|
"""
Gaussian Mixture Models.
This implementation corresponds to frequentist (non-Bayesian) formulation
of Gaussian Mixture Models.
"""
# Author: Ron Weiss <ronweiss@gmail.com>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Bertrand Thirion <bertrand.thirion@inria.fr>
import warnings
import numpy as np
from scipy import linalg
from time import time
from ..base import BaseEstimator
from ..utils import check_random_state, check_array
from ..utils.extmath import logsumexp
from ..utils.validation import check_is_fitted
from .. import cluster
from sklearn.externals.six.moves import zip
# Machine epsilon for float64; used as a guard against division by zero
# and log(0) throughout this module.
EPS = np.finfo(float).eps
def log_multivariate_normal_density(X, means, covars, covariance_type='diag'):
    """Evaluate the log-density of each sample under each Gaussian.

    Parameters
    ----------
    X : array_like, shape (n_samples, n_features)
        Data points, one per row.
    means : array_like, shape (n_components, n_features)
        One n_features-dimensional mean vector per mixture component.
    covars : array_like
        Covariance parameters; the shape depends on `covariance_type`:
        (n_components, n_features) if 'spherical',
        (n_features, n_features) if 'tied',
        (n_components, n_features) if 'diag',
        (n_components, n_features, n_features) if 'full'
    covariance_type : string
        One of 'spherical', 'tied', 'diag' (default) or 'full'.

    Returns
    -------
    lpr : array_like, shape (n_samples, n_components)
        Log probability of each sample in X under each of the
        n_components multivariate Gaussian distributions.
    """
    density_funcs = {
        'spherical': _log_multivariate_normal_density_spherical,
        'tied': _log_multivariate_normal_density_tied,
        'diag': _log_multivariate_normal_density_diag,
        'full': _log_multivariate_normal_density_full,
    }
    # An unknown covariance_type raises KeyError here.
    density = density_funcs[covariance_type]
    return density(X, means, covars)
def sample_gaussian(mean, covar, covariance_type='diag', n_samples=1,
                    random_state=None):
    """Draw random samples from a single Gaussian distribution.

    Parameters
    ----------
    mean : array_like, shape (n_features,)
        Mean of the distribution.
    covar : array_like, optional
        Covariance of the distribution; its shape depends on
        `covariance_type`: scalar if 'spherical', (n_features,) if 'diag',
        (n_features, n_features) if 'tied' or 'full'.
    covariance_type : string, optional
        One of 'spherical', 'tied', 'diag' (default), 'full'.
    n_samples : int, optional
        Number of samples to generate. Defaults to 1.

    Returns
    -------
    X : array, shape (n_features, n_samples)
        Randomly generated samples (1-D of length n_features when
        n_samples == 1).
    """
    rng = check_random_state(random_state)
    n_dim = len(mean)
    samples = rng.randn(n_dim, n_samples)
    if n_samples == 1:
        samples.shape = (n_dim,)
    if covariance_type == 'spherical':
        samples *= np.sqrt(covar)
    elif covariance_type == 'diag':
        samples = np.dot(np.diag(np.sqrt(covar)), samples)
    else:
        # 'tied'/'full': scale by a matrix square root of covar obtained
        # from its eigendecomposition.
        eig_vals, eig_vecs = linalg.eigh(covar)
        eig_vals.clip(0, out=eig_vals)  # get rid of tiny negatives
        np.sqrt(eig_vals, out=eig_vals)
        eig_vecs *= eig_vals
        samples = np.dot(eig_vecs, samples)
    return (samples.T + mean).T
class GMM(BaseEstimator):
    """Gaussian Mixture Model.
    Representation of a Gaussian mixture model probability distribution.
    This class allows for easy evaluation of, sampling from, and
    maximum-likelihood estimation of the parameters of a GMM distribution.
    Initializes parameters such that every mixture component has zero
    mean and identity covariance.
    Read more in the :ref:`User Guide <gmm>`.
    Parameters
    ----------
    n_components : int, optional
        Number of mixture components. Defaults to 1.
    covariance_type : string, optional
        String describing the type of covariance parameters to
        use. Must be one of 'spherical', 'tied', 'diag', 'full'.
        Defaults to 'diag'.
    random_state: RandomState or an int seed (None by default)
        A random number generator instance
    min_covar : float, optional
        Floor on the diagonal of the covariance matrix to prevent
        overfitting. Defaults to 1e-3.
    tol : float, optional
        Convergence threshold. EM iterations will stop when average
        gain in log-likelihood is below this threshold. Defaults to 1e-3.
    n_iter : int, optional
        Number of EM iterations to perform.
    n_init : int, optional
        Number of initializations to perform. The best results is kept.
    params : string, optional
        Controls which parameters are updated in the training
        process. Can contain any combination of 'w' for weights,
        'm' for means, and 'c' for covars. Defaults to 'wmc'.
    init_params : string, optional
        Controls which parameters are updated in the initialization
        process. Can contain any combination of 'w' for weights,
        'm' for means, and 'c' for covars. Defaults to 'wmc'.
    verbose : int, default: 0
        Enable verbose output. If 1 then it always prints the current
        initialization and iteration step. If greater than 1 then
        it prints additionally the change and time needed for each step.
    Attributes
    ----------
    weights_ : array, shape (`n_components`,)
        This attribute stores the mixing weights for each mixture component.
    means_ : array, shape (`n_components`, `n_features`)
        Mean parameters for each mixture component.
    covars_ : array
        Covariance parameters for each mixture component. The shape
        depends on `covariance_type`::
            (n_components, n_features) if 'spherical',
            (n_features, n_features) if 'tied',
            (n_components, n_features) if 'diag',
            (n_components, n_features, n_features) if 'full'
    converged_ : bool
        True when convergence was reached in fit(), False otherwise.
    See Also
    --------
    DPGMM : Infinite gaussian mixture model, using the Dirichlet
        process, fit with a variational algorithm
    VBGMM : Finite gaussian mixture model fit with a variational
        algorithm, better for situations where there might be too little
        data to get a good estimate of the covariance matrix.
    Examples
    --------
    >>> import numpy as np
    >>> from sklearn import mixture
    >>> np.random.seed(1)
    >>> g = mixture.GMM(n_components=2)
    >>> # Generate random observations with two modes centered on 0
    >>> # and 10 to use for training.
    >>> obs = np.concatenate((np.random.randn(100, 1),
    ...                       10 + np.random.randn(300, 1)))
    >>> g.fit(obs) # doctest: +NORMALIZE_WHITESPACE
    GMM(covariance_type='diag', init_params='wmc', min_covar=0.001,
            n_components=2, n_init=1, n_iter=100, params='wmc',
            random_state=None, tol=0.001, verbose=0)
    >>> np.round(g.weights_, 2)
    array([ 0.75, 0.25])
    >>> np.round(g.means_, 2)
    array([[ 10.05],
           [ 0.06]])
    >>> np.round(g.covars_, 2) # doctest: +SKIP
    array([[[ 1.02]],
           [[ 0.96]]])
    >>> g.predict([[0], [2], [9], [10]]) # doctest: +ELLIPSIS
    array([1, 1, 0, 0]...)
    >>> np.round(g.score([[0], [2], [9], [10]]), 2)
    array([-2.19, -4.58, -1.75, -1.21])
    >>> # Refit the model on new data (initial parameters remain the
    >>> # same), this time with an even split between the two modes.
    >>> g.fit(20 * [[0]] + 20 * [[10]]) # doctest: +NORMALIZE_WHITESPACE
    GMM(covariance_type='diag', init_params='wmc', min_covar=0.001,
            n_components=2, n_init=1, n_iter=100, params='wmc',
            random_state=None, tol=0.001, verbose=0)
    >>> np.round(g.weights_, 2)
    array([ 0.5, 0.5])
    """
    def __init__(self, n_components=1, covariance_type='diag',
                 random_state=None, tol=1e-3, min_covar=1e-3,
                 n_iter=100, n_init=1, params='wmc', init_params='wmc',
                 verbose=0):
        self.n_components = n_components
        self.covariance_type = covariance_type
        self.tol = tol
        self.min_covar = min_covar
        self.random_state = random_state
        self.n_iter = n_iter
        self.n_init = n_init
        self.params = params
        self.init_params = init_params
        self.verbose = verbose
        if covariance_type not in ['spherical', 'tied', 'diag', 'full']:
            raise ValueError('Invalid value for covariance_type: %s' %
                             covariance_type)
        if n_init < 1:
            raise ValueError('GMM estimation requires at least one run')
        # Start from uniform mixing weights; _fit() may re-initialize them.
        self.weights_ = np.ones(self.n_components) / self.n_components
        # flag to indicate exit status of fit() method: converged (True) or
        # n_iter reached (False)
        self.converged_ = False
    def _get_covars(self):
        """Covariance parameters for each mixture component.
        The shape depends on ``cvtype``::
            (n_states, n_features) if 'spherical',
            (n_features, n_features) if 'tied',
            (n_states, n_features) if 'diag',
            (n_states, n_features, n_features) if 'full'
        """
        if self.covariance_type == 'full':
            return self.covars_
        elif self.covariance_type == 'diag':
            return [np.diag(cov) for cov in self.covars_]
        elif self.covariance_type == 'tied':
            return [self.covars_] * self.n_components
        elif self.covariance_type == 'spherical':
            return [np.diag(cov) for cov in self.covars_]
    def _set_covars(self, covars):
        """Provide values for covariance."""
        covars = np.asarray(covars)
        _validate_covars(covars, self.covariance_type, self.n_components)
        self.covars_ = covars
    def score_samples(self, X):
        """Return the per-sample likelihood of the data under the model.
        Compute the log probability of X under the model and
        return the posterior distribution (responsibilities) of each
        mixture component for each element of X.
        Parameters
        ----------
        X: array_like, shape (n_samples, n_features)
            List of n_features-dimensional data points. Each row
            corresponds to a single data point.
        Returns
        -------
        logprob : array_like, shape (n_samples,)
            Log probabilities of each data point in X.
        responsibilities : array_like, shape (n_samples, n_components)
            Posterior probabilities of each mixture component for each
            observation
        """
        check_is_fitted(self, 'means_')
        X = check_array(X)
        if X.ndim == 1:
            X = X[:, np.newaxis]
        if X.size == 0:
            return np.array([]), np.empty((0, self.n_components))
        if X.shape[1] != self.means_.shape[1]:
            raise ValueError('The shape of X is not compatible with self')
        # lpr[i, k] = log p(x_i | component k) + log w_k (joint log-density).
        lpr = (log_multivariate_normal_density(X, self.means_, self.covars_,
                                               self.covariance_type) +
               np.log(self.weights_))
        logprob = logsumexp(lpr, axis=1)
        responsibilities = np.exp(lpr - logprob[:, np.newaxis])
        return logprob, responsibilities
    def score(self, X, y=None):
        """Compute the log probability under the model.
        Parameters
        ----------
        X : array_like, shape (n_samples, n_features)
            List of n_features-dimensional data points. Each row
            corresponds to a single data point.
        Returns
        -------
        logprob : array_like, shape (n_samples,)
            Log probabilities of each data point in X
        """
        logprob, _ = self.score_samples(X)
        return logprob
    def predict(self, X):
        """Predict label for data.
        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
        Returns
        -------
        C : array, shape = (n_samples,) component memberships
        """
        logprob, responsibilities = self.score_samples(X)
        return responsibilities.argmax(axis=1)
    def predict_proba(self, X):
        """Predict posterior probability of data under each Gaussian
        in the model.
        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
        Returns
        -------
        responsibilities : array-like, shape = (n_samples, n_components)
            Returns the probability of the sample for each Gaussian
            (state) in the model.
        """
        logprob, responsibilities = self.score_samples(X)
        return responsibilities
    def sample(self, n_samples=1, random_state=None):
        """Generate random samples from the model.
        Parameters
        ----------
        n_samples : int, optional
            Number of samples to generate. Defaults to 1.
        Returns
        -------
        X : array_like, shape (n_samples, n_features)
            List of samples
        """
        check_is_fitted(self, 'means_')
        if random_state is None:
            random_state = self.random_state
        random_state = check_random_state(random_state)
        weight_cdf = np.cumsum(self.weights_)
        X = np.empty((n_samples, self.means_.shape[1]))
        rand = random_state.rand(n_samples)
        # decide which component to use for each sample
        comps = weight_cdf.searchsorted(rand)
        # for each component, generate all needed samples
        for comp in range(self.n_components):
            # occurrences of current component in X
            comp_in_X = (comp == comps)
            # number of those occurrences
            num_comp_in_X = comp_in_X.sum()
            if num_comp_in_X > 0:
                if self.covariance_type == 'tied':
                    cv = self.covars_
                elif self.covariance_type == 'spherical':
                    cv = self.covars_[comp][0]
                else:
                    cv = self.covars_[comp]
                X[comp_in_X] = sample_gaussian(
                    self.means_[comp], cv, self.covariance_type,
                    num_comp_in_X, random_state=random_state).T
        return X
    def fit_predict(self, X, y=None):
        """Fit and then predict labels for data.
        Warning: Due to the final maximization step in the EM algorithm,
        with low iterations the prediction may not be 100% accurate.
        .. versionadded:: 0.17
           *fit_predict* method in Gaussian Mixture Model.
        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
        Returns
        -------
        C : array, shape = (n_samples,) component memberships
        """
        return self._fit(X, y).argmax(axis=1)
    def _fit(self, X, y=None, do_prediction=False):
        """Estimate model parameters with the EM algorithm.
        A initialization step is performed before entering the
        expectation-maximization (EM) algorithm. If you want to avoid
        this step, set the keyword argument init_params to the empty
        string '' when creating the GMM object. Likewise, if you would
        like just to do an initialization, set n_iter=0.
        Parameters
        ----------
        X : array_like, shape (n, n_features)
            List of n_features-dimensional data points. Each row
            corresponds to a single data point.
        Returns
        -------
        responsibilities : array, shape (n_samples, n_components)
            Posterior probabilities of each mixture component for each
            observation.
        """
        # initialization step
        X = check_array(X, dtype=np.float64, ensure_min_samples=2,
                        estimator=self)
        if X.shape[0] < self.n_components:
            raise ValueError(
                'GMM estimation with %s components, but got only %s samples' %
                (self.n_components, X.shape[0]))
        # Best mean log-likelihood seen across the n_init restarts.
        max_log_prob = -np.infty
        if self.verbose > 0:
            print('Expectation-maximization algorithm started.')
        for init in range(self.n_init):
            if self.verbose > 0:
                print('Initialization ' + str(init + 1))
                start_init_time = time()
            if 'm' in self.init_params or not hasattr(self, 'means_'):
                self.means_ = cluster.KMeans(
                    n_clusters=self.n_components,
                    random_state=self.random_state).fit(X).cluster_centers_
                if self.verbose > 1:
                    print('\tMeans have been initialized.')
            if 'w' in self.init_params or not hasattr(self, 'weights_'):
                self.weights_ = np.tile(1.0 / self.n_components,
                                        self.n_components)
                if self.verbose > 1:
                    print('\tWeights have been initialized.')
            if 'c' in self.init_params or not hasattr(self, 'covars_'):
                cv = np.cov(X.T) + self.min_covar * np.eye(X.shape[1])
                if not cv.shape:
                    cv.shape = (1, 1)
                self.covars_ = \
                    distribute_covar_matrix_to_match_covariance_type(
                        cv, self.covariance_type, self.n_components)
                if self.verbose > 1:
                    print('\tCovariance matrices have been initialized.')
            # EM algorithms
            current_log_likelihood = None
            # reset self.converged_ to False
            self.converged_ = False
            for i in range(self.n_iter):
                if self.verbose > 0:
                    print('\tEM iteration ' + str(i + 1))
                    start_iter_time = time()
                prev_log_likelihood = current_log_likelihood
                # Expectation step
                log_likelihoods, responsibilities = self.score_samples(X)
                current_log_likelihood = log_likelihoods.mean()
                # Check for convergence.
                if prev_log_likelihood is not None:
                    change = abs(current_log_likelihood - prev_log_likelihood)
                    if self.verbose > 1:
                        print('\t\tChange: ' + str(change))
                    if change < self.tol:
                        self.converged_ = True
                        if self.verbose > 0:
                            print('\t\tEM algorithm converged.')
                        break
                # Maximization step
                self._do_mstep(X, responsibilities, self.params,
                               self.min_covar)
                if self.verbose > 1:
                    print('\t\tEM iteration ' + str(i + 1) + ' took {0:.5f}s'.format(
                        time() - start_iter_time))
            # if the results are better, keep it
            if self.n_iter:
                if current_log_likelihood > max_log_prob:
                    max_log_prob = current_log_likelihood
                    best_params = {'weights': self.weights_,
                                   'means': self.means_,
                                   'covars': self.covars_}
                    if self.verbose > 1:
                        print('\tBetter parameters were found.')
            if self.verbose > 1:
                print('\tInitialization ' + str(init + 1) + ' took {0:.5f}s'.format(
                    time() - start_init_time))
        # check the existence of an init param that was not subject to
        # likelihood computation issue.
        if np.isneginf(max_log_prob) and self.n_iter:
            raise RuntimeError(
                "EM algorithm was never able to compute a valid likelihood " +
                "given initial parameters. Try different init parameters " +
                "(or increasing n_init) or check for degenerate data.")
        if self.n_iter:
            self.covars_ = best_params['covars']
            self.means_ = best_params['means']
            self.weights_ = best_params['weights']
        else: # self.n_iter == 0 occurs when using GMM within HMM
            # Need to make sure that there are responsibilities to output
            # Output zeros because it was just a quick initialization
            responsibilities = np.zeros((X.shape[0], self.n_components))
        return responsibilities
    def fit(self, X, y=None):
        """Estimate model parameters with the EM algorithm.
        A initialization step is performed before entering the
        expectation-maximization (EM) algorithm. If you want to avoid
        this step, set the keyword argument init_params to the empty
        string '' when creating the GMM object. Likewise, if you would
        like just to do an initialization, set n_iter=0.
        Parameters
        ----------
        X : array_like, shape (n, n_features)
            List of n_features-dimensional data points. Each row
            corresponds to a single data point.
        Returns
        -------
        self
        """
        self._fit(X, y)
        return self
    def _do_mstep(self, X, responsibilities, params, min_covar=0):
        """Perform the Mstep of the EM algorithm and return the cluster weights.
        """
        # Soft counts per component; the epsilon guards divide-by-zero.
        weights = responsibilities.sum(axis=0)
        weighted_X_sum = np.dot(responsibilities.T, X)
        inverse_weights = 1.0 / (weights[:, np.newaxis] + 10 * EPS)
        if 'w' in params:
            self.weights_ = (weights / (weights.sum() + 10 * EPS) + EPS)
        if 'm' in params:
            self.means_ = weighted_X_sum * inverse_weights
        if 'c' in params:
            covar_mstep_func = _covar_mstep_funcs[self.covariance_type]
            self.covars_ = covar_mstep_func(
                self, X, responsibilities, weighted_X_sum, inverse_weights,
                min_covar)
        return weights
    def _n_parameters(self):
        """Return the number of free parameters in the model."""
        ndim = self.means_.shape[1]
        if self.covariance_type == 'full':
            cov_params = self.n_components * ndim * (ndim + 1) / 2.
        elif self.covariance_type == 'diag':
            cov_params = self.n_components * ndim
        elif self.covariance_type == 'tied':
            cov_params = ndim * (ndim + 1) / 2.
        elif self.covariance_type == 'spherical':
            cov_params = self.n_components
        mean_params = ndim * self.n_components
        return int(cov_params + mean_params + self.n_components - 1)
    def bic(self, X):
        """Bayesian information criterion for the current model fit
        and the proposed data.
        Parameters
        ----------
        X : array of shape(n_samples, n_dimensions)
        Returns
        -------
        bic: float (the lower the better)
        """
        return (-2 * self.score(X).sum() +
                self._n_parameters() * np.log(X.shape[0]))
    def aic(self, X):
        """Akaike information criterion for the current model fit
        and the proposed data.
        Parameters
        ----------
        X : array of shape(n_samples, n_dimensions)
        Returns
        -------
        aic: float (the lower the better)
        """
        return - 2 * self.score(X).sum() + 2 * self._n_parameters()
#########################################################################
# some helper routines
#########################################################################
def _log_multivariate_normal_density_diag(X, means, covars):
    """Compute Gaussian log-density at X for a diagonal model."""
    # Expand ||x - mu||^2 / sigma^2 into a constant, mean, cross and
    # quadratic term so everything vectorizes over samples and components.
    _, n_dim = X.shape
    inv_covars = 1.0 / covars
    const_term = n_dim * np.log(2 * np.pi) + np.log(covars).sum(axis=1)
    mean_term = ((means ** 2) * inv_covars).sum(axis=1)
    cross_term = -2.0 * np.dot(X, (means * inv_covars).T)
    quad_term = np.dot(X ** 2, inv_covars.T)
    return -0.5 * (const_term + mean_term + cross_term + quad_term)
def _log_multivariate_normal_density_spherical(X, means, covars):
    """Compute Gaussian log-density at X for a spherical model.

    Each component has a single variance shared by all features; expand it
    to a per-feature diagonal and defer to the diagonal implementation.
    """
    cv = covars.copy()
    if covars.ndim == 1:
        cv = cv[:, np.newaxis]
    # BUG FIX: test the (possibly reshaped) working array instead of the raw
    # input -- `covars.shape[1]` raised IndexError for 1-D `covars`.
    if cv.shape[1] == 1:
        cv = np.tile(cv, (1, X.shape[-1]))
    return _log_multivariate_normal_density_diag(X, means, cv)
def _log_multivariate_normal_density_tied(X, means, covars):
    """Compute Gaussian log-density at X for a tied model."""
    # Replicate the single shared covariance once per component, then
    # evaluate with the full-covariance routine.
    n_components = means.shape[0]
    shared = np.tile(covars, (n_components, 1, 1))
    return _log_multivariate_normal_density_full(X, means, shared)
def _log_multivariate_normal_density_full(X, means, covars, min_covar=1.e-7):
    """Log probability for full covariance matrices."""
    n_samples, n_dim = X.shape
    n_mix = len(means)
    log_prob = np.empty((n_samples, n_mix))
    for k, (mu, cov) in enumerate(zip(means, covars)):
        try:
            chol = linalg.cholesky(cov, lower=True)
        except linalg.LinAlgError:
            # The component is most probably degenerate (too few
            # observations); retry with a small ridge on the diagonal.
            try:
                chol = linalg.cholesky(cov + min_covar * np.eye(n_dim),
                                       lower=True)
            except linalg.LinAlgError:
                raise ValueError("'covars' must be symmetric, "
                                 "positive-definite")
        log_det = 2 * np.log(np.diagonal(chol)).sum()
        solved = linalg.solve_triangular(chol, (X - mu).T, lower=True).T
        log_prob[:, k] = - .5 * ((solved ** 2).sum(axis=1) +
                                 n_dim * np.log(2 * np.pi) + log_det)
    return log_prob
def _validate_covars(covars, covariance_type, n_components):
    """Do basic checks on matrix covariance sizes and values."""
    from scipy import linalg
    if covariance_type == 'spherical':
        if len(covars) != n_components:
            raise ValueError("'spherical' covars have length n_components")
        if np.any(covars <= 0):
            raise ValueError("'spherical' covars must be non-negative")
    elif covariance_type == 'tied':
        if covars.shape[0] != covars.shape[1]:
            raise ValueError("'tied' covars must have shape (n_dim, n_dim)")
        is_symmetric = np.allclose(covars, covars.T)
        if not is_symmetric or np.any(linalg.eigvalsh(covars) <= 0):
            raise ValueError("'tied' covars must be symmetric, "
                             "positive-definite")
    elif covariance_type == 'diag':
        if covars.ndim != 2:
            raise ValueError("'diag' covars must have shape "
                             "(n_components, n_dim)")
        if np.any(covars <= 0):
            raise ValueError("'diag' covars must be non-negative")
    elif covariance_type == 'full':
        if covars.ndim != 3:
            raise ValueError("'full' covars must have shape "
                             "(n_components, n_dim, n_dim)")
        if covars.shape[1] != covars.shape[2]:
            raise ValueError("'full' covars must have shape "
                             "(n_components, n_dim, n_dim)")
        for idx, mat in enumerate(covars):
            if (not np.allclose(mat, mat.T)
                    or np.any(linalg.eigvalsh(mat) <= 0)):
                raise ValueError("component %d of 'full' covars must be "
                                 "symmetric, positive-definite" % idx)
    else:
        raise ValueError("covariance_type must be one of " +
                         "'spherical', 'tied', 'diag', 'full'")
def distribute_covar_matrix_to_match_covariance_type(
        tied_cv, covariance_type, n_components):
    """Create all the covariance matrices from a given template."""
    if covariance_type == 'spherical':
        # One averaged variance per feature, repeated for each component.
        return np.tile(tied_cv.mean() * np.ones(tied_cv.shape[1]),
                       (n_components, 1))
    if covariance_type == 'tied':
        return tied_cv
    if covariance_type == 'diag':
        return np.tile(np.diag(tied_cv), (n_components, 1))
    if covariance_type == 'full':
        return np.tile(tied_cv, (n_components, 1, 1))
    raise ValueError("covariance_type must be one of " +
                     "'spherical', 'tied', 'diag', 'full'")
def _covar_mstep_diag(gmm, X, responsibilities, weighted_X_sum, norm,
                      min_covar):
    """Perform the covariance M step for diagonal cases."""
    # Per-feature variance: E[x^2] - 2*E[x]*mu + mu^2, plus a floor.
    second_moment = np.dot(responsibilities.T, X * X) * norm
    cross_moment = gmm.means_ * weighted_X_sum * norm
    return second_moment - 2 * cross_moment + gmm.means_ ** 2 + min_covar
def _covar_mstep_spherical(*args):
    """Perform the covariance M step for spherical cases."""
    # Average the per-feature diagonal variances into one scalar per
    # component, then broadcast it back to full feature width.
    diag_cv = _covar_mstep_diag(*args)
    mean_var = diag_cv.mean(axis=1)[:, np.newaxis]
    return np.tile(mean_var, (1, diag_cv.shape[1]))
def _covar_mstep_full(gmm, X, responsibilities, weighted_X_sum, norm,
                      min_covar):
    """Perform the covariance M step for full cases."""
    # Eq. 12 from K. Murphy, "Fitting a Conditional Linear Gaussian
    # Distribution"
    n_features = X.shape[1]
    eps = np.finfo(float).eps  # same value as the module-level EPS
    cv = np.empty((gmm.n_components, n_features, n_features))
    for comp in range(gmm.n_components):
        post = responsibilities[:, comp]
        centered = X - gmm.means_[comp]
        with np.errstate(under='ignore'):
            # Underflow errors in doing post * centered.T are not important
            weighted_outer = np.dot(post * centered.T, centered)
            avg_cv = weighted_outer / (post.sum() + 10 * eps)
        cv[comp] = avg_cv + min_covar * np.eye(n_features)
    return cv
def _covar_mstep_tied(gmm, X, responsibilities, weighted_X_sum, norm,
                      min_covar):
    """Perform the covariance M step for tied cases."""
    # Eq. 15 from K. Murphy, "Fitting a Conditional Linear Gaussian
    # Distribution"
    data_outer = np.dot(X.T, X)
    mean_outer = np.dot(gmm.means_.T, weighted_X_sum)
    out = (data_outer - mean_outer) / X.shape[0]
    # Add the variance floor along the diagonal only.
    out.flat[::len(out) + 1] += min_covar
    return out
# Dispatch table mapping covariance_type to its M-step implementation;
# used by GMM._do_mstep.
_covar_mstep_funcs = {'spherical': _covar_mstep_spherical,
                      'diag': _covar_mstep_diag,
                      'tied': _covar_mstep_tied,
                      'full': _covar_mstep_full,
                      }
|
ssaeger/scikit-learn
|
sklearn/mixture/gmm.py
|
Python
|
bsd-3-clause
| 30,702
|
[
"Gaussian"
] |
e440c09286574d29490f9f8430ba67e68b612a63f5c8969907c704f1fe4e1121
|
#
# Copyright (c) 2017 nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/scancode-toolkit/
# The ScanCode software is licensed under the Apache License version 2.0.
# Data generated with ScanCode require an acknowledgment.
# ScanCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with ScanCode or any ScanCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# ScanCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# ScanCode is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/scancode-toolkit/ for support and download.
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import os
from commoncode.testcase import FileBasedTesting
from licensedcode import cache
from licensedcode import index
from licensedcode import models
from licensedcode.models import Rule
from licensedcode.query import Query
TEST_DATA_DIR = os.path.join(os.path.dirname(__file__), 'data')
class IndexTesting(FileBasedTesting):
test_data_dir = TEST_DATA_DIR
def get_test_rules(self, base, subset=None):
base = self.get_test_loc(base)
test_files = sorted(os.listdir(base))
if subset:
test_files = [t for t in test_files if t in subset]
return [Rule(text_file=os.path.join(base, license_key), licenses=[license_key])
for license_key in test_files]
class TestQueryWithSingleRun(IndexTesting):
def test_Query_tokens_by_line_from_string(self):
rule_text = 'Redistribution and use in source and binary forms with or without modification are permitted'
idx = index.LicenseIndex([Rule(_text=rule_text, licenses=['bsd'])])
querys = '''
The
Redistribution and use in source and binary are permitted
Athena capital of Grece
Paris and Athene
Always'''
qry = Query(query_string=querys, idx=idx, _test_mode=True)
result = list(qry.tokens_by_line())
expected = [
[],
[None],
[11, 0, 6, 4, 3, 0, 1, 9, 2],
[],
[None, None, None, None],
[None, 0, None],
[None],
]
assert expected == result
# convert tid to actual token strings
qtbl_as_str = lambda qtbl: [[None if tid is None else idx.tokens_by_tid[tid] for tid in tids] for tids in qtbl]
result_str = qtbl_as_str(result)
expected_str = [
[],
[None],
['redistribution', 'and', 'use', 'in', 'source', 'and', 'binary', 'are', 'permitted'],
[],
[None, None, None, None],
[None, 'and', None],
[None],
]
assert expected_str == result_str
assert [3, 3, 3, 3, 3, 3, 3, 3, 3, 6] == qry.line_by_pos
idx = index.LicenseIndex([Rule(_text=rule_text, licenses=['bsd'])])
querys = 'and this is not a license'
qry = Query(query_string=querys, idx=idx, _test_mode=True)
result = list(qry.tokens_by_line())
expected = [['and', None, None, None, None, None]]
assert expected == qtbl_as_str(result)
def test_Query_tokenize_from_string(self):
rule_text = 'Redistribution and use in source and binary forms with or without modification are permitted'
idx = index.LicenseIndex([Rule(_text=rule_text, licenses=['bsd'])])
querys = '''
The
Redistribution and use in source and binary are permitted.
Athena capital of Grece
Paris and Athene
Always'''
qry = Query(query_string=querys, idx=idx, _test_mode=True)
qry.tokenize_and_build_runs(qry.tokens_by_line())
# convert tid to actual token strings
tks_as_str = lambda tks: [None if tid is None else idx.tokens_by_tid[tid] for tid in tks]
expected = ['redistribution', 'and', 'use', 'in', 'source', 'and', 'binary', 'are', 'permitted', 'and']
result = tks_as_str(qry.tokens)
assert expected == result
expected = [None, 'redistribution', 'and', 'use', 'in', 'source', 'and', 'binary', 'are', 'permitted', None, None, None, None, None, 'and', None, None]
result = tks_as_str(qry.tokens_with_unknowns())
assert expected == result
assert 1 == len(qry.query_runs)
qr1 = qry.query_runs[0]
assert 0 == qr1.start
assert 9 == qr1.end
assert 10 == len(qr1)
expected = ['redistribution', 'and', 'use', 'in', 'source', 'and', 'binary', 'are', 'permitted', 'and']
result = tks_as_str(qr1.tokens)
assert expected == result
expected = [None, 'redistribution', 'and', 'use', 'in', 'source', 'and', 'binary', 'are', 'permitted', None, None, None, None, None, 'and']
result = tks_as_str(qr1.tokens_with_unknowns())
assert expected == result
def test_QueryRuns_tokens_with_unknowns(self):
rule_text = 'Redistribution and use in source and binary forms with or without modification are permitted'
idx = index.LicenseIndex([Rule(_text=rule_text, licenses=['bsd'])])
querys = '''
The
Redistribution and use in source and binary are permitted.
Athena capital of Grece
Paris and Athene
Always'''
qry = Query(query_string=querys, idx=idx)
assert set([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) == set(qry.matchables)
assert 1 == len(qry.query_runs)
qrun = qry.query_runs[0]
# convert tid to actual token strings
tks_as_str = lambda tks: [None if tid is None else idx.tokens_by_tid[tid] for tid in tks]
expected = ['redistribution', 'and', 'use', 'in', 'source', 'and', 'binary', 'are', 'permitted', 'and']
assert expected == tks_as_str(qrun.tokens)
expected = [None, 'redistribution', 'and', 'use', 'in', 'source', 'and', 'binary', 'are', 'permitted', None, None, None, None, None, 'and']
assert expected == tks_as_str(qrun.tokens_with_unknowns())
assert 0 == qrun.start
assert 9 == qrun.end
def test_QueryRun_does_not_end_with_None(self):
rule_text = 'Redistribution and use in source and binary forms, with or without modification, are permitted'
idx = index.LicenseIndex([Rule(_text=rule_text, licenses=['bsd'])])
querys = '''
The
Redistribution and use in source and binary forms, with or without modification, are permitted.
Always
bar
modification
foo
'''
# convert tid to actual token strings
tks_as_str = lambda tks: [None if tid is None else idx.tokens_by_tid[tid] for tid in tks]
qry = Query(query_string=querys, idx=idx)
expected = [
None,
'redistribution', 'and', 'use', 'in', 'source', 'and', 'binary',
'forms', 'with', 'or', 'without', 'modification', 'are', 'permitted',
None, None,
'modification',
None
]
assert [x for x in expected if x] == tks_as_str(qry.tokens)
assert expected == tks_as_str(qry.tokens_with_unknowns())
assert 2 == len(qry.query_runs)
qrun = qry.query_runs[0]
expected = ['redistribution', 'and', 'use', 'in', 'source', 'and', 'binary', 'forms', 'with', 'or', 'without', 'modification', 'are', 'permitted']
assert expected == tks_as_str(qrun.tokens)
assert 0 == qrun.start
assert 13 == qrun.end
qrun = qry.query_runs[1]
expected = ['modification']
assert expected == tks_as_str(qrun.tokens)
assert 14 == qrun.start
assert 14 == qrun.end
def test_Query_from_real_index_and_location(self):
idx = index.LicenseIndex(self.get_test_rules('index/bsd'))
query_loc = self.get_test_loc('index/querytokens')
qry = Query(location=query_loc, idx=idx, line_threshold=4)
result = [qr.to_dict() for qr in qry.query_runs]
expected = [
{'end': 35,
'start': 0,
'tokens': (u'redistribution and use in source and binary forms '
u'redistributions of source code must the this that is not '
u'to redistributions in binary form must this software is '
u'provided by the copyright holders and contributors as is')
},
{'end': 36, 'start': 36, 'tokens': u'redistributions'}]
assert expected == result
expected_lbp = [
4, 4, 4, 4, 4, 4, 4, 4, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 8,
9, 9, 9, 9, 9, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 15
]
assert expected_lbp == qry.line_by_pos
def test_query_and_index_tokens_are_identical_for_same_text(self):
rule_dir = self.get_test_loc('query/rtos_exact/')
from licensedcode.models import load_rules
idx = index.LicenseIndex(load_rules(rule_dir))
query_loc = self.get_test_loc('query/rtos_exact/gpl-2.0-freertos.RULE')
index_text_tokens = [idx.tokens_by_tid[t] for t in idx.tids_by_rid[0]]
qry = Query(location=query_loc, idx=idx, line_threshold=4)
wqry = qry.whole_query_run()
query_text_tokens = [idx.tokens_by_tid[t] for t in wqry.tokens]
assert index_text_tokens == query_text_tokens
assert u' '.join(index_text_tokens) == u' '.join(query_text_tokens)
def test_query_run_tokens_with_junk(self):
ranked_toks = lambda : ['the', 'is', 'a']
idx = index.LicenseIndex([Rule(_text='a is the binary')],
_ranked_tokens=ranked_toks)
assert 2 == idx.len_junk
assert {'a': 0, 'the': 1, 'binary': 2, 'is': 3, } == idx.dictionary
# two junks
q = Query(query_string='a the', idx=idx)
assert q.line_by_pos
qrun = q.query_runs[0]
assert [0, 1] == qrun.tokens
assert {} == qrun.query.unknowns_by_pos
# one junk
q = Query(query_string='a binary', idx=idx)
qrun = q.query_runs[0]
assert q.line_by_pos
assert [0, 2] == qrun.tokens
assert {} == qrun.query.unknowns_by_pos
# one junk
q = Query(query_string='binary the', idx=idx)
qrun = q.query_runs[0]
assert q.line_by_pos
assert [2, 1] == qrun.tokens
assert {} == qrun.query.unknowns_by_pos
# one unknown at start
q = Query(query_string='that binary', idx=idx)
qrun = q.query_runs[0]
assert q.line_by_pos
assert [2] == qrun.tokens
assert {-1: 1} == qrun.query.unknowns_by_pos
# one unknown at end
q = Query(query_string='binary that', idx=idx)
qrun = q.query_runs[0]
assert q.line_by_pos
assert [2] == qrun.tokens
assert {0: 1} == qrun.query.unknowns_by_pos
# onw unknown in the middle
q = Query(query_string='binary that a binary', idx=idx)
qrun = q.query_runs[0]
assert q.line_by_pos
assert [2, 0, 2] == qrun.tokens
assert {0: 1} == qrun.query.unknowns_by_pos
# onw unknown in the middle
q = Query(query_string='a binary that a binary', idx=idx)
qrun = q.query_runs[0]
assert q.line_by_pos
assert [0, 2, 0, 2] == qrun.tokens
assert {1: 1} == qrun.query.unknowns_by_pos
# two unknowns in the middle
q = Query(query_string='binary that was a binary', idx=idx)
qrun = q.query_runs[0]
assert q.line_by_pos
assert [2, 0, 2] == qrun.tokens
assert {0: 2} == qrun.query.unknowns_by_pos
# unknowns at start, middle and end
q = Query(query_string='hello dolly binary that was a binary end really', idx=idx)
# u u u u u u
qrun = q.query_runs[0]
assert q.line_by_pos
assert [2, 0, 2] == qrun.tokens
assert {-1: 2, 0: 2, 2: 2} == qrun.query.unknowns_by_pos
def test_query_tokens_are_same_for_different_text_formatting(self):
test_files = [self.get_test_loc(f) for f in [
'queryformat/license2.txt',
'queryformat/license3.txt',
'queryformat/license4.txt',
'queryformat/license5.txt',
'queryformat/license6.txt',
]]
rule_file = self.get_test_loc('queryformat/license1.txt')
idx = index.LicenseIndex([Rule(text_file=rule_file, licenses=['mit'])])
q = Query(location=rule_file, idx=idx)
assert 1 == len(q.query_runs)
expected = q.query_runs[0]
for tf in test_files:
q = Query(tf, idx=idx)
qr = q.query_runs[0]
assert expected.tokens == qr.tokens
def test_query_run_unknowns(self):
idx = index.LicenseIndex([Rule(_text='a is the binary')])
assert {'a': 0, 'binary': 1, 'is': 2, 'the': 3} == idx.dictionary
assert 2 == idx.len_junk
# multiple unknowns at start, middle and end
q = Query(query_string='that new binary was sure a kind of the real mega deal', idx=idx)
# known pos 0 1 2
# abs pos 0 1 2 3 4 5 6 7 8 9 10 11
expected = {
- 1: 2,
0: 2,
1: 2,
2: 3,
}
assert expected == dict(q.unknowns_by_pos)
class TestQueryWithMultipleRuns(IndexTesting):
def test_query_runs_from_location(self):
idx = index.LicenseIndex(self.get_test_rules('index/bsd'))
query_loc = self.get_test_loc('index/querytokens')
qry = Query(location=query_loc, idx=idx, line_threshold=3)
result = [q.to_dict(brief=True) for q in qry.query_runs]
expected = [
{
'start': 0,
'end': 35,
'tokens': u'redistribution and use in source ... holders and contributors as is'},
{
'start': 36,
'end': 36,
'tokens': u'redistributions'}
]
assert expected == result
def test_query_runs_three_runs(self):
idx = index.LicenseIndex(self.get_test_rules('index/bsd'))
query_loc = self.get_test_loc('index/queryruns')
qry = Query(location=query_loc, idx=idx)
expected = [
{'end': 84,
'start': 0,
'tokens': u'the redistribution and use in ... 2 1 3 c 4'},
{'end': 97,
'start': 85,
'tokens': u'this software is provided by ... holders and contributors as is'},
{'end': 98, 'start': 98, 'tokens': u'redistributions'}
]
result = [q.to_dict(brief=True) for q in qry.query_runs]
assert expected == result
def test_QueryRun(self):
idx = index.LicenseIndex([Rule(_text='redistributions in binary form must redistributions in')])
qry = Query(query_string='redistributions in binary form must redistributions in', idx=idx)
qruns = qry.query_runs
assert 1 == len(qruns)
qr = qruns[0]
# test
result = [idx.tokens_by_tid[tid] for tid in qr.tokens]
expected = ['redistributions', 'in', 'binary', 'form', 'must', 'redistributions', 'in']
assert expected == result
def test_QueryRun_repr(self):
idx = index.LicenseIndex([Rule(_text='redistributions in binary form must redistributions in')])
qry = Query(query_string='redistributions in binary form must redistributions in', idx=idx)
qruns = qry.query_runs
qr = qruns[0]
# test
expected = 'QueryRun(start=0, len=7, start_line=1, end_line=1)'
assert expected == repr(qr)
expected = 'QueryRun(start=0, len=7, start_line=1, end_line=1, tokens="redistributions in binary form must redistributions in")'
assert expected == qr.__repr__(trace_repr=True)
def test_query_runs_text_is_correct(self):
test_rules = self.get_test_rules('query/full_text/idx',)
idx = index.LicenseIndex(test_rules)
query_loc = self.get_test_loc('query/full_text/query')
qry = Query(location=query_loc, idx=idx, line_threshold=3)
qruns = qry.query_runs
result = [[u'<None>' if t is None else idx.tokens_by_tid[t] for t in qr.tokens_with_unknowns()] for qr in qruns]
expected = [
u'<None> <None> <None> this'.split(),
u'''redistribution and use in source and binary forms with or
without modification are permitted provided that the following
conditions are met redistributions of source code must retain the
above copyright notice this list of conditions and the following
disclaimer redistributions in binary form must reproduce the above
copyright notice this list of conditions and the following
disclaimer in the documentation and or other materials provided with
the distribution neither the name of <None> inc nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission this
software is provided by the copyright holders and contributors as is
and any express or implied warranties including but not limited to
the implied warranties of merchantability and fitness for a
particular purpose are disclaimed in no event shall the copyright
owner or contributors be liable for any direct indirect incidental
special exemplary or consequential damages including but not limited
to procurement of substitute goods or services loss of use data or
profits or business interruption however caused and on any theory of
liability whether in contract strict liability or tort including
negligence or otherwise arising in any way out of the use of this
software even if advised of the possibility of such damage'''.split(),
u'no <None> of'.split(),
]
assert expected == result
def test_query_runs_with_plain_rule(self):
rule_text = u'''X11 License
Copyright (C) 1996 X Consortium Permission is hereby granted, free
of charge, to any person obtaining a copy of this software and
associated documentation files (the "Software"), to deal in the
Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom
the Software is furnished to do so, subject to the following
conditions: The above copyright notice and this permission notice
shall be included in all copies or substantial portions of the
Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE X CONSORTIUM BE LIABLE FOR
ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
Except as contained in this notice, the name of the X Consortium
shall not be used in advertising or otherwise to promote the sale,
use or other dealings in this Software without prior written
authorization from the X Consortium. X Window System is a trademark
of X Consortium, Inc.
'''
rule = Rule(_text=rule_text, licenses=['x-consortium'])
idx = index.LicenseIndex([rule])
query_loc = self.get_test_loc('detect/simple_detection/x11-xconsortium_text.txt')
qry = Query(location=query_loc, idx=idx)
result = [q.to_dict(brief=False) for q in qry.query_runs]
expected = [{
'start': 0,
'end': 216,
'tokens':(
u'x11 license copyright c 1996 x consortium permission is hereby '
u'granted free of charge to any person obtaining a copy of this '
u'software and associated documentation files the software to deal in '
u'the software without restriction including without limitation the '
u'rights to use copy modify merge publish distribute sublicense and or '
u'sell copies of the software and to permit persons to whom the '
u'software is furnished to do so subject to the following conditions '
u'the above copyright notice and this permission notice shall be '
u'included in all copies or substantial portions of the software the '
u'software is provided as is without warranty of any kind express or '
u'implied including but not limited to the warranties of '
u'merchantability fitness for a particular purpose and noninfringement '
u'in no event shall the x consortium be liable for any claim damages or '
u'other liability whether in an action of contract tort or otherwise '
u'arising from out of or in connection with the software or the use or '
u'other dealings in the software except as contained in this notice the '
u'name of the x consortium shall not be used in advertising or '
u'otherwise to promote the sale use or other dealings in this software '
u'without prior written authorization from the x consortium x window '
u'system is a trademark of x consortium inc'
)
}]
assert 217 == len(qry.query_runs[0].tokens)
assert expected == result
def test_query_run_has_correct_offset(self):
rule_dir = self.get_test_loc('query/runs/rules')
rules = list(models.load_rules(rule_dir))
idx = index.LicenseIndex(rules)
query_doc = self.get_test_loc('query/runs/query.txt')
q = Query(location=query_doc, idx=idx, line_threshold=4)
result = [qr.to_dict() for qr in q.query_runs]
expected = [
{'end': 0, 'start': 0, 'tokens': u'inc'},
{'end': 123, 'start': 1,
'tokens': (
u'this library is free software you can redistribute it and or modify '
u'it under the terms of the gnu library general public license as '
u'published by the free software foundation either version 2 of the '
u'license or at your option any later version this library is '
u'distributed in the hope that it will be useful but without any '
u'warranty without even the implied warranty of merchantability or '
u'fitness for a particular purpose see the gnu library general public '
u'license for more details you should have received a copy of the gnu '
u'library general public license along with this library see the file '
u'copying lib if not write to the free software foundation inc 51 '
u'franklin street fifth floor boston ma 02110 1301 usa')
}
]
assert expected == result
def test_query_run_and_tokenizing_breaking_works__with_plus_as_expected(self):
rule_dir = self.get_test_loc('query/run_breaking/rules')
rules = list(models.load_rules(rule_dir))
idx = index.LicenseIndex(rules)
query_doc = self.get_test_loc('query/run_breaking/query.txt')
q = Query(query_doc, idx=idx)
result = [qr.to_dict() for qr in q.query_runs]
expected = [
{'end': 121, 'start': 0,
'tokens':
'this library is free software you can redistribute it '
'and or modify it under the terms of the gnu library '
'general public license as published by the free software '
'foundation either version 2 of the license or at your '
'option any later version this library is distributed in '
'the hope that it will be useful but without any warranty '
'without even the implied warranty of merchantability or '
'fitness for a particular purpose see the gnu library '
'general public license for more details you should have '
'received a copy of the gnu library general public '
'license along with this library see the file copying lib '
'if not write to the free software foundation 51 franklin '
'street fifth floor boston ma 02110 1301 usa'}
]
assert expected == result
q.tokens
# check rules token are the same exact set as the set of the last query run
txtid = idx.tokens_by_tid
qrt = [txtid[t] for t in q.query_runs[-1].tokens]
irt = [txtid[t] for t in idx.tids_by_rid[0]]
assert irt == qrt
class TestQueryWithFullIndex(FileBasedTesting):
test_data_dir = TEST_DATA_DIR
def test_query_from_binary_lkms_1(self):
location = self.get_test_loc('query/ath_pci.ko')
idx = cache.get_index()
result = Query(location, idx=idx)
assert len(result.query_runs) < 15
def test_query_from_binary_lkms_2(self):
location = self.get_test_loc('query/eeepc_acpi.ko')
idx = cache.get_index()
result = Query(location, idx=idx)
assert len(result.query_runs) < 500
qrs = result.query_runs[5:10]
assert any('license gpl' in u' '.join(idx.tokens_by_tid[t] for t in qr.matchable_tokens())
for qr in qrs)
def test_query_from_binary_lkms_3(self):
location = self.get_test_loc('query/wlan_xauth.ko')
idx = cache.get_index()
result = Query(location, idx=idx)
assert len(result.query_runs) < 900
qr = result.query_runs[0]
assert 'license dual bsd gpl' in u' '.join(idx.tokens_by_tid[t] for t in qr.matchable_tokens())
def test_query_run_tokens(self):
query_s = u' '.join(u''' 3 unable to create proc entry license gpl
description driver author eric depends 2 6 24 19 generic smp mod module acpi
baridationally register driver proc acpi disabled acpi install notify acpi baridationally get
status cache caches create proc entry baridationally generate proc event acpi evaluate
object acpi remove notify remove proc entry acpi baridationally driver acpi acpi gcc gnu
4 2 3 ubuntu 4 2 3 gcc gnu 4 2 3 ubuntu 4 2 3 current stack pointer current
stack pointer this module end usr src modules acpi include linux include asm
include asm generic include acpi acpi c posix types 32 h types h types h h h
h h
'''.split())
idx = cache.get_index()
result = Query(query_string=query_s, idx=idx)
assert 1 == len(result.query_runs)
qr = result.query_runs[0]
# NOTE: this is not a token present in any rules or licenses
unknown_tokens = ('baridationally',)
assert unknown_tokens not in idx.dictionary
assert u' '.join([t for t in query_s.split() if t not in unknown_tokens]) == u' '.join(idx.tokens_by_tid[t] for t in qr.tokens)
def test_query_run_tokens_matchable(self):
idx = cache.get_index()
# NOTE: this is not a token present in any rules or licenses
unknown_token = u'baridationally'
assert unknown_token not in idx.dictionary
query_s = u' '.join(u'''
3 unable to create proc entry license gpl description driver author eric
depends 2 6 24 19 generic smp mod module acpi baridationally register driver
proc acpi disabled acpi install notify acpi baridationally get status cache
caches create proc entry baridationally generate proc event acpi evaluate
object acpi remove notify remove proc entry acpi baridationally driver acpi
acpi gcc gnu 4 2 3 ubuntu 4 2 3 gcc gnu 4 2 3 ubuntu 4 2 3 current stack
pointer current stack pointer this module end usr src modules acpi include
linux include asm include asm generic include acpi acpi c posix types 32 h
types h types h h h h h
'''.split())
result = Query(query_string=query_s, idx=idx)
assert 1 == len(result.query_runs)
qr = result.query_runs[0]
expected_qr0 = u' '.join(u'''
3 unable to create proc entry license gpl description driver author eric
depends 2 6 24 19 generic smp mod module acpi register driver
proc acpi disabled acpi install notify acpi get status cache
caches create proc entry generate proc event acpi evaluate
object acpi remove notify remove proc entry acpi driver acpi
acpi gcc gnu 4 2 3 ubuntu 4 2 3 gcc gnu 4 2 3 ubuntu 4 2 3 current stack
pointer current stack pointer this module end usr src modules acpi include
linux include asm include asm generic include acpi acpi c posix types 32 h
types h types h h h h h
'''.split())
assert expected_qr0 == u' '.join(idx.tokens_by_tid[t] for t in qr.tokens)
assert expected_qr0 == u' '.join(idx.tokens_by_tid[t] for p, t in enumerate(qr.tokens) if p in qr.matchables)
# only gpl is in high matchables
expected = u'gpl'
assert expected == u' '.join(idx.tokens_by_tid[t] for p, t in enumerate(qr.tokens) if p in qr.high_matchables)
|
yashdsaraf/scancode-toolkit
|
tests/licensedcode/test_query.py
|
Python
|
apache-2.0
| 30,761
|
[
"VisIt"
] |
e40b94291d06f71c02d479ccf452e121c607891f9b716b122da08d65605b9bea
|
from jinja2 import Template
import os
class ConfigBcbio(object):
def __init__(self):
# some runs from ERP002442 study
# normal, cancer
self.list_of_runs = [('ERR256781', 'normal'), ('ERR256782', 'cancer'),
('ERR256783', 'normal'), ('ERR256784', 'cancer'),
('ERR256785', 'normal'), ('ERR256786', 'cancer'),
('ERR256787', 'normal'), ('ERR256788', 'cancer'),
('ERR256789', 'normal'), ('ERR256790', 'cancer')]
self.header = '''
# This yaml file was automaticately generated.
---
details:'''
self.footer = '''
fc_date: '2014-01-06'
fc_name: cancer
upload:
dir: ../final
'''
self.sample = '''
- algorithm:
aligner: bwa
coverage_interval: regional
mark_duplicates: false
recalibrate: false
realign: false
platform: illumina
quality_format: standard
variant_regions: ../input/ERP002442-targeted.bed
variantcaller: freebayes
analysis: variant2
description: {{ sample }}
files:
- ../input/{{ sample }}_1.fastq.gz
- ../input/{{ sample }}_2.fastq.gz
genome_build: GRCh37
metadata:
batch: batch1
phenotype: {{ phenotype }}
'''
def generate_conf(self, data_dir, fname):
f1 = os.path.join(data_dir, fname)
with open(f1, "w") as c_file:
c_file.write(self.header)
t1 = Template(self.sample)
for s, p in self.list_of_runs:
c_file.write(t1.render(sample=s, phenotype=p))
c_file.write(self.footer)
if __name__ == '__main__':
conf = ConfigBcbio()
conf.generate_conf("/home/jkern/scratch/230", 'cancer-normal.yaml')
|
kern3020/incubator
|
cancer-normal-setup/generate_conf.py
|
Python
|
mit
| 1,760
|
[
"BWA"
] |
f66da6f75babacc268bab1f61aa9c8b8705cbc4dfbe218aa14a847c36bce342a
|
#-*- coding: utf-8 -*-
'''
GATK-based variant calling pipeline
It implements a workflow pipeline for next generation
sequencing variant detection using the Broad Institute's GATK
for variant calling.
The pipeline is configured by an options file in a python file
including the actual command which are run at each stage.
'''
import logging
from collections import defaultdict
from utils import parse_and_link
from commands import *
import subprocess
import sys
import os
import argparse
from yaml import load
import glob
fastq_metadata = defaultdict(dict)
def get_logger(logfile, level=logging.INFO):
log_handler = open(logfile, 'w')
logging.basicConfig(stream=log_handler, level=level)
return log_handler
def get_fastq_files(fc_dir, work_dir):
fastq_files = []
fastqz_files = glob.glob('%s/*.fastq.gz' % fc_dir)
if len(fastqz_files) < 1:
exit('At least one sequence file must be specified')
#now let's parse the metadata from each fastq.gz input files and
#construct the symbolic links to them.
for fastqz in fastqz_files:
symb_link = parse_and_link(fastqz, work_dir, fastq_metadata)
fastq_files.append(symb_link)
return fastq_files
def extract_fastq_files(sample_folder):
gz_files = glob.glob('%s/*.fastq.gz' % sample_folder)
for gz_file in gz_files:
subprocess.call(['gzip', '-f', '-d', gz_file])
return glob.glob('%s/*.fastq' % sample_folder)
def next_sample(fastqz_files):
all_sample_folders = [fastq_metadata[os.path.splitext(os.path.basename(fastqz_file))[0]]['out_dir']
for fastqz_file in fastqz_files]
for sample in list(set(all_sample_folders)):
yield sample
def run(global_config, fc_dir, work_dir, tools_dir, workflow_config, reference, dbsnp, is_picard_to_bam):
#1. Get all fasta files and check if there is at least one to process.
sequence_files = get_fastq_files(fc_dir, work_dir)
#2. Create reference database
make_reference_database(workflow_config['indexer']['command'],'bwtsw', reference)
#3.Index the reference
index_reference(reference)
for sample_folder in next_sample(sequence_files):
#4. create output subdirectories
fastqc_dir = make_output_dir(os.path.join(sample_folder, 'fastqc'))
sambam_dir = make_output_dir(os.path.join(sample_folder, 'alignments'))
variant_dir = make_output_dir(os.path.join(sample_folder, 'variant_calls'))
coverage_dir = make_output_dir(os.path.join(sample_folder, 'coverage'))
annovar_dir = make_output_dir(os.path.join(sample_folder, 'annovar'))
results_dir = make_output_dir(os.path.join(sample_folder, 'results'))
#5.Precheck - extracting fastqfiles.
sequence_files = sorted(extract_fastq_files(sample_folder))
#5.Run fastqc on each fastq file.
#fastqc(workflow_config['fastqc']['command'], sequence_files, fastq_metadata, fastqc_dir)
#6. Align sequence to the reference database.
if len(sequence_files) == 2:
#two paired-end fastq files alignment.
fastq_file, pair_file = sequence_files
if can_pipe(workflow_config['seqtk']['command'], fastq_file):
sam_output = align_with_mem(workflow_config['bwamem']['command'], global_config['bwa']['threads'],
reference, fastq_file, pair_file, fastq_metadata, sambam_dir)
else:
#two paired-end fastq files alignment not piped.
sai_fastq_file = align(workflow_config['aligner']['command'], global_config['bwa']['threads'],
reference, fastq_file, fastq_metadata, sambam_dir)
sai_pair_file = align(workflow_config['aligner']['command'], global_config['bwa']['threads'],
reference, pair_file, fastq_metadata, sambam_dir)
sam_output = alignPE2sam(workflow_config['sampe']['command'], reference, fastq_file,
pair_file, sai_fastq_file, sai_pair_file, fastq_metadata, sambam_dir)
elif len(sequence_files) == 1:
#one fastq file alignment.
fastq_file = sequence_files[0]
sai_fastq_file = align(workflow_config['aligner']['command'], global_config['bwa']['threads'],
reference, fastq_file, fastq_metadata, sambam_dir)
sam_output = align2sam(workflow_config['samse']['command'], reference, fastq_file, sai_fastq_file,
fastq_metadata, sambam_dir)
else:
print('Multiple or no fastq files, please check the input files at %s' % sample_folder)
continue
#7. Convert SAM to BAM
if (is_picard_to_bam):
#if it is picard the tool to convert to bam
seq_bam = samP2bam(workflow_config['samP2bam']['command'], global_config['picard']['jvm_opts'],
os.path.join(tools_dir, 'picard-tools-1.109'), sam_output, sambam_dir)
else:
seq_bam = samS2bam(workflow_config['samS2bam']['command'], global_config['samtools']['memory'],
global_config['samtools']['threads'], sam_output, sambam_dir)
#After using samtools to convert to SAM let's use the samtools index to index it.
index_bam = indexbam(workflow_config['bamindexer']['command'], seq_bam, sambam_dir)
#7. Mark PCR Duplicates
marked_seq_bam = dedup(workflow_config['markduplicates']['command'], global_config['picard']['jvm_opts'],
os.path.join(tools_dir, 'picard-tools-1.109'), seq_bam, sambam_dir)
'''
#8. Find suspect intervals for realignment.
bam_list = realign_intervals(workflow_config['realigner']['command'], global_config['gatk']['jvm_opts'],
tools_dir, reference, marked_seq_bam, work_dir)
#9. Run local realignment around indels.
realigned_bam = realign(workflow_config['indelrealigner']['command'], global_config['gatk']['jvm_opts'],
tools_dir, reference, marked_seq_bam, bam_list, work_dir)
#10. Fix mate information
realigned_bam = fix_mate(workflow_config['fixmates']['command'], global_config['picard']['jvm_opts'],
os.path.join(tools_dir, 'picard-tools-1.109'), realigned_bam, work_dir)
#11. Count Covariates
recal_file = base_qual_recal_count(workflow_config['countcovariates']['command'], global_config['gatk']['jvm_opts'],
tools_dir, reference, dbsnp, realigned_bam, work_dir)
#12. Table Recalibration
realigned_bam = base_qual_recal_tabulate(workflow_config['recaltabulate']['command'], global_config['gatk']['jvm_opts'],
tools_dir, reference, recal_file, realigned_bam, work_dir'
#13. Call Snps
#output_vcf = call_snps(workflow_config['callSNPs']['command'], global_config['gatk']['jvm_opts'], global_config['gatk']['threads'],
# tools_dir, reference, dbsnp, '10.0', '10.0', '5000', '3', realigned_bam, work_dir)
output_vcf = '/Users/marcelcaraciolo/Projects/genomika/github/nextgen-pipeline/3806_S10_L001_R1_001.marked.realigned.fixed.recal.vcf'
#14. Filter Snps
filtered_vcf = filter_snps(workflow_config['filterSNPs']['command'], global_config['gatk']['jvm_opts'],
tools_dir, reference, output_vcf, '', work_dir)
#15.Converting the vcf to .annovar format file
annovar_file = convert2annovar(workflow_config['convertAnnovar']['command'], tools_dir, filtered_vcf, work_dir)
#16. Annotate annnovar file using Annovar
annotation_file = annotate(workflow_config['annotate']['command'], tools_dir, annovar_file, work_dir)
#17. Summarize annovar file using Annovar
summary_file = summarize(workflow_config['summarize']['command'], tools_dir, annovar_file,
'1000g2012apr', '6500','137', 'refgene', 'hg19', work_dir)
'''
def parse_cl_args():
    """Build the command-line parser for the sequencing pipeline.

    Returns the configured ``argparse.ArgumentParser``; callers invoke
    ``parse_args`` themselves.
    """
    cwd = os.getcwd()
    parser = argparse.ArgumentParser(
        description='Simple fully automated sequencing analysis')
    # Positional arguments.
    parser.add_argument('global_config',
                        help='Global YAML configuration file specifying details '
                             'about the system')
    parser.add_argument('fc_dir', nargs="?",
                        help='A directory of fastq files to process (optional)')
    parser.add_argument('workflow', nargs='?',
                        help='YAML file with details about the pipeline workflow')
    # Optional arguments; directory options default to the current directory.
    parser.add_argument('--reference', default='hg19',
                        help="Human genome Reference to use as base.")
    parser.add_argument('--dbsnp', help="dbSnp Reference to use as base.")
    parser.add_argument('--workdir', default=cwd,
                        help="Directory to process in. Defaults to current working directory")
    parser.add_argument('--tooldir', default=cwd,
                        help="Directory where the tools are in. Defaults to current working directory")
    parser.add_argument('--bamconverter', choices=['samtools', 'picard'],
                        default='samtools', help='Tool to convert sam 2 bam.')
    parser.add_argument('--logdir', default=cwd,
                        help="Directory where the log files will be stored. Defaults to current working directory")
    return parser
def make_output_dir(dir):
if not os.path.exists(dir):
print('Creating folder %s' % dir)
try:
os.mkdir(dir, 0777)
except IOError, e:
raise IOError('%s\nFailed to make the directory %s' (e, dir))
return dir
def main(args, sys_args, parser):
    """Load configuration, prepare directories and launch the pipeline run.

    args: parsed argparse namespace from parse_cl_args().
    sys_args / parser: raw argv tail and the parser itself (currently unused
    by this function body).
    """
    # read the global config and extract info.
    if os.path.exists(args.global_config):
        with open(args.global_config) as f:
            contents = f.read()
            newConfig = load(contents)
    else:
        raise IOError('GLobal YAML config file not found')
    # Check the input directory; fall back to the current directory.
    if args.fc_dir:
        fc_dir = os.path.abspath(args.fc_dir)
    else:
        fc_dir = os.getcwd()
    # Create the output directory
    work_dir = os.path.abspath(args.workdir)
    make_output_dir(work_dir)
    # samtools is the default sam->bam converter; anything else means picard.
    is_picard_to_bam = False if args.bamconverter == 'samtools' else True
    # Workflow YAML: explicit path, or fall back to ./workflow.yaml.
    if args.workflow:
        with open(args.workflow) as f:
            contents = f.read()
            workflowConfig = load(contents)
    else:
        with open('workflow.yaml') as f:
            contents = f.read()
            workflowConfig = load(contents)
    # --dbsnp has no default, so it is effectively required.
    if not args.dbsnp:
        raise IOError('dbsnp reference file not found')
    # check where tools dir is.
    tools_dir = os.path.abspath(args.tooldir)
    # start the logger
    make_output_dir(args.logdir)
    logger = get_logger(os.path.join(args.logdir, 'pipeline.log'))
    run(newConfig['resources'], fc_dir, work_dir, tools_dir, workflowConfig['stages']['algorithm'], args.reference,
        os.path.abspath(args.dbsnp), is_picard_to_bam)
if __name__ == '__main__':
    # Build the CLI parser and hand the parsed arguments to main().
    parser = parse_cl_args()
    main(parser.parse_args(), sys.argv[1:], parser)
|
marcelcaraciolo/nextgen-pipeline
|
pipeline.py
|
Python
|
mit
| 11,293
|
[
"BWA"
] |
39ca639b32a87bab180db11b6a30ad0e44e4a70eb5b7b196abdfe3a589ccec4e
|
#!/usr/bin/env python
from numpy import deg2rad, rad2deg
from traits.api import HasTraits, Range, Instance, \
on_trait_change, Float, Property, File, Bool, Button
from traitsui.api import \
View, Item, VSplit, VGroup, HSplit, HGroup, Group, Label
from mayavi.core.ui.api import MayaviScene, MlabSceneModel, SceneEditor
from .human import Human
# Names of the Human configuration (joint-angle) variables; used below both
# as slider trait names and as Property dependency lists.
sliders = Human.CFGnames
def format_func(value):
    """Render *value* with three significant digits for read-only display."""
    return format(value, '1.3')
class YeadonGUI(HasTraits):
    """A GUI for the yeadon module, implemented using the traits package."""
    # Input.
    measurement_file_name = File()
    # Drawing options.
    show_mass_center = Bool(False)
    show_inertia_ellipsoid = Bool(False)
    # Configuration variables: one Range slider (in degrees) per Human
    # configuration variable, generated dynamically at class-definition time.
    opts = {'enter_set': True, 'auto_set': True, 'mode': 'slider'}
    for name, bounds in zip(Human.CFGnames, Human.CFGbounds):
        # TODO : Find a better way than using locals here, it may not be a good
        # idea, but I don't know the consequences.
        locals()[name] = Range(float(rad2deg(bounds[0])),
                               float(rad2deg(bounds[1])), 0.0, **opts)
    reset_configuration = Button()
    # Display of Human object properties. Each Property recomputes whenever
    # any configuration slider changes.
    Ixx = Property(Float, depends_on=sliders)
    Ixy = Property(Float, depends_on=sliders)
    Ixz = Property(Float, depends_on=sliders)
    Iyx = Property(Float, depends_on=sliders)
    Iyy = Property(Float, depends_on=sliders)
    Iyz = Property(Float, depends_on=sliders)
    Izx = Property(Float, depends_on=sliders)
    Izy = Property(Float, depends_on=sliders)
    Izz = Property(Float, depends_on=sliders)
    x = Property(Float, depends_on=sliders)
    y = Property(Float, depends_on=sliders)
    z = Property(Float, depends_on=sliders)
    scene = Instance(MlabSceneModel, args=())
    # --- TraitsUI layout definitions ---
    input_group = Group(Item('measurement_file_name'))
    vis_group = Group(Item('scene',
        editor=SceneEditor(scene_class=MayaviScene), height=580, width=430,
        show_label=False))
    config_first_group = Group(
        Item('somersault'),
        Item('tilt'),
        Item('twist'),
        Item('PTsagittalFlexion', label='PT sagittal flexion'),
        Item('PTbending', label='PT bending'),
        Item('TCspinalTorsion', label='TC spinal torsion'),
        Item('TCsagittalSpinalFlexion',
             label='TC sagittal spinal flexion'),
        label='Whole-body, pelvis, torso',
        dock='tab',
    )
    config_upper_group = Group(
        Item('CA1extension', label='CA1 extension'),
        Item('CA1adduction', label='CA1 adduction'),
        Item('CA1rotation', label='CA1 rotation'),
        Item('CB1extension', label='CB1 extension'),
        Item('CB1abduction', label='CB1 abduction'),
        Item('CB1rotation', label='CB1 rotation'),
        Item('A1A2extension', label='A1A2 extension'),
        Item('B1B2extension', label='B1B2 extension'),
        label='Upper limbs',
        dock='tab',
    )
    config_lower_group = Group(
        Item('PJ1extension', label='PJ1 extension'),
        Item('PJ1adduction', label='PJ1 adduction'),
        Item('PK1extension', label='PK1 extension'),
        Item('PK1abduction', label='PK1 abduction'),
        Item('J1J2flexion', label='J1J2 flexion'),
        Item('K1K2flexion', label='K1K2 flexion'),
        label='Lower limbs',
        dock='tab',
    )
    config_group = VGroup(
        Label('Configuration'),
        Group(config_first_group, config_upper_group, config_lower_group,
              layout='tabbed',
        ),
        Item('reset_configuration', show_label=False),
        Label('P: pelvis (red); T: thorax (orange); C: chest-head (yellow)'),
        Label('A1/A2: left upper arm/forearm-hand; B1/B2: right arm'),
        Label('J1/J2: left thigh/shank-foot; K1/K2: right leg'),
        show_border=True,
    )
    inertia_prop = VGroup(
        Label('Mass center (from origin of coord. sys.) (m):'),
        HGroup(
            Item('x', style='readonly', format_func=format_func),
            Item('y', style='readonly', format_func=format_func),
            Item('z', style='readonly', format_func=format_func)
        ),
        Label('Inertia tensor (about origin, in basis shown) (kg-m^2):'),
        HSplit(  # HSplit 2
            Group(
                Item('Ixx', style='readonly', format_func=format_func),
                Item('Iyx', style='readonly', format_func=format_func),
                Item('Izx', style='readonly', format_func=format_func),
            ),
            Group(
                Item('Ixy', style='readonly', format_func=format_func),
                Item('Iyy', style='readonly', format_func=format_func),
                Item('Izy', style='readonly', format_func=format_func),
            ),
            Group(
                Item('Ixz', style='readonly', format_func=format_func),
                Item('Iyz', style='readonly', format_func=format_func),
                Item('Izz', style='readonly', format_func=format_func)
            ),
        ),  # end HSplit 2
        Label('X, Y, Z axes drawn as red, green, blue arrows, respectively.'),
        show_border=True,
    )  # end VGroup
    view = View(
        VSplit(
            input_group,
            HSplit(vis_group,
                   VSplit(
                       config_group,
                       Item('show_mass_center'),
                       Item('show_inertia_ellipsoid'),
                       inertia_prop
                   )
            ),
        ),
        resizable=True,
        title='Yeadon human inertia model'
    )  # end View
    # Default measurement values used when no measurement file is supplied.
    measPreload = { 'Ls5L' : 0.545, 'Lb2p' : 0.278, 'La5p' : 0.24, 'Ls4L' :
        0.493, 'La5w' : 0.0975, 'Ls4w' : 0.343, 'La5L' : 0.049, 'Lb2L' : 0.2995,
        'Ls4d' : 0.215, 'Lj2p' : 0.581, 'Lb5p' : 0.24, 'Lb5w' : 0.0975, 'Lk8p' :
        0.245, 'Lk8w' : 0.1015, 'Lj5L' : 0.878, 'La6w' : 0.0975, 'Lk1L' : 0.062,
        'La6p' : 0.2025, 'Lk1p' : 0.617, 'La6L' : 0.0805, 'Ls5p' : 0.375, 'Lj5p' :
        0.2475, 'Lk8L' : 0.1535, 'Lb5L' : 0.049, 'La3p' : 0.283, 'Lj9w' : 0.0965,
        'La4w' : 0.055, 'Ls6L' : 0.152, 'Lb0p' : 0.337, 'Lj8w' : 0.1015, 'Lk2p' :
        0.581, 'Ls6p' : 0.53, 'Lj9L' : 0.218, 'La3L' : 0.35, 'Lj8p' : 0.245, 'Lj3L'
        : 0.449, 'La4p' : 0.1685, 'Lk3L' : 0.449, 'Lb3p' : 0.283, 'Ls7L' : 0.208,
        'Ls7p' : 0.6, 'Lb3L' : 0.35, 'Lk3p' : 0.3915, 'La4L' : 0.564, 'Lj8L' :
        0.1535, 'Lj3p' : 0.3915, 'Lk4L' : 0.559, 'La1p' : 0.2915, 'Lb6p' : 0.2025,
        'Lj6L' : 0.05, 'Lb6w' : 0.0975, 'Lj6p' : 0.345, 'Lb6L' : 0.0805, 'Ls0p' :
        0.97, 'Ls0w' : 0.347, 'Lj6d' : 0.122, 'Ls8L' : 0.308, 'Lk5L' : 0.878,
        'La2p' : 0.278, 'Lj9p' : 0.215, 'Ls1L' : 0.176, 'Lj1L' : 0.062, 'Lb1p' :
        0.2915, 'Lj1p' : 0.617, 'Ls1p' : 0.865, 'Ls1w' : 0.317, 'Lk4p' : 0.34,
        'Lk5p' : 0.2475, 'La2L' : 0.2995, 'Lb4w' : 0.055, 'Lb4p' : 0.1685, 'Lk9p' :
        0.215, 'Lk9w' : 0.0965, 'Ls2p' : 0.845, 'Lj4L' : 0.559, 'Ls2w' : 0.285,
        'Lk6L' : 0.05, 'La7w' : 0.047, 'La7p' : 0.1205, 'La7L' : 0.1545, 'Lk6p' :
        0.345, 'Ls2L' : 0.277, 'Lj4p' : 0.34, 'Lk6d' : 0.122, 'Lk9L' : 0.218,
        'Lb4L' : 0.564, 'La0p' : 0.337, 'Ls3w' : 0.296, 'Ls3p' : 0.905, 'Lb7p' :
        0.1205, 'Lb7w' : 0.047, 'Lj7p' : 0.252, 'Lb7L' : 0.1545, 'Ls3L' : 0.388,
        'Lk7p' : 0.252 }

    def __init__(self, meas_in=None):
        """Create the Human model (from *meas_in* or the preloaded defaults)
        and draw it in the mayavi scene."""
        HasTraits.__init__(self, trait_value=True)
        # NOTE(review): these assignments bind a *local* variable, not
        # self.measurement_file_name, so they have no effect — presumably
        # `self.measurement_file_name = ...` was intended; confirm.
        if meas_in:
            measurement_file_name = meas_in
        else:
            measurement_file_name = 'Path to measurement input text file.'
        self.H = Human(meas_in if meas_in else self.measPreload)
        self._init_draw_human()

    def _init_draw_human(self):
        # Draw the full human plus any optional overlays currently enabled.
        self.H.draw(self.scene.mlab, True)
        if self.show_mass_center:
            self.H._draw_mayavi_mass_center_sphere(self.scene.mlab)
        if self.show_inertia_ellipsoid:
            self.H._draw_mayavi_inertia_ellipsoid(self.scene.mlab)

    @on_trait_change('scene.activated')
    def set_view(self):
        """Sets a reasonable camera angle for the intial view."""
        self.scene.mlab.view(azimuth=90.0, elevation=-90.0)

    # --- Property getters: expose Human inertia / mass-center entries. ---
    def _get_Ixx(self):
        return self.H.inertia[0, 0]

    def _get_Ixy(self):
        return self.H.inertia[0, 1]

    def _get_Ixz(self):
        return self.H.inertia[0, 2]

    def _get_Iyx(self):
        return self.H.inertia[1, 0]

    def _get_Iyy(self):
        return self.H.inertia[1, 1]

    def _get_Iyz(self):
        return self.H.inertia[1, 2]

    def _get_Izx(self):
        return self.H.inertia[2, 0]

    def _get_Izy(self):
        return self.H.inertia[2, 1]

    def _get_Izz(self):
        return self.H.inertia[2, 2]

    def _get_x(self):
        return self.H.center_of_mass[0, 0]

    def _get_y(self):
        return self.H.center_of_mass[1, 0]

    def _get_z(self):
        return self.H.center_of_mass[2, 0]

    @on_trait_change('measurement_file_name')
    def _update_measurement_file_name(self):
        # Must convert to str (from unicode), because Human parses it
        # differently depending on its type, and there's no consideration for
        # it being unicode.
        self.H = Human(str(self.measurement_file_name))
        self.scene.mlab.clf()
        self._init_draw_human()

    @on_trait_change('show_inertia_ellipsoid')
    def _update_show_inertia_ellipsoid(self):
        # Toggle the inertia-ellipsoid overlay on/off.
        if self.show_inertia_ellipsoid:
            self.H._draw_mayavi_inertia_ellipsoid(self.scene.mlab)
        else:
            self.H._ellipsoid_mesh.remove()

    def _maybe_update_inertia_ellipsoid(self):
        if self.show_inertia_ellipsoid:
            self.H._update_mayavi_inertia_ellipsoid()

    @on_trait_change('show_mass_center')
    def _update_show_mass_center(self):
        # Toggle the mass-center sphere overlay on/off.
        if self.show_mass_center:
            self.H._draw_mayavi_mass_center_sphere(self.scene.mlab)
        else:
            self.H._mass_center_sphere.remove()

    def _maybe_update_mass_center(self):
        if self.show_mass_center:
            self.H._update_mayavi_mass_center_sphere()

    @on_trait_change('reset_configuration')
    def _update_reset_configuration(self):
        # TODO: This is really slow because it sets every trait one by one. It
        # would be nice to set them all to zero and only call the redraw once.
        for cfg in sliders:
            setattr(self, cfg, self.trait(cfg).default_value()[1])

    # --- Slider handlers: each pushes the new angle (converted to radians)
    # into the Human model and redraws only the affected segments. ---
    @on_trait_change('somersault')
    def _update_somersault(self):
        self.H.set_CFG('somersault', deg2rad(self.somersault))
        self._update_mayavi(['P', 'T', 'C', 'A1', 'A2', 'B1', 'B2', 'J1', 'J2',
                             'K1', 'K2'])
        self._maybe_update_mass_center()
        self._maybe_update_inertia_ellipsoid()

    @on_trait_change('tilt')
    def _update_tilt(self):
        self.H.set_CFG('tilt', deg2rad(self.tilt))
        self._update_mayavi(['P', 'T', 'C', 'A1', 'A2', 'B1', 'B2', 'J1', 'J2',
                             'K1', 'K2'])
        self._maybe_update_mass_center()
        self._maybe_update_inertia_ellipsoid()

    @on_trait_change('twist')
    def _update_twist(self):
        self.H.set_CFG('twist', deg2rad(self.twist))
        self._update_mayavi(['P', 'T', 'C', 'A1', 'A2', 'B1', 'B2', 'J1', 'J2',
                             'K1', 'K2'])
        self._maybe_update_mass_center()
        self._maybe_update_inertia_ellipsoid()

    @on_trait_change('PTsagittalFlexion')
    def _update_PTsagittalFlexion(self):
        self.H.set_CFG('PTsagittalFlexion', deg2rad(self.PTsagittalFlexion))
        self._update_mayavi(['T', 'C', 'A1', 'A2', 'B1', 'B2'])
        self._maybe_update_mass_center()
        self._maybe_update_inertia_ellipsoid()

    @on_trait_change('PTbending')
    def _update_PTFrontalFlexion(self):
        self.H.set_CFG('PTbending', deg2rad(self.PTbending))
        self._update_mayavi(['T', 'C', 'A1', 'A2', 'B1', 'B2'])
        self._maybe_update_mass_center()
        self._maybe_update_inertia_ellipsoid()

    @on_trait_change('TCspinalTorsion')
    def _update_TCSpinalTorsion(self):
        self.H.set_CFG('TCspinalTorsion', deg2rad(self.TCspinalTorsion))
        self._update_mayavi(['C', 'A1', 'A2', 'B1', 'B2'])
        self._maybe_update_mass_center()
        self._maybe_update_inertia_ellipsoid()

    @on_trait_change('TCsagittalSpinalFlexion')
    def _update_TCLateralSpinalFlexion(self):
        self.H.set_CFG('TCsagittalSpinalFlexion',
                       deg2rad(self.TCsagittalSpinalFlexion))
        self._update_mayavi(['C', 'A1', 'A2', 'B1', 'B2'])
        self._maybe_update_mass_center()
        self._maybe_update_inertia_ellipsoid()

    @on_trait_change('CA1extension')
    def _update_CA1extension(self):
        self.H.set_CFG('CA1extension', deg2rad(self.CA1extension))
        self._update_mayavi(['A1', 'A2'])
        self._maybe_update_mass_center()
        self._maybe_update_inertia_ellipsoid()

    @on_trait_change('CA1adduction')
    def _update_CA1adduction(self):
        self.H.set_CFG('CA1adduction', deg2rad(self.CA1adduction))
        self._update_mayavi(['A1', 'A2'])
        self._maybe_update_mass_center()
        self._maybe_update_inertia_ellipsoid()

    @on_trait_change('CA1rotation')
    def _update_CA1rotation(self):
        self.H.set_CFG('CA1rotation', deg2rad(self.CA1rotation))
        self._update_mayavi(['A1', 'A2'])
        self._maybe_update_mass_center()
        self._maybe_update_inertia_ellipsoid()

    @on_trait_change('CB1extension')
    def _update_CB1extension(self):
        self.H.set_CFG('CB1extension', deg2rad(self.CB1extension))
        self._update_mayavi(['B1', 'B2'])
        self._maybe_update_mass_center()
        self._maybe_update_inertia_ellipsoid()

    @on_trait_change('CB1abduction')
    def _update_CB1abduction(self):
        self.H.set_CFG('CB1abduction', deg2rad(self.CB1abduction))
        self._update_mayavi(['B1', 'B2'])
        self._maybe_update_mass_center()
        self._maybe_update_inertia_ellipsoid()

    @on_trait_change('CB1rotation')
    def _update_CB1rotation(self):
        self.H.set_CFG('CB1rotation', deg2rad(self.CB1rotation))
        self._update_mayavi(['B1', 'B2'])
        self._maybe_update_mass_center()
        self._maybe_update_inertia_ellipsoid()

    @on_trait_change('A1A2extension')
    def _update_A1A2extension(self):
        self.H.set_CFG('A1A2extension', deg2rad(self.A1A2extension))
        self._update_mayavi(['A2'])
        self._maybe_update_mass_center()
        self._maybe_update_inertia_ellipsoid()

    @on_trait_change('B1B2extension')
    def _update_B1B2extension(self):
        self.H.set_CFG('B1B2extension', deg2rad(self.B1B2extension))
        self._update_mayavi(['B2'])
        self._maybe_update_mass_center()
        self._maybe_update_inertia_ellipsoid()

    @on_trait_change('PJ1extension')
    def _update_PJ1extension(self):
        self.H.set_CFG('PJ1extension', deg2rad(self.PJ1extension))
        self._update_mayavi(['J1', 'J2'])
        self._maybe_update_mass_center()
        self._maybe_update_inertia_ellipsoid()

    @on_trait_change('PJ1adduction')
    def _update_PJ1adduction(self):
        self.H.set_CFG('PJ1adduction', deg2rad(self.PJ1adduction))
        self._update_mayavi(['J1', 'J2'])
        self._maybe_update_mass_center()
        self._maybe_update_inertia_ellipsoid()

    @on_trait_change('PK1extension')
    def _update_PK1extension(self):
        self.H.set_CFG('PK1extension', deg2rad(self.PK1extension))
        self._update_mayavi(['K1', 'K2'])
        self._maybe_update_mass_center()
        self._maybe_update_inertia_ellipsoid()

    @on_trait_change('PK1abduction')
    def _update_PK1abduction(self):
        self.H.set_CFG('PK1abduction', deg2rad(self.PK1abduction))
        self._update_mayavi(['K1', 'K2'])
        self._maybe_update_mass_center()
        self._maybe_update_inertia_ellipsoid()

    @on_trait_change('J1J2flexion')
    def _update_J1J2flexion(self):
        self.H.set_CFG('J1J2flexion', deg2rad(self.J1J2flexion))
        self._update_mayavi(['J2'])
        self._maybe_update_mass_center()
        self._maybe_update_inertia_ellipsoid()

    @on_trait_change('K1K2flexion')
    def _update_K1K2flexion(self):
        self.H.set_CFG('K1K2flexion', deg2rad(self.K1K2flexion))
        self._update_mayavi(['K2'])
        self._maybe_update_mass_center()
        self._maybe_update_inertia_ellipsoid()

    def _update_mayavi(self, segments):
        """Updates all of the segments and solids."""
        # Disable rendering on every affected solid first so the scene only
        # repaints once after all meshes are updated.
        for affected in segments:
            seg = self.H.get_segment_by_name(affected)
            for solid in seg.solids:
                solid._mesh.scene.disable_render = True
        for affected in segments:
            self.H.get_segment_by_name(affected)._update_mayavi()
        for affected in segments:
            seg = self.H.get_segment_by_name(affected)
            for solid in seg.solids:
                solid._mesh.scene.disable_render = False
def start_gui(*args, **kwargs):
    """Launch the yeadon GUI.

    The GUI automatically creates a Human, and lets the user modify its
    configuration and observe the resulting change in the human's inertia
    properties.

    Parameters
    ----------
    meas_in : str, optional
        The filename of a measurements file to use for the human.
    """
    YeadonGUI(*args, **kwargs).configure_traits()
if __name__ == '__main__':
    # Launch the GUI when this module is run as a script.
    start_gui()
|
chrisdembia/yeadon
|
yeadon/gui.py
|
Python
|
bsd-3-clause
| 17,580
|
[
"Mayavi"
] |
dda67caa69e6e151e47ec8e2cdbafd0ab8427fd6e5d652ec9177f968ac97fac0
|
"""Command-line interface for crowdastro.
Matthew Alger
The Australian National University
2016
"""
import argparse
import logging
import sys
from crowdastro import __description__
from crowdastro import __version__
from crowdastro import compile_cnn
from crowdastro import consensuses
from crowdastro import generate_annotator_labels
from crowdastro import generate_cnn_outputs
from crowdastro import generate_dataset
from crowdastro import generate_test_sets
from crowdastro import generate_training_data
from crowdastro import import_data
from crowdastro import repack_h5
from crowdastro import test
from crowdastro import train
from crowdastro import train_cnn
def main():
    """Entry point for the crowdastro command-line interface.

    Registers one subcommand per crowdastro submodule, then dispatches the
    parsed subcommand to that submodule's ``_main`` function.
    """
    parser = argparse.ArgumentParser(description=__description__)
    parser.add_argument(
        '--verbose', '--v', '-v', action='store_true', help='verbose output')
    parser.add_argument(
        '--version', action='store_true', help='get version number')
    subparsers = parser.add_subparsers(dest='subcommand')

    # One (name, module, help) row per subcommand; this single table drives
    # both parser registration (order preserved for --help) and dispatch.
    commands = [
        ('compile_cnn', compile_cnn,
         'compile a convolutional neural network'),
        ('consensuses', consensuses,
         'generate Radio Galaxy Zoo consensus classifications'),
        ('generate_annotator_labels', generate_annotator_labels,
         'generates individual annotator labels'),
        ('generate_cnn_outputs', generate_cnn_outputs,
         'generate convolutional neural network training outputs'),
        ('generate_dataset', generate_dataset,
         'generate crowdastro dataset'),
        ('generate_test_sets', generate_test_sets,
         'generate crowdastro galaxy test sets'),
        ('generate_training_data', generate_training_data,
         'generate crowdastro galaxy training data'),
        ('import_data', import_data,
         'import data into crowdastro'),
        ('repack_h5', repack_h5,
         'repacks an HDF5 file'),
        ('test', test,
         'tests classifiers'),
        ('train', train,
         'trains classifiers'),
        ('train_cnn', train_cnn,
         'trains a convolutional neural network'),
    ]
    for name, module, help_text in commands:
        module._populate_parser(subparsers.add_parser(name, help=help_text))

    # http://stackoverflow.com/a/11287731/1105803
    if len(sys.argv) < 2:
        sys.argv.append('--help')

    args = parser.parse_args()
    logging.captureWarnings(True)
    if args.verbose:
        logging.root.setLevel(logging.DEBUG)
    if args.version:
        print(__version__)
        return

    subcommands = {name: module._main for name, module, _ in commands}
    subcommands[args.subcommand](args)
if __name__ == '__main__':
    # Support direct execution (e.g. `python -m crowdastro`).
    main()
|
chengsoonong/crowdastro
|
crowdastro/__main__.py
|
Python
|
mit
| 4,138
|
[
"Galaxy"
] |
0e55f02934e0bb1fb2e9442a2faa8c533116521b8ed59449bda8320924ef8624
|
from build.management.commands.base_build import Command as BaseBuild
from django.conf import settings
from protein.models import Protein, ProteinSegment, ProteinConformation, ProteinState, ProteinFamily
from structure.models import Structure, Rotamer
from structure.functions import BlastSearch
from Bio.Blast import NCBIXML, NCBIWWW
import subprocess, shlex, os
class Command(BaseBuild):
    """Django management command: blastp search against custom protein DBs."""
    help = 'Blastp search custom dbs'

    def add_arguments(self, parser):
        """Register -q (queries), -d (database) and --make_db options."""
        super(Command, self).add_arguments(parser=parser)
        parser.add_argument('-q', help='Query sequence(s) in FASTA format', default=False, type=str, nargs='+')
        parser.add_argument('-d', help='Query database', default=False, type=str)
        parser.add_argument('--make_db', help='''Create and use custom database. Single argument: (Available presets) 1. xtal - only proteins with structure.
                            2. all - all GPCRs
                            3. fasta text input
                            Multiple arguments: specific protein entry names''', default=False, type=str, nargs='+')

    def handle(self, *args, **options):
        """Optionally build a blast database, then run blastp for each query."""
        blastdb = None
        if options['d']:
            blastdb = options['d']  ### FIXME import/parse blast db
        else:
            # Default DB name; matches the file written by --make_db below.
            blastdb = 'blastp_out.fasta'
        if options['make_db']:
            if len(options['make_db']) > 1:
                # Multiple specific entry names: DB creation not implemented yet,
                # so makeblastdb below may run against a missing/stale fasta file.
                prots = Protein.objects.filter(entry_name__in=options['make_db'])
                ### FIXME
            elif len(options['make_db']) == 1:
                prots = []
                fasta = ''
                ### xtal preset
                if options['make_db'] == ['xtal']:
                    # Only parent proteins that have at least one structure.
                    structs = Structure.objects.all()
                    for i in structs:
                        if i.protein_conformation.protein.parent not in prots:
                            prots.append(i.protein_conformation.protein.parent)
                            fasta += '>{}\n{}\n'.format(i.protein_conformation.protein.parent.entry_name, i.protein_conformation.protein.parent.sequence)
                elif options['make_db'] == ['all']:
                    # All class-level GPCR receptor proteins with an accession.
                    receptor_fams = ProteinFamily.objects.filter(name__startswith='Class')
                    prots = Protein.objects.filter(accession__isnull=False, family__parent__parent__parent__in=receptor_fams)
                    for i in prots:
                        fasta += '>{}\n{}\n'.format(i.entry_name, i.sequence)
                else:
                    # Treat the single argument as a raw sequence.
                    fasta += '>{}\n{}\n'.format('single input', options['make_db'][0])
                with open('./blastp_out.fasta', 'w') as f:
                    f.write(fasta)
                make_db_command = shlex.split('makeblastdb -in blastp_out.fasta -dbtype prot -parse_seqids')
                subprocess.call(make_db_command)
        if options['q']:
            for q in options['q']:
                if blastdb:
                    # Custom DB: report only the top hit per query.
                    bs = BlastSearch(blastdb=blastdb, top_results=1)
                    out = bs.run(q)
                    for o in out:
                        print(o[0])
                        print(o[1])
                else:
                    # Default DB: print all hits.
                    bs = BlastSearch()
                    out = bs.run(q)
                    for o in out:
                        for i in o:
                            print(i)
        # if blastdb=='blastp_out.fasta':
        #     files = os.listdir()
        #     for f in files:
        #         if 'blastp_out.fasta' in f:
        #             os.remove(f)
|
protwis/protwis
|
protein/management/commands/blastp.py
|
Python
|
apache-2.0
| 2,860
|
[
"BLAST"
] |
5039f05de929ea269aac2e3f4b117feb2c65e6d136bdb7918cf54d5032800b49
|
#
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import io
import types
import logging
import numbers
import torch
import numpy as np
from zoo.orca.data.ray_xshards import RayXShards
from zoo.orca.learn.pytorch.training_operator import TrainingOperator
from zoo.orca.learn.pytorch.torch_runner import TorchRunner
from zoo.orca.learn.utils import maybe_dataframe_to_xshards, dataframe_to_xshards, \
convert_predict_xshards_to_dataframe, update_predict_xshards, \
process_xshards_of_pandas_dataframe
from zoo.ray import RayContext
import ray
from ray.exceptions import RayActorError
logger = logging.getLogger(__name__)
def check_for_failure(remote_values):
    """Return True iff every remote task completes without a RayActorError.

    remote_values is a list of Ray object refs (e.g. one training call per
    actor). Each finished ref is fetched with ray.get, which re-raises any
    actor failure; a RayActorError is logged and reported as False.
    """
    pending = remote_values
    try:
        while len(pending) > 0:
            done, pending = ray.wait(pending)
            ray.get(done)  # re-raises if the actor died mid-call
        return True
    except RayActorError as exc:
        logger.exception(str(exc))
        return False
def partition_refs_to_creator(partition_refs):
    """Wrap a list of Ray partition object refs into a data_creator callable.

    The returned callable has the (config, batch_size) signature expected by
    TorchRunner: it materialises the partitions on the worker and builds a
    torch DataLoader over them.
    """
    def data_creator(config, batch_size):
        from zoo.orca.data.utils import ray_partitions_get_data_label, index_data, get_size
        from torch.utils.data import Dataset, DataLoader

        class NDArrayDataset(Dataset):
            def __init__(self, x, y):
                self.x = x  # features
                self.y = y  # labels

            def __len__(self):
                return get_size(self.y)

            def __getitem__(self, i):
                return index_data(self.x, i), index_data(self.y, i)

        params = {"batch_size": batch_size, "shuffle": True}
        # Allow standard DataLoader knobs to be overridden via the config dict.
        for arg in ["shuffle", "sampler", "batch_sampler", "num_workers", "collate_fn",
                    "pin_memory", "drop_last", "timeout", "worker_init_fn",
                    "multiprocessing_context"]:
            if arg in config:
                params[arg] = config[arg]
        # Materialise this worker's partitions into (data, label) arrays.
        data, label = ray_partitions_get_data_label(ray.get(partition_refs),
                                                    allow_tuple=False,
                                                    allow_list=False)
        print("Data size on worker: ", len(label))
        dataset = NDArrayDataset(data, label)
        data_loader = DataLoader(dataset, **params)
        return data_loader

    return data_creator
class PyTorchRayEstimator:
    def __init__(
            self,
            *,
            model_creator,
            optimizer_creator,
            loss_creator=None,
            metrics=None,
            scheduler_creator=None,
            training_operator_cls=TrainingOperator,
            initialization_hook=None,
            config=None,
            scheduler_step_freq="batch",
            use_tqdm=False,
            backend="torch_distributed",
            workers_per_node=1):
        """Create the distributed estimator and spin up remote TorchRunners.

        model_creator / optimizer_creator must be functions; loss_creator is
        required unless a custom training_operator_cls is supplied. backend
        selects the distributed strategy: "torch_distributed" (native process
        group) or "horovod". Raises ValueError / Exception on bad arguments.
        """
        # todo remove ray_ctx to run on workers
        ray_ctx = RayContext.get()
        if not (isinstance(model_creator, types.FunctionType) and
                isinstance(optimizer_creator, types.FunctionType)):  # Torch model is also callable.
            raise ValueError(
                "Must provide a function for both model_creator and optimizer_creator")
        self.model_creator = model_creator
        self.optimizer_creator = optimizer_creator
        self.loss_creator = loss_creator
        self.scheduler_creator = scheduler_creator
        self.training_operator_cls = training_operator_cls
        self.scheduler_step_freq = scheduler_step_freq
        self.use_tqdm = use_tqdm
        if not training_operator_cls and not loss_creator:
            raise ValueError("If a loss_creator is not provided, you must "
                             "provide a custom training operator.")
        self.initialization_hook = initialization_hook
        self.config = {} if config is None else config
        worker_config = self.config.copy()
        # Shared constructor kwargs for every remote TorchRunner.
        params = dict(
            model_creator=self.model_creator,
            optimizer_creator=self.optimizer_creator,
            loss_creator=self.loss_creator,
            scheduler_creator=self.scheduler_creator,
            training_operator_cls=self.training_operator_cls,
            scheduler_step_freq=self.scheduler_step_freq,
            use_tqdm=self.use_tqdm,
            config=worker_config,
            metrics=metrics
        )
        if backend == "torch_distributed":
            # Split each Ray node's CPU cores evenly among its workers.
            cores_per_node = ray_ctx.ray_node_cpu_cores // workers_per_node
            num_nodes = ray_ctx.num_ray_nodes * workers_per_node
            RemoteRunner = ray.remote(num_cpus=cores_per_node)(TorchRunner)
            self.remote_workers = [
                RemoteRunner.remote(**params) for i in range(num_nodes)
            ]
            ray.get([
                worker.setup.remote(cores_per_node)
                for i, worker in enumerate(self.remote_workers)
            ])
            # Worker 0 hosts the torch.distributed rendezvous address.
            head_worker = self.remote_workers[0]
            address = ray.get(head_worker.setup_address.remote())
            logger.info(f"initializing pytorch process group on {address}")
            ray.get([
                worker.setup_torch_distribute.remote(address, i, num_nodes)
                for i, worker in enumerate(self.remote_workers)
            ])
        elif backend == "horovod":
            from zoo.orca.learn.horovod.horovod_ray_runner import HorovodRayRunner
            # HorovodRayRunner creates and owns the remote TorchRunner actors.
            self.horovod_runner = HorovodRayRunner(ray_ctx,
                                                  worker_cls=TorchRunner,
                                                  worker_param=params,
                                                  workers_per_node=workers_per_node)
            self.remote_workers = self.horovod_runner.remote_workers
            cores_per_node = self.horovod_runner.cores_per_node
            ray.get([
                worker.setup.remote(cores_per_node)
                for i, worker in enumerate(self.remote_workers)
            ])
            ray.get([
                worker.setup_horovod.remote()
                for i, worker in enumerate(self.remote_workers)
            ])
        else:
            raise Exception("Only \"torch_distributed\" and \"horovod\" are supported "
                            "values of backend, but got {}".format(backend))
        self.num_workers = len(self.remote_workers)
    def train(self,
              data,
              epochs=1,
              batch_size=32,
              profile=False,
              reduce_results=True,
              info=None,
              feature_cols=None,
              label_cols=None):
        """
        See the documentation in
        'zoo.orca.learn.pytorch.estimator.PyTorchRayEstimatorWrapper.fit'.
        """
        from zoo.orca.data import SparkXShards
        # Spark DataFrames are first converted to SparkXShards.
        data, _ = maybe_dataframe_to_xshards(data,
                                             validation_data=None,
                                             feature_cols=feature_cols,
                                             label_cols=label_cols,
                                             mode="fit",
                                             num_workers=self.num_workers)
        if isinstance(data, SparkXShards):
            if data._get_class_name() == 'pandas.core.frame.DataFrame':
                data = process_xshards_of_pandas_dataframe(data, feature_cols, label_cols)
            from zoo.orca.data.utils import process_spark_xshards
            ray_xshards = process_spark_xshards(data, self.num_workers)

            def transform_func(worker, partition_refs):
                data_creator = partition_refs_to_creator(partition_refs)
                # Should not wrap DistributedSampler on DataLoader for SparkXShards input.
                return worker.train_epochs.remote(
                    data_creator, epochs, batch_size, profile, info, False)

            worker_stats = ray_xshards.reduce_partitions_for_actors(self.remote_workers,
                                                                    transform_func)
        else:
            assert isinstance(data, types.FunctionType), \
                "data should be either an instance of SparkXShards or a callable function, but " \
                "got type: {}".format(type(data))
            # NOTE(review): if any worker fails, _train_epochs returns
            # worker_stats=None and the zip below raises — confirm intended.
            success, worker_stats = self._train_epochs(data,
                                                       epochs=epochs,
                                                       batch_size=batch_size,
                                                       profile=profile,
                                                       info=info)
        # Transpose per-worker stats into per-epoch lists.
        epoch_stats = list(map(list, zip(*worker_stats)))
        if reduce_results:
            for i in range(len(epoch_stats)):
                epoch_stats[i] = self._process_stats(epoch_stats[i])
            return epoch_stats
        else:
            return epoch_stats
def _process_stats(self, worker_stats):
stats = {
"num_samples": sum(
stats.pop("num_samples", np.nan) for stats in worker_stats)
}
for stat_key in worker_stats[0]:
if isinstance(worker_stats[0], numbers.Number):
stats[stat_key] = np.nanmean(
[s.get(stat_key, np.nan) for s in worker_stats])
else:
stats[stat_key] = worker_stats[0][stat_key]
return stats
def _train_epochs(self, data_creator, epochs=1, batch_size=32, profile=False, info=None):
params = dict(data_creator=data_creator, epochs=epochs,
batch_size=batch_size, profile=profile, info=info)
remote_worker_stats = []
for i, w in enumerate(self.remote_workers):
stats = w.train_epochs.remote(**params)
remote_worker_stats.append(stats)
success = check_for_failure(remote_worker_stats)
if success:
return success, ray.get(remote_worker_stats)
else:
return success, None
def validate(self,
data,
batch_size=32,
num_steps=None,
profile=False,
info=None,
feature_cols=None,
label_cols=None):
"""
See the documentation in
'zoo.orca.learn.pytorch.estimator.PyTorchRayEstimatorWrapper.evaluate'.
"""
from zoo.orca.data import SparkXShards
data, _ = maybe_dataframe_to_xshards(data,
validation_data=None,
feature_cols=feature_cols,
label_cols=label_cols,
mode="evaluate",
num_workers=self.num_workers)
if isinstance(data, SparkXShards):
if data._get_class_name() == 'pandas.core.frame.DataFrame':
data = process_xshards_of_pandas_dataframe(data, feature_cols, label_cols)
from zoo.orca.data.utils import process_spark_xshards
ray_xshards = process_spark_xshards(data, self.num_workers)
def transform_func(worker, partition_refs):
data_creator = partition_refs_to_creator(partition_refs)
# Should not wrap DistributedSampler on DataLoader for SparkXShards input.
return worker.validate.remote(
data_creator, batch_size, num_steps, profile, info, False)
worker_stats = ray_xshards.reduce_partitions_for_actors(self.remote_workers,
transform_func)
else:
assert isinstance(data, types.FunctionType), \
"data should be either an instance of SparkXShards or a callable function, but " \
"got type: {}".format(type(data))
params = dict(data_creator=data, batch_size=batch_size, num_steps=num_steps,
profile=profile, info=info)
worker_stats = ray.get([w.validate.remote(**params) for w in self.remote_workers])
return self._process_stats(worker_stats)
def _predict_spark_xshards(self, xshards, param):
ray_xshards = RayXShards.from_spark_xshards(xshards)
def transform_func(worker, shards_ref):
data_creator = lambda config, batch_size: shards_ref
return worker.predict.remote(
data_creator, **param)
pred_shards = ray_xshards.transform_shards_with_actors(self.remote_workers,
transform_func)
spark_xshards = pred_shards.to_spark_xshards()
return spark_xshards
def predict(self,
data,
batch_size=32,
feature_cols=None,
profile=False):
from zoo.orca.data import SparkXShards
param = dict(
batch_size=batch_size,
profile=profile
)
from pyspark.sql import DataFrame
if isinstance(data, DataFrame):
xshards, _ = dataframe_to_xshards(data,
validation_data=None,
feature_cols=feature_cols,
label_cols=None,
mode="predict")
pred_shards = self._predict_spark_xshards(xshards, param)
result = convert_predict_xshards_to_dataframe(data, pred_shards)
elif isinstance(data, SparkXShards):
if data._get_class_name() == 'pandas.core.frame.DataFrame':
data = process_xshards_of_pandas_dataframe(data, feature_cols)
pred_shards = self._predict_spark_xshards(data, param)
result = update_predict_xshards(data, pred_shards)
else:
raise ValueError("Only xshards or Spark DataFrame is supported for predict")
return result
def get_model(self):
"""Returns the learned model(s)."""
state = self.get_state_dict()
model = self.model_creator(self.config)
model_state = state["models"][0]
model.load_state_dict(model_state)
return model.module if hasattr(model, "module") else model
def save(self, model_path):
"""Saves the Estimator state to the provided model_path.
:param model_path: (str) Path to save the model.
"""
state_dict = self.get_state_dict()
torch.save(state_dict, model_path)
return model_path
def get_state_dict(self):
stream_ids = [
worker.state_stream.remote()
for worker in self.remote_workers
]
# get the first task id that finished executing.
[stream_id], stream_ids = ray.wait(stream_ids, num_returns=1, timeout=None)
byte_obj = ray.get(stream_id)
_buffer = io.BytesIO(byte_obj)
state_dict = torch.load(
_buffer,
map_location="cpu")
return state_dict
def load(self, model_path):
"""Loads the Estimator and all workers from the provided model_path.
:param model_path: (str) Path to the existing model.
"""
state_dict = torch.load(model_path)
self.load_state_dict(state_dict)
def load_state_dict(self, state_dict, blocking=True):
_buffer = io.BytesIO()
torch.save(state_dict, _buffer)
state_stream = _buffer.getvalue()
state_id = ray.put(state_stream)
remote_calls = [
worker.load_state_stream.remote(state_id)
for worker in self.remote_workers
]
if blocking:
ray.get(remote_calls)
    def shutdown(self, force=False):
        """Shuts down workers and releases resources.

        :param force: when True, skip the graceful shutdown and kill the
            worker actors immediately.
        """
        if not force:
            cleanup = [
                worker.shutdown.remote() for worker in self.remote_workers
            ]
            try:
                ray.get(cleanup)
                # Ask each actor to terminate itself; these calls are
                # deliberately fire-and-forget (the returned ids are not
                # waited on).
                [
                    worker.__ray_terminate__.remote()
                    for worker in self.remote_workers
                ]
            except RayActorError:
                # Graceful path failed: fall back to force-killing the actors.
                logger.warning(
                    "Failed to shutdown gracefully, forcing a shutdown.")
                for worker in self.remote_workers:
                    logger.warning("Killing worker {}.".format(worker))
                    ray.kill(worker)
        else:
            for worker in self.remote_workers:
                logger.debug("Killing worker {}.".format(worker))
                ray.kill(worker)
        self.remote_workers = []
|
intel-analytics/analytics-zoo
|
pyzoo/zoo/orca/learn/pytorch/pytorch_ray_estimator.py
|
Python
|
apache-2.0
| 17,478
|
[
"ORCA"
] |
f0952ed54b5d2d0483ae3a3fff2d74b4e03913635b20c127636b343a37424a4e
|
"""
Histogram-related functions
"""
import contextlib
import functools
import operator
import warnings
import numpy as np
from numpy.core import overrides
__all__ = ['histogram', 'histogramdd', 'histogram_bin_edges']
array_function_dispatch = functools.partial(
overrides.array_function_dispatch, module='numpy')
# range is a keyword argument to many functions, so save the builtin so they can
# use it.
_range = range
def _ptp(x):
    """Peak-to-peak value of x.

    This implementation avoids the problem of signed integer arrays having a
    peak-to-peak value that cannot be represented with the array's data type.
    This function returns an unsigned value for signed integer arrays.

    Parameters
    ----------
    x : ndarray
        Non-empty array.

    Returns
    -------
    The difference ``x.max() - x.min()``, computed with an unsigned dtype
    for signed integer inputs.
    """
    return _unsigned_subtract(x.max(), x.min())
def _hist_bin_sqrt(x, range):
    """
    Square root histogram bin estimator.

    The bin width shrinks with the square root of the data size. Used by
    many programs for its simplicity.

    Parameters
    ----------
    x : array_like
        Input data that is to be histogrammed, trimmed to range. May not
        be empty.

    Returns
    -------
    h : An estimate of the optimal bin width for the given data.
    """
    del range  # unused
    spread = _ptp(x)
    return spread / np.sqrt(x.size)
def _hist_bin_sturges(x, range):
    """
    Sturges histogram bin estimator.

    A very simplistic estimator based on the assumption of normality of the
    data. It performs poorly for non-normal data, especially for large data
    sets, and depends only on the data size.

    Parameters
    ----------
    x : array_like
        Input data that is to be histogrammed, trimmed to range. May not
        be empty.

    Returns
    -------
    h : An estimate of the optimal bin width for the given data.
    """
    del range  # unused
    # Sturges' rule: log2(n) + 1 bins across the data's full spread.
    n_bins = np.log2(x.size) + 1.0
    return _ptp(x) / n_bins
def _hist_bin_rice(x, range):
    """
    Rice histogram bin estimator.

    A simple estimator with no normality assumption. It performs better for
    large data than Sturges but tends to overestimate the number of bins.
    The bin count grows with the cube root of the data size and depends only
    on the size.

    Parameters
    ----------
    x : array_like
        Input data that is to be histogrammed, trimmed to range. May not
        be empty.

    Returns
    -------
    h : An estimate of the optimal bin width for the given data.
    """
    del range  # unused
    # Rice rule: 2 * n**(1/3) bins across the data's full spread.
    n_bins = 2.0 * x.size ** (1.0 / 3)
    return _ptp(x) / n_bins
def _hist_bin_scott(x, range):
    """
    Scott histogram bin estimator.

    The bin width is proportional to the standard deviation of the data and
    inversely proportional to the cube root of the data size (asymptotically
    optimal).

    Parameters
    ----------
    x : array_like
        Input data that is to be histogrammed, trimmed to range. May not
        be empty.

    Returns
    -------
    h : An estimate of the optimal bin width for the given data.
    """
    del range  # unused
    size_factor = (24.0 * np.pi**0.5 / x.size) ** (1.0 / 3.0)
    return size_factor * np.std(x)
def _hist_bin_stone(x, range):
    """
    Histogram bin estimator based on minimizing the estimated integrated squared error (ISE).
    The number of bins is chosen by minimizing the estimated ISE against the unknown true distribution.
    The ISE is estimated using cross-validation and can be regarded as a generalization of Scott's rule.
    https://en.wikipedia.org/wiki/Histogram#Scott.27s_normal_reference_rule
    This paper by Stone appears to be the origination of this rule.
    http://digitalassets.lib.berkeley.edu/sdtr/ucb/text/34.pdf
    Parameters
    ----------
    x : array_like
        Input data that is to be histogrammed, trimmed to range. May not
        be empty.
    range : (float, float)
        The lower and upper range of the bins.
    Returns
    -------
    h : An estimate of the optimal bin width for the given data.
    """
    n = x.size
    ptp_x = _ptp(x)
    # Degenerate data (a single point, or all points identical) gets a
    # zero bin width; the caller treats 0 as "use a single bin".
    if n <= 1 or ptp_x == 0:
        return 0
    def jhat(nbins):
        # Cross-validation estimate of the ISE (up to a constant) for a
        # histogram with `nbins` bins; smaller is better.
        hh = ptp_x / nbins
        p_k = np.histogram(x, bins=nbins, range=range)[0] / n
        return (2 - (n + 1) * p_k.dot(p_k)) / hh
    nbins_upper_bound = max(100, int(np.sqrt(n)))
    # Exhaustively evaluate the criterion for every candidate bin count.
    nbins = min(_range(1, nbins_upper_bound + 1), key=jhat)
    if nbins == nbins_upper_bound:
        # The minimum sits on the search boundary, so a larger count might
        # have scored better.
        warnings.warn("The number of bins estimated may be suboptimal.",
                      RuntimeWarning, stacklevel=3)
    return ptp_x / nbins
def _hist_bin_doane(x, range):
    """
    Doane's histogram bin estimator.

    An improved version of Sturges' formula which works better for
    non-normal data by accounting for the sample skewness. See
    stats.stackexchange.com/questions/55134/doanes-formula-for-histogram-binning

    Parameters
    ----------
    x : array_like
        Input data that is to be histogrammed, trimmed to range. May not
        be empty.

    Returns
    -------
    h : An estimate of the optimal bin width for the given data.
    """
    del range  # unused
    n = x.size
    if n <= 2:
        return 0.0
    sigma = np.std(x)
    if sigma <= 0.0:
        return 0.0
    # Standard error of the sample skewness.
    sg1 = np.sqrt(6.0 * (n - 2) / ((n + 1.0) * (n + 3)))
    # Sample skewness g1 = mean(((x - mean(x)) / sigma)**3), computed
    # in place so only one temporary array is allocated.
    deviations = x - np.mean(x)
    np.true_divide(deviations, sigma, deviations)
    np.power(deviations, 3, deviations)
    g1 = np.mean(deviations)
    return _ptp(x) / (1.0 + np.log2(n) +
                      np.log2(1.0 + np.absolute(g1) / sg1))
def _hist_bin_fd(x, range):
    """
    The Freedman-Diaconis histogram bin estimator.

    Uses the interquartile range (IQR) to estimate the bin width, making it
    a more outlier-robust variation of the Scott rule. Because the IQR
    depends on fewer points than the standard deviation it is less accurate,
    especially for long-tailed distributions. Returns 0 when the IQR is 0.
    The bin width shrinks with the cube root of the data size
    (asymptotically optimal).

    Parameters
    ----------
    x : array_like
        Input data that is to be histogrammed, trimmed to range. May not
        be empty.

    Returns
    -------
    h : An estimate of the optimal bin width for the given data.
    """
    del range  # unused
    q75, q25 = np.percentile(x, [75, 25])
    iqr = q75 - q25
    return 2.0 * iqr * x.size ** (-1.0 / 3.0)
def _hist_bin_auto(x, range):
    """
    Histogram bin estimator that uses the minimum width of the
    Freedman-Diaconis and Sturges estimators when the FD bin width is
    non-zero, and the Sturges estimator otherwise.

    FD is usually the most robust method, but its width tends to be too
    large for small `x` and degenerates when the variance is limited
    (IQR of 0 gives an FD width of 0). Sturges works well for small
    (<1000) datasets and is R's default. Combining them gives good
    off-the-shelf behaviour.

    .. versionchanged:: 1.15.0
    When the IQR is 0, variance-based estimators are unlikely to help, so
    the size-only Sturges estimator is used instead.

    Parameters
    ----------
    x : array_like
        Input data that is to be histogrammed, trimmed to range. May not
        be empty.

    Returns
    -------
    h : An estimate of the optimal bin width for the given data.

    See Also
    --------
    _hist_bin_fd, _hist_bin_sturges
    """
    fd_bw = _hist_bin_fd(x, range)
    sturges_bw = _hist_bin_sturges(x, range)
    del range  # unused
    if not fd_bw:
        # Limited variance (IQR == 0) makes FD degenerate; fall back to
        # the size-only Sturges rule.
        return sturges_bw
    return min(fd_bw, sturges_bw)
# Private dict initialized at module load time
# Maps the estimator names accepted by `histogram(..., bins=<name>)` to the
# corresponding bin-width estimator functions defined above.
_hist_bin_selectors = {'stone': _hist_bin_stone,
                       'auto': _hist_bin_auto,
                       'doane': _hist_bin_doane,
                       'fd': _hist_bin_fd,
                       'rice': _hist_bin_rice,
                       'scott': _hist_bin_scott,
                       'sqrt': _hist_bin_sqrt,
                       'sturges': _hist_bin_sturges}
def _ravel_and_check_weights(a, weights):
    """Check a and weights have matching shapes, and ravel both.

    Boolean arrays are converted to uint8 (with a RuntimeWarning) so that
    the values support subtraction.
    """
    a = np.asarray(a)
    if a.dtype == np.bool_:
        # Booleans are not a "subtractable" dtype; promote to uint8.
        warnings.warn("Converting input from {} to {} for compatibility."
                      .format(a.dtype, np.uint8),
                      RuntimeWarning, stacklevel=3)
        a = a.astype(np.uint8)
    if weights is None:
        return a.ravel(), None
    weights = np.asarray(weights)
    if weights.shape != a.shape:
        raise ValueError(
            'weights should have the same shape as a.')
    return a.ravel(), weights.ravel()
def _get_outer_edges(a, range):
    """
    Determine the outer bin edges to use, from either the data or the range
    argument.
    """
    def _check_finite(lo, hi, origin):
        # Both the user-supplied and the autodetected range must be finite.
        if not (np.isfinite(lo) and np.isfinite(hi)):
            raise ValueError(
                "{} range of [{}, {}] is not finite".format(origin, lo, hi))

    if range is not None:
        first_edge, last_edge = range
        if first_edge > last_edge:
            raise ValueError(
                'max must be larger than min in range parameter.')
        _check_finite(first_edge, last_edge, "supplied")
    elif a.size == 0:
        # Empty input: the range cannot be inferred, so fall back to [0, 1].
        first_edge, last_edge = 0, 1
    else:
        first_edge, last_edge = a.min(), a.max()
        _check_finite(first_edge, last_edge, "autodetected")

    if first_edge == last_edge:
        # Widen a degenerate range to avoid a divide by zero later on.
        first_edge, last_edge = first_edge - 0.5, last_edge + 0.5

    return first_edge, last_edge
def _unsigned_subtract(a, b):
    """
    Subtract two values where a >= b, and produce an unsigned result.

    This is needed when finding the difference between the upper and lower
    bound of an int16 histogram.
    """
    # Map each signed integer type to its unsigned counterpart.
    signed_to_unsigned = {
        np.byte: np.ubyte,
        np.short: np.ushort,
        np.intc: np.uintc,
        np.int_: np.uint,
        np.longlong: np.ulonglong
    }
    dt = np.result_type(a, b)
    unsigned_dt = signed_to_unsigned.get(dt.type)
    if unsigned_dt is None:
        # Not a signed integer type, so an ordinary subtraction is safe.
        return np.subtract(a, b, dtype=dt)
    # We know the inputs are integers, and we are deliberately casting
    # signed to unsigned.
    return np.subtract(a, b, casting='unsafe', dtype=unsigned_dt)
def _get_bin_edges(a, bins, range, weights):
    """
    Computes the bins used internally by `histogram`.

    Parameters
    ==========
    a : ndarray
        Ravelled data array
    bins, range
        Forwarded arguments from `histogram`.
    weights : ndarray, optional
        Ravelled weights array, or None

    Returns
    =======
    bin_edges : ndarray
        Array of bin edges
    uniform_bins : (Number, Number, int):
        The upper bound, lowerbound, and number of bins, used in the optimized
        implementation of `histogram` that works on uniform bins.
    """
    # parse the overloaded bins argument
    n_equal_bins = None
    bin_edges = None

    if isinstance(bins, str):
        # `bins` names an automatic estimator ('auto', 'fd', ...).
        bin_name = bins
        # if `bins` is a string for an automatic method,
        # this will replace it with the number of bins calculated
        if bin_name not in _hist_bin_selectors:
            raise ValueError(
                "{!r} is not a valid estimator for `bins`".format(bin_name))
        if weights is not None:
            raise TypeError("Automated estimation of the number of "
                            "bins is not supported for weighted data")

        first_edge, last_edge = _get_outer_edges(a, range)

        # truncate the range if needed
        if range is not None:
            keep = (a >= first_edge)
            keep &= (a <= last_edge)
            if not np.logical_and.reduce(keep):
                a = a[keep]

        if a.size == 0:
            n_equal_bins = 1
        else:
            # Do not call selectors on empty arrays
            width = _hist_bin_selectors[bin_name](a, (first_edge, last_edge))
            if width:
                n_equal_bins = int(np.ceil(_unsigned_subtract(last_edge, first_edge) / width))
            else:
                # Width can be zero for some estimators, e.g. FD when
                # the IQR of the data is zero.
                n_equal_bins = 1

    elif np.ndim(bins) == 0:
        # `bins` is a scalar: the number of equal-width bins.
        try:
            n_equal_bins = operator.index(bins)
        except TypeError:
            raise TypeError(
                '`bins` must be an integer, a string, or an array')
        if n_equal_bins < 1:
            raise ValueError('`bins` must be positive, when an integer')

        first_edge, last_edge = _get_outer_edges(a, range)

    elif np.ndim(bins) == 1:
        # `bins` is an explicit array of edges; use it as-is after
        # checking monotonicity.
        bin_edges = np.asarray(bins)
        if np.any(bin_edges[:-1] > bin_edges[1:]):
            raise ValueError(
                '`bins` must increase monotonically, when an array')

    else:
        raise ValueError('`bins` must be 1d, when an array')

    if n_equal_bins is not None:
        # gh-10322 means that type resolution rules are dependent on array
        # shapes. To avoid this causing problems, we pick a type now and stick
        # with it throughout.
        bin_type = np.result_type(first_edge, last_edge, a)
        if np.issubdtype(bin_type, np.integer):
            bin_type = np.result_type(bin_type, float)

        # bin edges must be computed
        bin_edges = np.linspace(
            first_edge, last_edge, n_equal_bins + 1,
            endpoint=True, dtype=bin_type)
        return bin_edges, (first_edge, last_edge, n_equal_bins)
    else:
        return bin_edges, None
def _search_sorted_inclusive(a, v):
    """
    Like `searchsorted`, but where the last item in `v` is placed on the right.

    In the context of a histogram, this makes the last bin edge inclusive.
    """
    # All edges except the last use 'left' so each bin is half-open; the
    # final edge uses 'right' so the last bin includes its upper boundary.
    inner = a.searchsorted(v[:-1], 'left')
    last = a.searchsorted(v[-1:], 'right')
    return np.concatenate((inner, last))
def _histogram_bin_edges_dispatcher(a, bins=None, range=None, weights=None):
    # Dispatcher for __array_function__: returns the array-like arguments
    # that determine which implementation of histogram_bin_edges to call.
    return (a, bins, weights)
@array_function_dispatch(_histogram_bin_edges_dispatcher)
def histogram_bin_edges(a, bins=10, range=None, weights=None):
    r"""
    Function to calculate only the edges of the bins used by the `histogram`
    function.

    Parameters
    ----------
    a : array_like
        Input data. The histogram is computed over the flattened array.
    bins : int or sequence of scalars or str, optional
        Either the number of equal-width bins in the given range (10 by
        default), a sequence of explicit bin edges (including the rightmost
        edge, allowing non-uniform widths), or the name of an automatic
        bin-width estimator: 'auto' (max of 'sturges' and 'fd'), 'fd'
        (Freedman-Diaconis, IQR-based and outlier-robust), 'doane'
        (skew-corrected Sturges), 'scott' (standard-deviation based),
        'stone' (cross-validation ISE minimization), 'rice' (size-only,
        cube-root rule), 'sturges' (size-only, log2 rule) or 'sqrt'
        (square-root rule). With an estimator, the bin width is computed
        from the data inside `range`, but the bin count fills the entire
        range. Automatic estimation is not supported for weighted data.
    range : (float, float), optional
        The lower and upper range of the bins; defaults to
        ``(a.min(), a.max())``. Values outside the range are ignored. The
        first element must be less than or equal to the second.
    weights : array_like, optional
        An array of weights of the same shape as `a`. Currently unused by
        all bin estimators, but checked for shape compatibility.

    Returns
    -------
    bin_edges : array of dtype float
        The edges to pass into `histogram`

    See Also
    --------
    histogram

    Examples
    --------
    >>> arr = np.array([0, 0, 0, 1, 2, 3, 3, 4, 5])
    >>> np.histogram_bin_edges(arr, bins='auto', range=(0, 1))
    array([0.  , 0.25, 0.5 , 0.75, 1.  ])
    >>> np.histogram_bin_edges(arr, bins=2)
    array([0. , 2.5, 5. ])

    An array of pre-computed bins is passed through unmodified, which
    allows one set of edges to be computed once and reused across multiple
    histograms for easily comparable results.
    """
    a, weights = _ravel_and_check_weights(a, weights)
    edges, _ = _get_bin_edges(a, bins, range, weights)
    return edges
def _histogram_dispatcher(
        a, bins=None, range=None, normed=None, weights=None, density=None):
    # Dispatcher for __array_function__: returns the array-like arguments
    # that determine which implementation of histogram to call.
    return (a, bins, weights)
@array_function_dispatch(_histogram_dispatcher)
def histogram(a, bins=10, range=None, normed=None, weights=None,
              density=None):
    r"""
    Compute the histogram of a set of data.

    Parameters
    ----------
    a : array_like
        Input data. The histogram is computed over the flattened array.
    bins : int or sequence of scalars or str, optional
        The number of equal-width bins in the given range (10 by default),
        a monotonically increasing array of bin edges (including the
        rightmost edge, allowing non-uniform widths), or the name of an
        automatic bin-width estimator as accepted by `histogram_bin_edges`.
    range : (float, float), optional
        The lower and upper range of the bins; defaults to
        ``(a.min(), a.max())``. Values outside the range are ignored. The
        first element must be less than or equal to the second.
    normed : bool, optional
        .. deprecated:: 1.6.0
        Equivalent to `density`, but produces incorrect results for
        unequal bin widths. Do not use.
    weights : array_like, optional
        An array of weights of the same shape as `a`. Each value
        contributes its associated weight towards the bin count instead
        of 1. If `density` is True, the weights are normalized so that
        the integral of the density over the range remains 1.
    density : bool, optional
        If False, each bin holds the number of samples. If True, each bin
        holds the value of the probability *density* function there,
        normalized so the *integral* over the range is 1. Overrides
        `normed`.

    Returns
    -------
    hist : array
        The values of the histogram. See `density` and `weights` for a
        description of the possible semantics.
    bin_edges : array of dtype float
        Return the bin edges ``(length(hist)+1)``.

    See Also
    --------
    histogramdd, bincount, searchsorted, digitize, histogram_bin_edges

    Notes
    -----
    All but the last (righthand-most) bin is half-open: with edges
    ``[1, 2, 3, 4]`` the first bin is ``[1, 2)`` and the last is ``[3, 4]``,
    which *includes* 4.

    Examples
    --------
    >>> np.histogram([1, 2, 1], bins=[0, 1, 2, 3])
    (array([0, 2, 1]), array([0, 1, 2, 3]))
    >>> np.histogram(np.arange(4), bins=np.arange(5), density=True)
    (array([0.25, 0.25, 0.25, 0.25]), array([0, 1, 2, 3, 4]))
    >>> np.histogram([[1, 2, 1], [1, 0, 1]], bins=[0,1,2,3])
    (array([1, 4, 1]), array([0, 1, 2, 3]))
    """
    a, weights = _ravel_and_check_weights(a, weights)

    bin_edges, uniform_bins = _get_bin_edges(a, bins, range, weights)

    # Counts are integers unless the weights impose another dtype.
    if weights is None:
        count_dtype = np.dtype(np.intp)
    else:
        count_dtype = weights.dtype

    # Process the data chunk-wise to bound the memory used by temporaries.
    CHUNK = 65536

    # The fast bincount path only handles weights castable to double or
    # complex.
    simple_weights = (
        weights is None or
        np.can_cast(weights.dtype, np.double) or
        np.can_cast(weights.dtype, complex)
    )

    if uniform_bins is not None and simple_weights:
        # Fast algorithm for equal bins: map values directly to bin indices
        # (valid because the bin widths are equal) and accumulate with
        # bincount instead of searching the edge array.
        first_edge, last_edge, n_equal_bins = uniform_bins

        # Initialize empty histogram
        counts = np.zeros(n_equal_bins, count_dtype)

        # Scaling factor that turns an offset from first_edge into a
        # (fractional) bin index.
        scale = n_equal_bins / _unsigned_subtract(last_edge, first_edge)

        # Chunked iteration is both faster for large arrays and keeps the
        # memory footprint of the temporaries small.
        for start in _range(0, len(a), CHUNK):
            chunk_a = a[start:start+CHUNK]
            if weights is None:
                chunk_w = None
            else:
                chunk_w = weights[start:start + CHUNK]

            # Only include values in the right range
            in_range = (chunk_a >= first_edge)
            in_range &= (chunk_a <= last_edge)
            if not np.logical_and.reduce(in_range):
                chunk_a = chunk_a[in_range]
                if chunk_w is not None:
                    chunk_w = chunk_w[in_range]

            # This cast ensures no type promotions occur below, which gh-10322
            # make unpredictable. Getting it wrong leads to precision errors
            # like gh-8123.
            chunk_a = chunk_a.astype(bin_edges.dtype, copy=False)

            # Compute the bin indices; values lying exactly on last_edge
            # belong to the final bin.
            fractional = _unsigned_subtract(chunk_a, first_edge) * scale
            indices = fractional.astype(np.intp)
            indices[indices == n_equal_bins] -= 1

            # The index computation is only accurate to within ~1 ULP of the
            # bin edges, so nudge indices that landed one bin off.
            too_high = chunk_a < bin_edges[indices]
            indices[too_high] -= 1
            # The last bin includes the right edge. The other bins do not.
            too_low = ((chunk_a >= bin_edges[indices + 1])
                       & (indices != n_equal_bins - 1))
            indices[too_low] += 1

            # Accumulate the chunk via bincount; complex weights need the
            # real and imaginary parts counted separately.
            if count_dtype.kind == 'c':
                counts.real += np.bincount(indices, weights=chunk_w.real,
                                           minlength=n_equal_bins)
                counts.imag += np.bincount(indices, weights=chunk_w.imag,
                                           minlength=n_equal_bins)
            else:
                counts += np.bincount(indices, weights=chunk_w,
                                      minlength=n_equal_bins).astype(count_dtype)
    else:
        # General path: build a cumulative histogram against the edge array
        # and difference it at the end.
        cumulative = np.zeros(bin_edges.shape, count_dtype)
        if weights is None:
            for start in _range(0, len(a), CHUNK):
                sorted_chunk = np.sort(a[start:start+CHUNK])
                cumulative += _search_sorted_inclusive(sorted_chunk, bin_edges)
        else:
            zero = np.zeros(1, dtype=count_dtype)
            for start in _range(0, len(a), CHUNK):
                chunk_a = a[start:start+CHUNK]
                chunk_w = weights[start:start+CHUNK]
                order = np.argsort(chunk_a)
                sorted_chunk = chunk_a[order]
                sorted_w = chunk_w[order]
                cum_weights = np.concatenate((zero, sorted_w.cumsum()))
                edge_positions = _search_sorted_inclusive(sorted_chunk,
                                                          bin_edges)
                cumulative += cum_weights[edge_positions]
        counts = np.diff(cumulative)

    # density overrides the normed keyword
    if density is not None:
        if normed is not None:
            # 2018-06-13, numpy 1.15.0 (this was not noisily deprecated in 1.6)
            warnings.warn(
                "The normed argument is ignored when density is provided. "
                "In future passing both will result in an error.",
                DeprecationWarning, stacklevel=3)
            normed = None

    if density:
        db = np.array(np.diff(bin_edges), float)
        return counts/db/counts.sum(), bin_edges
    elif normed:
        # 2018-06-13, numpy 1.15.0 (this was not noisily deprecated in 1.6)
        warnings.warn(
            "Passing `normed=True` on non-uniform bins has always been "
            "broken, and computes neither the probability density "
            "function nor the probability mass function. "
            "The result is only correct if the bins are uniform, when "
            "density=True will produce the same result anyway. "
            "The argument will be removed in a future version of "
            "numpy.",
            np.VisibleDeprecationWarning, stacklevel=3)

        # this normalization is incorrect, but kept for backward compatibility
        db = np.array(np.diff(bin_edges), float)
        return counts/(counts*db).sum(), bin_edges
    else:
        if normed is not None:
            # 2018-06-13, numpy 1.15.0 (this was not noisily deprecated in 1.6)
            warnings.warn(
                "Passing normed=False is deprecated, and has no effect. "
                "Consider passing the density argument instead.",
                DeprecationWarning, stacklevel=3)
        return counts, bin_edges
def _histogramdd_dispatcher(sample, bins=None, range=None, normed=None,
weights=None, density=None):
if hasattr(sample, 'shape'): # same condition as used in histogramdd
yield sample
else:
yield from sample
with contextlib.suppress(TypeError):
yield from bins
yield weights
@array_function_dispatch(_histogramdd_dispatcher)
def histogramdd(sample, bins=10, range=None, normed=None, weights=None,
                density=None):
    """
    Compute the multidimensional histogram of some data.
    Parameters
    ----------
    sample : (N, D) array, or (D, N) array_like
        The data to be histogrammed.
        Note the unusual interpretation of sample when an array_like:
        * When an array, each row is a coordinate in a D-dimensional space -
          such as ``histogramdd(np.array([p1, p2, p3]))``.
        * When an array_like, each element is the list of values for single
          coordinate - such as ``histogramdd((X, Y, Z))``.
        The first form should be preferred.
    bins : sequence or int, optional
        The bin specification:
        * A sequence of arrays describing the monotonically increasing bin
          edges along each dimension.
        * The number of bins for each dimension (nx, ny, ... =bins)
        * The number of bins for all dimensions (nx=ny=...=bins).
    range : sequence, optional
        A sequence of length D, each an optional (lower, upper) tuple giving
        the outer bin edges to be used if the edges are not given explicitly in
        `bins`.
        An entry of None in the sequence results in the minimum and maximum
        values being used for the corresponding dimension.
        The default, None, is equivalent to passing a tuple of D None values.
    density : bool, optional
        If False, the default, returns the number of samples in each bin.
        If True, returns the probability *density* function at the bin,
        ``bin_count / sample_count / bin_volume``.
    normed : bool, optional
        An alias for the density argument that behaves identically. To avoid
        confusion with the broken normed argument to `histogram`, `density`
        should be preferred.
    weights : (N,) array_like, optional
        An array of values `w_i` weighing each sample `(x_i, y_i, z_i, ...)`.
        Weights are normalized to 1 if normed is True. If normed is False,
        the values of the returned histogram are equal to the sum of the
        weights belonging to the samples falling into each bin.
    Returns
    -------
    H : ndarray
        The multidimensional histogram of sample x. See normed and weights
        for the different possible semantics.
    edges : list
        A list of D arrays describing the bin edges for each dimension.
    See Also
    --------
    histogram: 1-D histogram
    histogram2d: 2-D histogram
    Examples
    --------
    >>> r = np.random.randn(100,3)
    >>> H, edges = np.histogramdd(r, bins = (5, 8, 4))
    >>> H.shape, edges[0].size, edges[1].size, edges[2].size
    ((5, 8, 4), 6, 9, 5)
    """
    try:
        # Sample is an ND-array.
        N, D = sample.shape
    except (AttributeError, ValueError):
        # Sample is a sequence of 1D arrays.
        sample = np.atleast_2d(sample).T
        N, D = sample.shape
    # From here on there are N samples, each with D coordinates.
    # Per-dimension bookkeeping: bin counts (incl. two outlier bins per
    # axis), bin edges, and bin widths.
    nbin = np.empty(D, int)
    edges = D*[None]
    dedges = D*[None]
    if weights is not None:
        weights = np.asarray(weights)
    try:
        M = len(bins)
        if M != D:
            raise ValueError(
                'The dimension of bins must be equal to the dimension of the '
                ' sample x.')
    except TypeError:
        # bins is an integer
        bins = D*[bins]
    # normalize the range argument
    if range is None:
        range = (None,) * D
    elif len(range) != D:
        raise ValueError('range argument must have one entry per dimension')
    # Create edge arrays
    for i in _range(D):
        if np.ndim(bins[i]) == 0:
            # Scalar bin count: derive evenly spaced edges from the data
            # extent (or the user-supplied range) along this axis.
            if bins[i] < 1:
                raise ValueError(
                    '`bins[{}]` must be positive, when an integer'.format(i))
            smin, smax = _get_outer_edges(sample[:,i], range[i])
            edges[i] = np.linspace(smin, smax, bins[i] + 1)
        elif np.ndim(bins[i]) == 1:
            # Explicit edge array: validate monotonicity only.
            edges[i] = np.asarray(bins[i])
            if np.any(edges[i][:-1] > edges[i][1:]):
                raise ValueError(
                    '`bins[{}]` must be monotonically increasing, when an array'
                    .format(i))
        else:
            raise ValueError(
                '`bins[{}]` must be a scalar or 1d array'.format(i))
        nbin[i] = len(edges[i]) + 1  # includes an outlier on each end
        dedges[i] = np.diff(edges[i])
    # Compute the bin number each sample falls into.
    Ncount = tuple(
        # avoid np.digitize to work around gh-11022
        np.searchsorted(edges[i], sample[:, i], side='right')
        for i in _range(D)
    )
    # Using digitize, values that fall on an edge are put in the right bin.
    # For the rightmost bin, we want values equal to the right edge to be
    # counted in the last bin, and not as an outlier.
    for i in _range(D):
        # Find which points are on the rightmost edge.
        on_edge = (sample[:, i] == edges[i][-1])
        # Shift these points one bin to the left.
        Ncount[i][on_edge] -= 1
    # Compute the sample indices in the flattened histogram matrix.
    # This raises an error if the array is too large.
    xy = np.ravel_multi_index(Ncount, nbin)
    # Compute the number of repetitions in xy and assign it to the
    # flattened histmat.
    hist = np.bincount(xy, weights, minlength=nbin.prod())
    # Shape into a proper matrix
    hist = hist.reshape(nbin)
    # This preserves the (bad) behavior observed in gh-7845, for now.
    hist = hist.astype(float, casting='safe')
    # Remove outliers (indices 0 and -1 for each dimension).
    core = D*(slice(1, -1),)
    hist = hist[core]
    # handle the aliasing normed argument
    if normed is None:
        if density is None:
            density = False
    elif density is None:
        # an explicit normed argument was passed, alias it to the new name
        density = normed
    else:
        raise TypeError("Cannot specify both 'normed' and 'density'")
    if density:
        # calculate the probability density function
        s = hist.sum()
        for i in _range(D):
            # Divide by the bin widths along axis i; the reshape sets up
            # broadcasting of the 1D width vector against the D-dim hist.
            shape = np.ones(D, int)
            shape[i] = nbin[i] - 2
            hist = hist / dedges[i].reshape(shape)
        hist /= s
    # Internal consistency check: core histogram shape must match the
    # per-axis bin counts minus the two outlier bins.
    if (hist.shape != nbin - 2).any():
        raise RuntimeError(
            "Internal Shape Error")
    return hist, edges
|
WarrenWeckesser/numpy
|
numpy/lib/histograms.py
|
Python
|
bsd-3-clause
| 39,865
|
[
"Gaussian"
] |
060184fbc835d1b041b8fd9571c55aa8fbf607bb44aa89814ffe6684e884cec9
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------
# Copyright (c) 2009 Jendrik Seipp
#
# RedNotebook is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# RedNotebook is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with RedNotebook; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
# -----------------------------------------------------------------------
import os
# For testing
import __builtin__
if not hasattr(__builtin__, '_'):
    # Identity fallback for gettext's `_` so this module can be imported
    # (e.g. by tests) before any translation machinery is installed.
    def _(string):
        # No translations available: return the message unchanged.
        return string
# Program metadata: shown in the about dialog and interpolated into the
# command line help below.
version = '1.1.4'
author = 'Jendrik Seipp'
authorMail = 'jendrikseipp@web.de'
url = 'http://rednotebook.sourceforge.net'
forum_url = 'http://apps.sourceforge.net/phpbb/rednotebook/'
translation_url = 'https://translations.launchpad.net/rednotebook/'
bug_url = 'https://bugs.launchpad.net/rednotebook/+filebug'
# Credits list for the about dialog (one entry per line).
developers = ['Jendrik Seipp <jendrikseipp@web.de>',
              '',
              'Past Contributors:',
              'Alexandre Cucumel <superkiouk@gmail.com>',
              ]
# Short program description for the about dialog.  The trailing backslashes
# join the lines into one paragraph.
comments = '''\
RedNotebook is a graphical journal to keep track of notes and \
thoughts. It includes a calendar navigation, customizable \
templates, export functionality and word clouds. You can also \
format, tag and search your entries.\
'''
# GPL v2+ notice shown in the about dialog's license tab.
license_text = '''\
Copyright (c) 2009,2010,2011 Jendrik Seipp
RedNotebook is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
RedNotebook is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with RedNotebook; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
'''
# Usage text printed for --help; %s is filled with the version above.
command_line_help = '''\
RedNotebook %s
The optional journal-path can be one of the following:
- An absolute path (e.g. /home/username/myjournal)
- A relative path (e.g. ../dir/myjournal)
- The name of a directory under $HOME/.rednotebook/ (e.g. myjournal)
If the journal-path is omitted the last session's journal will be used.
At the first program start this defaults to "$HOME/.rednotebook/data".
''' % version
# Translatable building blocks for the welcome entry and help text.
# The `### Translators:` comments are picked up by gettext as hints.
greeting = _('Hello!')
intro=_('''Some example text has been added to help you start and \
you can erase it whenever you like.''')
### Translators: "Help" -> noun
help_par = _('The example text and more documentation is available under "Help" -> "Contents".')
overview1 = _('The interface is divided into three parts:')
### Translators: The location "left"
overview21 = _('Left')
overview22 = _('Navigation with the calendar')
### Translators: The location "center"
overview31 = _('Center')
overview32 = _('Text for a day')
### Translators: The location "right"
overview41 = _('Right')
overview42 = _('Annotations to a day')
### Translators: noun
preview = _('Preview')
preview1 = _('''There are two modes in RedNotebook, the __editing__ mode and \
the __preview__ mode.''')
### Translators: Preview -> noun
preview2 = _('Click on Preview above to see the difference.')
preview_par = ' '.join([preview1, preview2])
# Sample entry demonstrating txt2tags inline markup (//, **, --, __).
example_entry = _('''Today I went to the //pet shop// and bought a **tiger**. Then we went to the \
--pool-- park and had a nice time playing \
ultimate frisbee. Afterwards we watched "__Life of Brian__".''')
annotations = _('Annotations')
ann2 = _('Annotations are notes that can be sorted into categories.')
ann3 = _('''For example you could create the category "Ideas" \
and then add today's ideas to it.''')
ann_par = ' '.join([ann2, ann3])
# Fix: these user-visible strings were missing the gettext `_()` marker
# that every sibling string in this module uses -- `('Templates')` is just
# a parenthesized string literal, so the messages could never be
# translated.  With the identity `_` fallback defined at the top of the
# file the runtime value is unchanged for untranslated locales.
templates = _('Templates')
temp1 = _('RedNotebook supports templates.')
temp2 = _('Click on the arrow next to the "Template" button to see some options.')
temp3 = _('''You can have one template for every day \
of the week and unlimited arbitrarily named templates.''')
# Paragraph shown in the help text's "Templates" section.
temp_par = ' '.join([temp1, temp2, temp3])
### Translators: both are verbs
save = _('Save and Export')
save1 = _('''Everything you enter will be saved automatically at \
regular intervals and when you exit the program.''')
save2 = _('To avoid data loss you should backup your journal regularly.')
save3 = _('"Backup" in the "Journal" menu saves all your entered data in a zip file.')
save4 = _('In the "Journal" menu you also find the "Export" button.')
save5 = _('Click on "Export" and export your diary to Plain Text, HTML or Latex.')
# Paragraph for the welcome text's "Save and Export" section.
save_par = ' '.join([save1, save2, save3, save4, save5])
error1 = _('If you encounter any errors, please drop me a note so I can fix them.')
error2 = _('Any feedback is appreciated.')
error_par = ' '.join([error1, error2])
goodbye_par = _('Have a nice day!')
# Welcome entry shown on first start; assembled from the translatable
# fragments above via %-interpolation against the module namespace.
completeWelcomeText = '''\
%(greeting)s %(intro)s %(help_par)s
%(overview1)s
- **%(overview21)s**: %(overview22)s
- **%(overview31)s**: %(overview32)s
- **%(overview41)s**: %(overview42)s
=== %(preview)s ===
%(preview_par)s
=== %(annotations)s ===
%(ann_par)s
=== %(save)s ===
%(save_par)s
%(error_par)s
%(goodbye_par)s''' % globals()
# Day dict in the journal's on-disk format: key 'text' holds the main
# entry; every other key is a category mapping entry-name -> None.
welcome_day = {'text': completeWelcomeText,
               u'Cool Stuff': {u'Ate **two** cans of spam': None},
               _(u'Ideas'): {_(u'Use a cool journal app'): None},
               u'Movies': {u"Monty Python's Life of Brian": None},
               u'Tags': {u'Work': None, u'Documentation': None},
               }
example_day1 = {
'text': '''\
=== Annotations in Categories ===
%(ann_par)s
- Ideas
- Invent Anti-Hangover-Machine
- Movies
- Monty Python and the Holy Grail
The name "Categories" is a little bit confusing. It does not mean that a day is \
put into a category, but that there is additional content on the right, \
sorted into categories. A category will contain several notes distributed over various days.
For example you could want to remember all the movies you watch. \
Each time you watch a new one, add the category "Movies" with the \
entry "Name of the movie" to the day.
I’ll give you another example: I like to maintain a list of cool things I have done. \
So if I did a cool thing on some day, I navigate to that day and add the category \
"Cool Stuff" with the entry "Visit the pope" (Sadly I haven’t done that, yet ;-) ). \
When I have done more cool things on many days, they all have a category "Cool Stuff" \
and many different entries. It is possible to export only that category and \
get a list of the cool stuff that happened to me with the respective dates.
Additionally you can select the "Cool Stuff" category in the word cloud window \
to get a list of all the cool things.
Maybe a good thing to know is the following:
"Tags" is a category, the tagnames are entries in the category "Tags", \
distributed over various days.
Similarly you can have a category "Movies" and add it together with \
the movie's name to every day on which you see a new film.
Category entries can have all of the formatting that the main text supports, \
so e.g. you can add bold text, links or images.''' % globals(),
u'Cool Stuff': {u'Went to see the pope': None},
u'Ideas': {u'Invent Anti-Hangover-Machine': None},
u'Movies': {u'Monty Python and the Holy Grail': None},
u'Tags': {u'Documentation': None, u'Projects': None},
u'Todo': {u'**Wash the dishes**': None},
}
# Tip text reused verbatim in the help text's "Tips" section.
multiple_entries_text = '''\
=== Multiple Entries ===
You can add multiple entries to one day in two ways:
- Use two different journals (one named “Work”, the other “Play”)
- Separate your two entries by different titles (===Work===, ===Play===)
- Use a horizontal separator line (20 “=”s)
'''
multiple_entries_example = '''\
====================
=== Work ===
Here goes the first entry.
====================
=== Play ===
Here comes the entry about the fun stuff.
'''
# Third example day: demonstrates separating multiple entries in one day.
example_day2 = {
    'text': multiple_entries_text + multiple_entries_example,
    u'Tags': {u'Documentation': None, u'Work': None, u'Play': None},}
# Fourth example day: demonstrates using categories as a todo list.
example_day3 = {
    'text': '''\
=== Todo list ===
You can also use RedNotebook as a todo list. A big advantage is, that you never \
have to explicitly state the date when you added the todo item, you just add it \
on one day and it remains there until you delete it.
Here is how it works:
- On the right click on "New Entry"
- Fill "Todo" and "Remember the milk" in the fields and hit "OK"
- Select the categories cloud from the drop down menu on the left
- Now you can click on "todo" and see all your todo items
- To tick off a todo item you can strike it out by adding "--" around the item.
- To mark an item as important, add "**" around it.
So --Remember the milk-- becomes struck through and **Wash the dishes** becomes bold.
You can see all your todo items at once by clicking "todo" in the category cloud \
on the left. There you can also \
group your todo items into important and finished items by hitting "Entry" \
at the top of the list.
It probably sometimes makes sense to add the todo items to the day you want to \
have completed them (deadline day).
Once you've finished an item, you could also change its category name from \
"Todo" to "Done".''',
    u'Tags': {u'Documentation': None,},
    u'Todo': {u'--Remember the milk--': None,
              u'Take a break': None},
    u'Done': {u'--Check mail--': None,},
}
# Days written into a freshly created journal, in display order.
example_content = [welcome_day, example_day1, example_day2, example_day3]
# Reuse the example texts inside the help document (demoted headings).
ann_help_text = example_day1['text'].replace('===', '==')
todo_help_text = example_day3['text']
help_text = '''
== Layout ==
%(overview1)s
- **%(overview21)s**: %(overview22)s
- **%(overview31)s**: %(overview32)s
- **%(overview41)s**: %(overview42)s
%(preview1)s
== Text ==
The main text field is the container for your normal diary entries like this one:
%(example_entry)s
== Format ==
As you see, the text can be formatted **bold**, \
//italic//, --struck through-- and __underlined__. As a convenience there \
is also the "Format" button, with which you can format the main text and entries \
in the categories tree on the right.
A blank line starts a new **paragraph**, two backslashes\\\\ result in a **newline**.
To see the result, click on the "Preview" button.
You can also see how \
this text was formatted by looking at its [source source.txt].
**Lists** can be created by using the following style, If you use "+"
instead of "-" you can create a **numbered list**.
```
- First Item
- Indented Item
- Do not forget two blank lines after a list
```
%(ann_help_text)s
== Images, Files and Links ==
RedNotebook lets you insert images, files and links into your entries. \
To do so, select the appropriate option in the "Insert" pull-down menu \
above the main text field. The text will be inserted at the current \
cursor position.
With the insert button you cannot insert **links to directories** on your computer. \
Those can be inserted manually however (""[Home ""file:///home/""]"").
== %(templates)s ==
%(temp_par)s
The files 1.txt to 7.txt in the template directory correspond to the templates \
for each day of the week. The current weekday's template will be filled \
into the text area when you click on "Template". You can open the template files \
from inside RedNotebook by opening the menu next to the "Template" button.
== Tags ==
Tagging an entry (e.g. with the tag "Work") is also easy: \
On the right, click on "Add Tag" and insert \
"Work" into the lower textbox. The result looks like:
- Tags
- Work
You can see a tag cloud on the left by activating the "Clouds" tab and \
selecting "Tags". Get a list of all tags with a given name by clicking \
on that tag in the cloud.
== Search ==
On the left you find the search box. You can search for text, display a \
category's content or show all days with a given tag. \
Double-clicking on a day lets you jump to it.
== Clouds ==
Clicking on the "Clouds" tab on the left lets you view the most often used words in your journal.
You can select to view your category or tag clouds by clicking on the scroll-down menu.
If words appear in the cloud that you don't want to see there, just right-click on them. \
Alternatively you can open the Preferences dialog and add the words to the cloud blacklist there.
== Spellcheck ==
RedNotebook supports spellchecking your entries if you have \
python-gtkspell installed (Only available on Linux). \
To highlight all misspelled words in your entries, select the corresponding option in \
the preferences window.
Since gtkspell 2.0.15, you can select the spellchecking language by right-clicking on the \
main text area (in edit mode) and choosing it from the submenu "Languages".
== Options ==
Make sure you check out the customizable options in the Preferences dialog. You can
open this dialog by clicking on the entry in the "Edit" menu.
== Save ==
%(save1)s %(save2)s %(save3)s
== Export ==
%(save4)s %(save5)s
Since version 0.9.2 you can also directly export your journal to PDF. If the \
option does not show up in the export assistant, you need to install \
pywebkitgtk version 1.1.5 or later (the package is sometime called \
python-webkit).
**Latex caveats**
Make sure to type all links with the full path including the protocol:
- http://www.wikipedia.org or http://wikipedia.org (--wikipedia.org--, --"""www.wikipedia.org"""--)
- file:///home/sam/myfile.txt (--/home/sam/myfile.txt--)
== Synchronize across multiple computers ==[sync]
Syncing RedNotebook with a remote server is easy. You can either use a \
cloud service like Ubuntu One or Dropbox or save your journal to your \
own server.
=== Ubuntu One and Dropbox ===
If you are registered for either [Ubuntu One ""http://one.ubuntu.com""] \
or [Dropbox http://www.dropbox.com], you can just save your journal in \
a subfolder of the respective synchronized folder in your home directory.
=== Directly save to remote FTP or SSH server ===
Since version 0.8.9 you can have your journal directory on a remote server. The feature is \
however only available on Linux machines. To use the feature you have to connect your computer \
to the remote server. This is most easily done in Nautilus by clicking on "File" -> \
"Connect to Server". Be sure to add a bookmark for the server. This way you can see your \
server in Nautilus at all times on the left side. The next time you open RedNotebook you \
will find your server in the "New", "Open" and "Save As" dialogs. There you can select \
a new folder on the server for your journal.
=== External sync with remote server ===
If you have your own server, you might want to try \
[Conduit http://www.conduit-project.org] or \
[Unison http://www.cis.upenn.edu/~bcpierce/unison] for example. \
To sync or backup your journal you have to sync your journal folder \
(default is "$HOME/.rednotebook/data/") with a folder on your server. \
It would be great if someone could write a tutorial about that.
Obviously you have to be connected to the internet to use that feature. Be sure to backup your \
data regularly if you plan to save your content remotely. There are always more pitfalls when \
an internet connection is involved.
=== Dual Boot ===
Using RedNotebook from multiple operating systems on the same computer is \
also possible. Save your Journal with "Journal->Save As" in a directory \
all systems can access. Then on the other systems you can open the \
journal with "Journal->Open".
Optionally you can also **share your settings** and templates. \
This is possible since version 0.9.4. The relevant setting is found in \
the file "rednotebook/files/default.cfg". There you can set the value of \
userDir to the path where you want to share your settings between the \
systems.
== Portable mode ==
RedNotebook can be run in portable mode. In this mode, the \
template directory and the configuration and log file are saved \
in the application directory instead of in the home directory. \
Additionally the path to the last opened journal is remembered \
relatively to the application directory.
To use RedNotebook on a flash drive on Windows, run the installer and \
select a directory on your USB drive as the installation directory. \
You probably don't need the "Start Menu Group" and Desktop icons in \
portable mode.
To **activate portable mode**, change into the files/ directory and in the \
default.cfg file set portable=1.
== Convert Latex output to PDF ==
In recent RedNotebook versions you can export your journal directly to PDF, \
so this section may be obsolete. \
However, there may be some people who prefer to export their \
journal to Latex first and convert it to PDF later. Here is how you do it:
=== Linux ===
For the conversion on Linux you need some extra packages: texlive-latex-base and \
texlive-latex-recommended. Maybe you also need texlive-latex-extra. Those contain \
the pdflatex program and are available in the repositories of most Linux distros.
You can convert the .tex file by typing the following text in a command line: \
``pdflatex your-rednotebook-export.tex``
Alternatively you can install a Latex editor like Kile \
(http://kile.sourceforge.net/), open the .tex file with it and hit the export \
button.
However there are some pitfalls: Sometimes not all exported characters can be \
converted to pdf.
E.g. problems occur when exporting \
the euro sign (€) or other "non-standard" characters to pdf.
If you run into any problems during the conversion, the easiest way to solve \
them is to install a latex editor and do the conversion with it. That way \
you can see the errors right away and get rid of them by editing the file.
=== Windows ===
You can open an exported Latex file with Texniccenter and convert it to PDF \
with MikTex. Visit www.texniccenter.org/ and www.miktex.org \
for the programs and instructions. Basically you have to download both programs, \
open the .tex file with Texniccenter and select "Build Output" from the \
"Output" menu. The program will then create the beautifully looking PDF in the
same directory.
== Keyboard Shortcuts ==
|| Action | Shortcut |
| Preview (On/Off) | <Ctrl> + P |
| Find | <Ctrl> + F |
| Go back one day | <Ctrl> + PageDown |
| Go forward one day | <Ctrl> + PageUp |
| Insert link | <Ctrl> + L |
| Insert date/time | <Ctrl> + D |
| New category entry | <Ctrl> + N |
| Add Tag | <Ctrl> + T |
You can find other shortcuts in the menus.
== Encryption ==
You can use e.g. [TrueCrypt http://www.truecrypt.org] to encrypt your \
journal. Nick Bair has written a nice tutorial about \
[encrypting RedNotebook files \
http://sourceforge.net/apps/phpbb/rednotebook/viewtopic.php?f=3&t=14] \
on Windows. The procedure for other operating systems should be similar. \
The general idea is to create and mount an encrypted folder with \
TrueCrypt and put your journal files in there.
In recent Linux distributions is has become pretty easy to encrypt \
your entire home partition. I would recommend that to anyone who \
wishes to protect her/his diary and all other personal files. \
This method is especially useful for laptop users, because their \
computers are more likely to be stolen. If you encrypt your home \
partition all RedNotebook data will be encrypted, too.
== Tips ==
%(multiple_entries_text)s
%(todo_help_text)s
=== Week Numbers ===
If you'd like to see the week numbers in the calendar, you can set the \
value of weekNumbers to 1 in the configuration file. This file \
normally resides at $HOME/.rednotebook/configuration.cfg
=== Language ===
If you want to change RedNotebook's language, setting the environment \
variable LANG to a different language code should be sufficient. \
Language codes have e.g. the format "de_DE" or "de_DE.UTF-8" (German). \
To set the language to English you can also set the code to "C".
On Linux, start a terminal and call ``LANG=de_DE.utf8``. Then in the \
same terminal, run ``rednotebook``. The language change will be gone \
however once you close the terminal.
On Windows, set or create a LANG environment variable with the desired \
code:
+ Right-click My Computer and click Properties.
+ In the System Properties window, click on the Advanced tab.
+ In the Advanced section, click the Environment Variables button.
+ Click the New button and insert LANG at the top and e.g. de or de_DE or
de_DE.UTF-8 (use your [language code ""http://en.wikipedia.org/wiki/ISO_639-1""]).
=== Titles ===
You can insert titles into your post by adding "="s around your title
text. = My Title = is the biggest heading, ====== My Title ====== is
the smallest heading. A title line can only contain the title, nothing
else.
Numbered titles can be created by using "+" instead of "=".
""+ My Title +"" produces a title like "1.", ++++++ My Title ++++++
produces a title like 0.0.0.0.0.1
=== Insert HTML or Latex code ===
To insert custom code into your entries surround the code with single \
quotes. Use 2 single quotes for inline insertions and 3 single quotes \
if you want to insert a whole paragraph. For paragraphs be sure to put \
the single quotes on their own line. \
This feature requires you to use webkit for previews (Only available on Linux).
|| Text | Output |
| ``''<font color="red">Red</font>''`` | ''<font color="red">Red</font>'' |
| ``''$a^2$''`` | ''$a^2$'' (''a<sup>2</sup>'' in Latex) |
This feature can be used to insert e.g. latex formulas:
```
\'''
==\sum_{i=1}^{n} i = \\frac{n \cdot (n+1)}{2}==
\'''
```
will produce a nice looking formula in the Latex export.
=== Verbatim text (Preserve format) ===
To insert preformatted text preserving newlines and spaces, you can \
use the backquotes (`). Use 2 backquotes for inline insertions and 3 \
backquotes if you want to insert a whole paragraph. \
For paragraphs be sure to put the backquotes on their own line. \
This feature requires you to use webkit for previews (Only available on Linux).
Two examples (have a look at the [source source.txt] to see how it's done):
To install rednotebook use ``sudo apt-get install rednotebook``.
```
class Robot(object):
def greet(self):
print 'Hello World'
robot = Robot()
robot.greet()
```
=== Unparsed text ===
Formatting commands inside two pairs of "" are not interpreted (""**not bold**"").
=== Comments ===
Comments can be inserted after percent signs (**%%**). They will not be shown in the \
preview and the exports. The %% has to be the first character on the line.
=== List of all Entries ===
To get a list of all entries, just search for " " (the space character). \
This character is most likely included in all entries. You can sort the \
resulting list chronologically by pressing the "Date" button.
== Command line options ==
```
Usage: rednotebook [options] [journal-path]
RedNotebook %(version)s
The optional journal-path can be one of the following:
- An absolute path (e.g. /home/username/myjournal)
- A relative path (e.g. ../dir/myjournal)
- The name of a directory under $HOME/.rednotebook/ (e.g. myjournal)
If the journal-path is omitted the last session's journal will be used.
At the first program start this defaults to "$HOME/.rednotebook/data".
Options:
-h, --help show this help message and exit
-d, --debug Output debugging messages (default: False)
-m, --minimized Start mimimized to system tray (default: False)
```
== Data Format ==
In this paragraph I will explain shortly what the RedNotebook files \
consist of. Firstly it is important to understand that the content \
is saved in a directory with many files, not just one file. \
The directory name is used as a name for the journal.
In the directory there are several files all conforming to the naming \
scheme "2010-05.txt" (<year>-<month>.txt). Obviously these files \
correspond to months (May 2010).
Each month file contains text for the days of that month. \
The text is actually [YAML www.yaml.org] markup. Without the \
(unnecessary) python directives the files look like this:
```
24: {text: "This is a normal text entry."}
25:
Ideas: {"Invent Anti-Hangover machine": null}
text: "This is another text entry, shown in the main text area."
```
As you can see the data format uses a dictionary (or hashmap structure) \
for storing the information. The outer dictionary has the daynumbers as \
keys and the day content as values. The day values consist of another \
dictionary. It can have a key "text" whose value will be inserted in \
the main content area. Additionally there can be multiple other keys \
that stand for the categories that belong to that day. Each category \
contains a dictionary with only one key, the category entry.
== Questions ==
If you have any questions or comments, feel free to post them in the \
[forum http://apps.sourceforge.net/phpbb/rednotebook/] or \
contact me directly.
== Bugs ==
There is no software without bugs, so if you encounter one please drop me a note.
This way RedNotebook can get better not only for you, but for all users.
Bug reports should go [here https://bugs.launchpad.net/rednotebook], but if you
don't know how to use that site, a simple mail is equally fine.
''' % globals()
# Freedesktop.org .desktop entry; installed so RedNotebook shows up in
# application menus on Linux.
desktop_file = '''\
[Desktop Entry]
Version=1.0
Name=RedNotebook
GenericName=Journal
Comment=Daily journal with calendar, templates and keyword searching
Exec=rednotebook
Icon=rednotebook
Terminal=false
Type=Application
Categories=Office;
StartupNotify=true
'''
def write_documentation(dir):
    """Render the built-in help to ``help.html`` inside *dir*.

    The raw txt2tags markup is written alongside as ``source.txt`` so the
    generated documentation can link to its own source.
    """
    # Imported lazily to keep module import light.
    from rednotebook.util import filesystem
    from rednotebook.util import markup

    filesystem.write_file(os.path.join(dir, 'source.txt'), help_text)
    html = markup.convert(help_text,
                          'xhtml',
                          [_('RedNotebook Documentation'), version, ''],
                          {'toc': 1})
    filesystem.write_file(os.path.join(dir, 'help.html'), html)
if __name__ == '__main__':
    import sys
    # Make the rednotebook package importable when run from the source tree.
    sys.path.insert(0, os.path.abspath("./../"))
    # NOTE: Python 2 print statements -- this module targets Python 2.
    print completeWelcomeText
    print '*'*80
    print help_text
    # Regenerate the shipped HTML documentation in ../doc.
    doc_dir = '../doc'
    doc_dir = os.path.abspath(doc_dir)
    write_documentation(doc_dir)
#logging.getLogger('').setLevel(logging.DEBUG)
|
pakesson/rednotebook
|
rednotebook/info.py
|
Python
|
gpl-2.0
| 26,888
|
[
"Brian",
"VisIt"
] |
4408aeeb24d459b4aa3f7d77225dce566e33154c26d6604bbf858619641848a2
|
"""
Manage Telemetry alert configurations
=====================================
.. versionadded:: 2016.3.0
Create, Update and destroy Mongo Telemetry alert configurations.
This module uses requests, which can be installed via package, or pip.
This module accepts explicit credential (telemetry api key)
or can also read api key credentials from a pillar.
Example:
.. code-block:: yaml
ensure telemetry alert X is defined on deployment Y:
telemetry_alert.present:
- deployment_id: "rs-XXXXXX"
- metric_name: "testMetric"
- alert_config:
max: 1
filter: SERVER_ROLE_MONGOD_PRIMARY
escalate_to: "example@pagerduty.com"
- name: "**MANAGED BY ORCA DO NOT EDIT BY HAND** manages alarm on testMetric"
"""
def __virtual__():
    """Load this state module only when the telemetry execution module is available."""
    telemetry_loaded = "telemetry.get_alert_config" in __salt__
    if not telemetry_loaded:
        return (False, "telemetry module could not be loaded")
    return "telemetry_alert"
def present(
    name, deployment_id, metric_name, alert_config, api_key=None, profile="telemetry"
):
    """
    Ensure the telemetry alert exists.

    name
        An optional description of the alarm (not currently supported by telemetry API)

    deployment_id
        Specifies the ID of the root deployment resource
        (replica set cluster or sharded cluster) to which this alert definition is attached

    metric_name
        Specifies the unique ID of the metric to whose values these thresholds will be applied

    alert_config: Is a list of dictionaries where each dict contains the following fields:
        filter
            By default the alert will apply to the deployment and all its constituent resources.
            If the alert only applies to a subset of those resources, a filter may be specified to narrow this scope.

        min
            the smallest "ok" value the metric may take on; if missing or null, no minimum is enforced.

        max
            the largest "ok" value the metric may take on; if missing or null, no maximum is enforced.

        notify_all
            Used to indicate if you want to alert both onCallEngineer and apiNotifications

    api_key
        Telemetry api key for the user

    profile
        A dict of telemetry config information. If present, will be used instead of
        api_key.
    """
    ret = {"name": metric_name, "result": True, "comment": "", "changes": {}}

    saved_alert_config = __salt__["telemetry.get_alert_config"](
        deployment_id, metric_name, api_key, profile
    )

    # Payload shape the telemetry API expects for create/update requests.
    post_body = {
        "deployment": deployment_id,
        "filter": alert_config.get("filter"),
        "notificationChannel": __salt__["telemetry.get_notification_channel_id"](
            alert_config.get("escalate_to")
        ).split(),
        "condition": {
            "metric": metric_name,
            "max": alert_config.get("max"),
            "min": alert_config.get("min"),
        },
    }

    # Diff the alert config with the passed-in attributes
    difference = []
    if saved_alert_config:
        # del saved_alert_config["_id"]
        for k, v in post_body.items():
            if k not in saved_alert_config:
                difference.append("{}={} (new)".format(k, v))
                continue
            v2 = saved_alert_config[k]
            if v == v2:
                continue
            # The API may echo values back in a different type than we sent;
            # treat type-coerced equality as "no change".
            if isinstance(v, str) and v == str(v2):
                continue
            if isinstance(v, float) and v == float(v2):
                continue
            if isinstance(v, int) and v == int(v2):
                continue
            difference.append("{}='{}' was: '{}'".format(k, v, v2))
    else:
        difference.append("new alert config")

    create_or_update_args = (
        deployment_id,
        metric_name,
        alert_config,
        api_key,
        profile,
    )
    if saved_alert_config:  # alert config is present. update, or do nothing
        # check to see if attributes matches is_present. If so, do nothing.
        if not difference:
            ret["comment"] = "alert config {} present and matching".format(metric_name)
            return ret
        if __opts__["test"]:
            # Fix: Salt's state convention requires result to be True/False/None;
            # None signals "would change" under test=True. The previous code put
            # the diff string into result — the diff now goes into the comment.
            ret["comment"] = "alert config {} is to be updated.\n{}".format(
                metric_name, "\n".join(difference)
            )
            ret["result"] = None
            return ret
        result, msg = __salt__["telemetry.update_alarm"](*create_or_update_args)
        if result:
            ret["changes"]["diff"] = difference
            ret["comment"] = "Alert updated."
        else:
            ret["result"] = False
            ret["comment"] = "Failed to update {} alert config: {}".format(
                metric_name, msg
            )
    else:  # alert config is absent. create it.
        if __opts__["test"]:
            msg = "alert config {} is to be created.".format(metric_name)
            ret["comment"] = msg
            ret["result"] = None
            return ret
        result, msg = __salt__["telemetry.create_alarm"](*create_or_update_args)
        if result:
            ret["changes"]["new"] = msg
        else:
            ret["result"] = False
            ret["comment"] = "Failed to create {} alert config: {}".format(
                metric_name, msg
            )
    return ret
def absent(name, deployment_id, metric_name, api_key=None, profile="telemetry"):
    """
    Ensure the telemetry alert config is deleted

    name
        An optional description of the alarms (not currently supported by telemetry API)

    deployment_id
        Specifies the ID of the root deployment resource
        (replica set cluster or sharded cluster) to which this alert definition is attached

    metric_name
        Specifies the unique ID of the metric to whose values these thresholds will be applied

    api_key
        Telemetry api key for the user

    profile
        A dict with telemetry config data. If present, will be used instead of
        api_key.
    """
    ret = {"name": metric_name, "result": True, "comment": "", "changes": {}}

    existing = __salt__["telemetry.get_alert_config"](
        deployment_id, metric_name, api_key, profile
    )

    # Nothing to do when no such alert is configured.
    if not existing:
        ret["comment"] = "alarm on {} does not exist within {}.".format(
            metric_name, deployment_id
        )
        return ret

    alert_id = existing.get("_id")
    if __opts__["test"]:
        ret[
            "comment"
        ] = "alert {} is set to be removed from deployment: {}.".format(
            metric_name, deployment_id
        )
        ret["result"] = None
        return ret

    deleted, msg = __salt__["telemetry.delete_alarms"](
        deployment_id,
        alert_id,
        existing.get("condition", {}).get("metric"),
        api_key,
        profile,
    )
    if deleted:
        ret["changes"]["old"] = metric_name
        ret["changes"]["new"] = None
    else:
        ret["result"] = False
        ret["comment"] = "Failed to delete alert {} from deployment: {}".format(
            metric_name, msg
        )
    return ret
|
saltstack/salt
|
salt/states/telemetry_alert.py
|
Python
|
apache-2.0
| 7,205
|
[
"ORCA"
] |
da7d46f8a66e55c7fc80f0527a3f6dd43a274abc68540a7215dd7eea9a44bde2
|
#!/usr/bin/env py27
from ecmwfapi import ECMWFDataServer
import numpy as np
import datetime as dt
import calendar
server = ECMWFDataServer()

# Output directory for the downloaded NetCDF files.
dir = "/automount/agh/Projects/skiefer/"

# date_first_start = dt.date(1958, 1, 1)
# date_first_end = dt.date(1958, 1, 31)
# date_last_end = dt.date(1978, 12, 1)
# date_last_end = dt.date(1978, 12, 31)

# Inclusive range of years to fetch — one MARS request (= one file) per year.
year_first = 2015
year_last = 2015

years_vec = np.arange(year_first, (year_last + 1))
months_vec = np.arange(1, 13)  # NOTE(review): not used below — verify before removing

for i in range(len(years_vec)):
    # Request the whole calendar year as a single "start/to/end" date range.
    date_loop_start = dt.date(years_vec[i], 1, 1)
    date_loop_end = dt.date(years_vec[i], 12, 31)
    date_loop = str(date_loop_start)+'/to/'+str(date_loop_end)
    # e.g. "<dir>/2015-ei.nc"
    target_loop = dir+str(date_loop_start.year)+'-ei.nc'
    server.retrieve({
        'class': "ei",          # ERA-Interim
        'dataset': "interim",
        'date': date_loop,
        'expver': "1",
        'format': "netcdf",
        'grid': "1.00/1.00",    # 1 x 1 degree lat/lon grid
        'levelist': "100/150/200/250/300/400/500/600/700/775/850/925/1000",
        'levtype': "pl",        # pressure levels
        'param': "129.128/130.128/131.128/132.128/135.128/155.128",
        'step': "0",
        'stream': "oper",
        'target': target_loop,
        'time': "00/06/12/18",  # four analysis times per day
        'type': "an"            # analysis fields
    })

print('Done successfully.')
|
sebaki/clim-jet-stream
|
01-code/01-ecmwf-py/b-get-era-int-daily-by-year.py
|
Python
|
mit
| 1,239
|
[
"NetCDF"
] |
7f4b8394e9b8475b45b8091d5b1b5e3af3d97f04df4c13082cabacb224ebd7f7
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
"""\
====================
Pygame Video Surface
====================
Displays uncompressed RGB video data on a pygame surface using the Pygame
Display service.
Example Usage
-------------
Read raw YUV data from a file, convert it to interleaved RGB and display it
using VideoSurface::
imagesize = (352, 288) # "CIF" size video
fps = 15 # framerate of video
Pipeline(ReadFileAdapter("raw352x288video.yuv", ...other args...),
RawYUVFramer(imagesize),
MessageRateLimit(messages_per_second=fps, buffer=fps*2),
ToRGB_interleaved(),
VideoSurface(),
).activate()
RawYUVFramer is needed to frame raw YUV data into individual video frames.
ToRGB_interleaved is needed to convert the 3 planes of Y, U and V data to a
single plane containing RGB data interleaved (R, G, B, R, G, B, R, G, B, ...)
How does it work?
-----------------
The component waits to receive uncompressed video frames from its "inbox" inbox.
The frames must be encoded as dictionary objects in the format described below.
When the first frame is received, the component notes the size and pixel format
of the video data and requests an appropriate surface from the
Pygame Display service component, to which video can be rendered.
NOTE: Currently the only supported pixelformat is "RGB_interleaved".
When subsequent frames of video are received the rgb data is rendered to the
surface and the Pygame Display service is notified that the surface needs
redrawing.
At present, VideoSurface cannot cope with a change of pixel format or video
size mid sequence.
=========================
UNCOMPRESSED FRAME FORMAT
=========================
Uncompresed video frames must be encoded as dictionaries. VideoSurface requires
the following entries::
{
"rgb" : rgbdata # a string containing RGB video data
"size" : (width, height) # in pixels
"pixformat" : "RGB_interleaved" # format of raw video data
}
"""
import Axon
from Axon.Component import component
from Axon.Ipc import producerFinished, shutdownMicroprocess
from Axon.Ipc import WaitComplete
from Kamaelia.UI.GraphicDisplay import PygameDisplay
import pygame
class VideoSurface(component):
    """\
    VideoSurface([position]) -> new VideoSurface component

    Displays a pygame surface using the Pygame Display service component, for
    displaying RGB video frames sent to its "inbox" inbox.

    The surface is sized and configured by the first frame of (uncompressed)
    video data is receives.

    Keyword arguments:

    - position  -- (x,y) pixels position of top left corner (default=(0,0))
    """

    Inboxes = { "inbox" : "Video frame data structures containing RGB data",
                "control" : "Shutdown messages: shutdownMicroprocess or producerFinished",
                "callback" : "Receive callbacks from Pygame Display",
              }
    Outboxes = {
                 "outbox" : "NOT USED",
                 "signal" : "Shutdown signalling: shutdownMicroprocess or producerFinished",
                 "display_signal" : "Outbox used for sending signals of various kinds to the display service"
               }

    def __init__(self, position=None, size = None,resize=None):
        """x.__init__(...) initializes x; see x.__class__.__doc__ for signature"""
        super(VideoSurface, self).__init__()
        self.display = None       # pygame surface; obtained lazily from Pygame Display in main()
        self.size = size          # (width, height) of incoming frames; taken from first frame if None
        self.pixformat = None     # pixel format of incoming frames; taken from first frame
        self.resize = resize      # optional (width, height) to scale each frame to before blitting

        if position is not None:
            self.position = position
        else:
            self.position = (0,0)

    def shutdown(self):
        """Forward any control messages onward; return True if shutdown was requested."""
        while self.dataReady("control"):
            msg = self.recv("control")
            self.send(msg,"signal")
            if isinstance(msg, (shutdownMicroprocess, producerFinished)):
                return True
        return False

    def waitBox(self,boxname):
        """Generator. yield's 1 until data is ready on the named inbox."""
        waiting = True
        while waiting:
            if self.dataReady(boxname): return
            else: yield 1

    def formatChanged(self, frame):
        """Returns True if frame size or pixel format is new/different for this frame."""
        return frame['size'] != self.size or frame['pixformat'] != self.pixformat

    def main(self):
        """Main loop."""
        displayservice = PygameDisplay.getDisplayService()
        self.link((self,"display_signal"), displayservice)

        while 1:
            # wait for a frame (cooperatively: pause when no inbox has data)
            frame = False
            while not frame:
                if self.dataReady("inbox"):
                    frame = self.recv("inbox")
                if self.shutdown():
                    return
                if not self.anyReady() and not frame:
                    self.pause()
                yield 1

            # is it the same format as our current frame?
            if self.formatChanged(frame):
                # A format change after the surface exists is unsupported.
                if self.display:
                    raise RuntimeError("Can't cope with a format change yet!")
                self.size = frame['size']
                self.pixformat = frame['pixformat']
                if self.pixformat != "RGB_interleaved":
                    raise RuntimeError("Can't cope with any pixformat other than RGB_interleaved")

                # request a surface
                # build the initial request to send to Pygame Display to obtain a surface
                # but store it away until main() main loop is activated.
                surfacesize = self.size
                if self.resize:
                    surfacesize = self.resize
                dispRequest = { "DISPLAYREQUEST" : True,
                                "callback" : (self,"callback"),
                                "size": surfacesize,
                                "position" : self.position
                              }
                self.send(dispRequest, "display_signal")
                yield 1

                # wait for the surface back
                yield WaitComplete(self.waitBox("callback"))
                self.display = self.recv("callback")

            # now render our frame (scale first if a resize was requested)
            image = pygame.image.fromstring(frame['rgb'], frame['size'], "RGB", False)
            if self.resize:
                image = pygame.transform.scale(image, self.resize)
            self.display.blit(image, (0,0))
            self.send({"REDRAW":True, "surface":self.display}, "display_signal")

            # deal with possible shutdown requests
            if self.shutdown():
                self.send(Axon.Ipc.producerFinished(message=self.display), "display_signal")
                return

            if not self.anyReady():
                self.pause()
            yield 1
# Components exported by this module (picked up by Kamaelia's introspection tools).
__kamaelia_components__ = ( VideoSurface, )


if __name__ == "__main__":
    from Kamaelia.Chassis.Pipeline import Pipeline
    from Kamaelia.File.ReadFileAdaptor import ReadFileAdaptor
    from Kamaelia.Codec.RawYUVFramer import RawYUVFramer
    from Kamaelia.UI.Pygame.VideoOverlay import VideoOverlay
    from Kamaelia.Video.PixFormatConversion import ToYUV420_planar
    from Kamaelia.Video.PixFormatConversion import ToRGB_interleaved

    # Alternative demo: raw YUV input instead of Dirac.
    # Pipeline( ReadFileAdaptor("/data/dirac-video/snowboard-jum-352x288x75.yuv", readmode="bitrate", bitrate = 2280960*8),
    #           RawYUVFramer(size=(352,288), pixformat = "YUV420_planar" ),
    #           ToRGB_interleaved(),
    #           VideoSurface(),
    #         ).run()

    from Kamaelia.Codec.Dirac import DiracDecoder
    from Kamaelia.Util.Console import ConsoleEchoer

    # Demo: decode a Dirac-compressed clip, convert to interleaved RGB and
    # display it scaled to 352x288.
    Pipeline( ReadFileAdaptor("/data/dirac-video/snowboard-jum-352x288x75.dirac.drc", readmode="bitrate", bitrate = 2280960*8),
              DiracDecoder(),
              ToRGB_interleaved(),
              # ToYUV420_planar(),
              # ToRGB_interleaved(),
              # ConsoleEchoer(forwarder = True),
              VideoSurface((352, 288)),
            ).run()
|
sparkslabs/kamaelia
|
Sketches/MPS/BugReports/FixTests/Kamaelia/Kamaelia/UI/Pygame/VideoSurface.py
|
Python
|
apache-2.0
| 9,230
|
[
"DIRAC"
] |
54b8491a464c9ee8fc87b5d0a843242a360e5040c66bef14794e594fc36c46b8
|
# -*- coding: utf-8 -*-
#
# conncomp.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
'''
NEST Topology Module Example
Create two 30x30 layers with nodes composed of one pyramidal cell
and one interneuron. Connect with two projections, one pyr->pyr, one
pyr->in, and visualize.
BCCN Tutorial @ CNS*09
Hans Ekkehard Plesser, UMB
'''
import nest
import nest.topology as topo
import pylab
pylab.ion()

nest.ResetKernel()
nest.set_verbosity('M_WARNING')

# create two test layers
# Both cell types are plain iaf_psc_alpha copies; only the model *names*
# ('pyr' vs 'in') distinguish the two populations.
nest.CopyModel('iaf_psc_alpha', 'pyr')
nest.CopyModel('iaf_psc_alpha', 'in')

# 30x30 grids spanning a 3.0 x 3.0 extent; each grid node holds one pyr + one in.
a = topo.CreateLayer({'columns': 30, 'rows': 30, 'extent': [3.0, 3.0],
                      'elements': ['pyr', 'in']})
b = topo.CreateLayer({'columns': 30, 'rows': 30, 'extent': [3.0, 3.0],
                      'elements': ['pyr', 'in']})

# pyr -> pyr projection: narrow circular mask (r=0.5), 50% connection probability
topo.ConnectLayers(a, b, {'connection_type': 'divergent',
                          'sources': {'model': 'pyr'},
                          'targets': {'model': 'pyr'},
                          'mask': {'circular': {'radius': 0.5}},
                          'kernel': 0.5,
                          'weights': 1.0,
                          'delays': 1.0})
# pyr -> in projection: wider mask (r=1.0), 20% connection probability
topo.ConnectLayers(a, b, {'connection_type': 'divergent',
                          'sources': {'model': 'pyr'},
                          'targets': {'model': 'in'},
                          'mask': {'circular': {'radius': 1.0}},
                          'kernel': 0.2,
                          'weights': 1.0,
                          'delays': 1.0})

pylab.clf()

# plot targets of neurons in different grid locations
for ctr in [[15, 15]]:
    # obtain node id for center: pick first node of composite
    ctr_id = topo.GetElement(a, ctr)

    # get all projection targets of center neuron
    tgts = [ci[1] for ci in nest.GetConnections(ctr_id)]

    # get positions of targets, split by target model
    tpyr = pylab.array(tuple(zip(*[topo.GetPosition([n])[0] for n in tgts
                                   if
                                   nest.GetStatus([n], 'model')[0] == 'pyr'])))
    tin = pylab.array(tuple(zip(*[topo.GetPosition([n])[0] for n in tgts
                                  if
                                  nest.GetStatus([n], 'model')[0] == 'in'])))

    # scatter-plot (offset blue/red markers slightly so both remain visible)
    pylab.scatter(tpyr[0] - 0.02, tpyr[1] - 0.02, 20, 'b', zorder=10)
    pylab.scatter(tin[0] + 0.02, tin[1] + 0.02, 20, 'r', zorder=10)

    # mark locations with background grey circle
    pylab.plot(tpyr[0], tpyr[1], 'o', markerfacecolor=(0.7, 0.7, 0.7),
               markersize=10, markeredgewidth=0, zorder=1, label='_nolegend_')
    pylab.plot(tin[0], tin[1], 'o', markerfacecolor=(0.7, 0.7, 0.7),
               markersize=10, markeredgewidth=0, zorder=1, label='_nolegend_')

    # mark sender position with transparent red circle
    ctrpos = topo.GetPosition(ctr_id)[0]
    pylab.gca().add_patch(pylab.Circle(ctrpos, radius=0.15, zorder=99,
                                       fc='r', alpha=0.4, ec='none'))

    # mark mask positions with open red/blue circles
    pylab.gca().add_patch(pylab.Circle(ctrpos, radius=0.5, zorder=2,
                                       fc='none', ec='b', lw=3))
    pylab.gca().add_patch(pylab.Circle(ctrpos, radius=1.0, zorder=2,
                                       fc='none', ec='r', lw=3))

    # mark layer edge
    pylab.gca().add_patch(pylab.Rectangle((-1.5, -1.5), 3.0, 3.0, zorder=1,
                                          fc='none', ec='k', lw=3))

# beautify
pylab.axes().set_xticks(pylab.arange(-1.5, 1.55, 0.5))
pylab.axes().set_yticks(pylab.arange(-1.5, 1.55, 0.5))
pylab.grid(True)
pylab.axis([-1.6, 1.6, -1.6, 1.6])
pylab.axes().set_aspect('equal', 'box')
|
tobikausk/nest-simulator
|
topology/examples/conncomp.py
|
Python
|
gpl-2.0
| 4,303
|
[
"NEURON"
] |
a6b119383a968a761a117995929a512a2220796bb5c92f04e71d48bff2af2ed7
|
#!/usr/bin/env python3
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
"""
Tests for Lexer and related objects.
"""
import re
import unittest
from MooseDocs.tree import tokens
from MooseDocs.base import lexers
from MooseDocs.common import exceptions
from MooseDocs.extensions import core
class Proxy(object):
    """
    Stand-in for tokenizer components in these tests: a callable that
    accepts any arguments and does nothing.
    """
    def __call__(self, *args):
        return None
class TestGrammar(unittest.TestCase):
    """
    Test Grammar object.
    """
    def testPatterns(self):
        """
        Test the multiple patterns can be added.

        NOTE: The underlying Storage object that the Grammar class uses is thoroughly tested
        in the test/common/test_Storage.py.
        """
        grammar = lexers.Grammar()
        grammar.add('foo', re.compile(''), Proxy())
        grammar.add('bar', re.compile(''), Proxy())
        # Patterns are retrievable both by insertion order and by name.
        self.assertEqual(grammar[0].name, 'foo')
        self.assertEqual(grammar[1].name, 'bar')
        self.assertEqual(grammar['foo'].name, 'foo')
        self.assertEqual(grammar['bar'].name, 'bar')
class TestLexerInformation(unittest.TestCase):
    """
    Test LexerInformation class that stores parsing data.
    """
    def testInfo(self):
        regex = re.compile(r'(?P<key>foo)')
        match = regex.search('foo bar')
        pattern = lexers.Pattern(name='name', regex=regex, function=Proxy())
        info = lexers.LexerInformation(match=match, pattern=pattern, line=42)
        # Line number and pattern name are carried through verbatim.
        self.assertEqual(info.line, 42)
        self.assertEqual(info.pattern, 'name')
        # keys() exposes the match's numeric groups plus the named group.
        self.assertEqual(list(info.keys()), [0, 1, 'key'])
        self.assertIn('key', info)
        self.assertIn('line:42', str(info))
# Token type used by FooBarComponent below.
FooBar = tokens.newToken('FooBar', content='')

class FooBarComponent(object):
    """Class for testing lexer."""
    def __call__(self, parent, info, page):
        content = info['content']
        # Only claim the words 'foo'/'bar'; anything else implicitly returns
        # None so the lexer falls through to the next grammar pattern.
        if content in ('foo', 'bar'):
            return FooBar(parent, content=content)

class WordComponent(object):
    """Class for testing lexer."""
    def __call__(self, parent, info, page):
        return core.Word(parent, content=info['content'])
class TestLexer(unittest.TestCase):
    """
    Test basic operation of Lexer class.
    """
    def testTokenize(self):
        """Earlier patterns win; non-matching components fall through to later ones."""
        root = tokens.Token(None)
        grammar = lexers.Grammar()
        # Fix: use raw strings for the regexes — '\w' in a plain string
        # literal is an invalid escape sequence (DeprecationWarning since
        # Python 3.6, a SyntaxError in future versions).
        grammar.add('foo', re.compile(r'(?P<content>\w+) *'), FooBarComponent())
        grammar.add('word', re.compile(r'(?P<content>\w+) *'), WordComponent())
        lexer = lexers.Lexer()

        # Basic
        lexer.tokenize(root, 'foo bar', None, grammar)
        self.assertEqual(root(0).name, 'FooBar')
        self.assertEqual(root(0)['content'], 'foo')
        self.assertEqual(root(1).name, 'FooBar')
        self.assertEqual(root(1)['content'], 'bar')

        # Fall through
        root = tokens.Token(None)
        lexer.tokenize(root, 'foo other bar', None, grammar)
        self.assertEqual(root(0).name, 'FooBar')
        self.assertEqual(root(0)['content'], 'foo')
        self.assertEqual(root(1).name, 'Word')
        self.assertEqual(root(1)['content'], 'other')
        self.assertEqual(root(2).name, 'FooBar')
        self.assertEqual(root(2)['content'], 'bar')

    def testTokenizeWithExtraContent(self):
        """Input that no pattern consumes is reported as an ErrorToken."""
        # Extra
        root = tokens.Token(None)
        grammar = lexers.Grammar()
        grammar.add('foo', re.compile(r'(?P<content>\w+) *'), FooBarComponent())
        lexer = lexers.Lexer()
        lexer.tokenize(root, 'foo ???', None, grammar)
        self.assertEqual(root(0).name, 'FooBar')
        self.assertEqual(root(0)['content'], 'foo')
        self.assertEqual(root(1).name, 'ErrorToken')
        self.assertIn('Unprocessed', root(1)['message'])
# Token types and factory functions used by the recursive-lexer test below.
Letters = tokens.newToken('Letters')
def letters_func(parent, info, page):
    # 'block'-level rule: wrap the matched word in a Letters token.
    return Letters(parent)

Letter = tokens.newToken('Letter', content='')
def letter_func(parent, info, page):
    # 'inline'-level rule: one Letter token per matched character.
    return Letter(parent, content=info['content'])
class TestRecursiveLexer(unittest.TestCase):
    """
    Test operation of RecursiveLexer class.
    """
    def testTokenize(self):
        """The 'block' match's named 'inline' group is re-lexed with the inline grammar."""
        lexer = lexers.RecursiveLexer('block', 'inline')
        # Fix: use raw strings for the regexes — '\w' in a plain string
        # literal is an invalid escape sequence (DeprecationWarning since
        # Python 3.6, a SyntaxError in future versions).
        lexer.add('block', 'foo', re.compile(r'(?P<inline>\w+) *'), letters_func)
        lexer.add('inline', 'bar', re.compile(r'(?P<content>\w)'), letter_func)
        root = tokens.Token(None)
        lexer.tokenize(root, 'foo', None, lexer.grammar('block'))
        self.assertIsInstance(root(0), tokens.Token)
        self.assertEqual(root(0).name, 'Letters')
        self.assertEqual(root(0)(0).name, 'Letter')
        self.assertEqual(root(0)(0)['content'], 'f')
        self.assertEqual(root(0)(1).name, 'Letter')
        self.assertEqual(root(0)(1)['content'], 'o')
        self.assertEqual(root(0)(2).name, 'Letter')
        self.assertEqual(root(0)(2)['content'], 'o')


if __name__ == '__main__':
    unittest.main(verbosity=2)
|
nuclear-wizard/moose
|
python/MooseDocs/test/base/test_lexers.py
|
Python
|
lgpl-2.1
| 5,154
|
[
"MOOSE"
] |
e93d78b49e4ffd85c5fe8bc1cc91ac1afe874a8b283db3b0b41e6a1793cc348a
|
#
# Copyright (C) 2013-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import espressomd
import unittest as ut
import unittest_decorators as utx
from tests_common import params_match
@utx.skipIfMissingGPU()
@utx.skipIfMissingFeatures("ELECTROSTATICS")
class P3MGPU_test(ut.TestCase):
    # Checks that P3MGPU stores and reports back the parameters it was
    # constructed with (tuning disabled, so values must pass through as-is).
    def test(self):
        from espressomd.electrostatics import P3MGPU
        es = espressomd.System(box_l=[10.0, 10.0, 10.0])
        # one RNG seed per MPI node
        es.seed = es.cell_system.get_state()['n_nodes'] * [1234]
        test_params = {}
        test_params["prefactor"] = 2
        test_params["cao"] = 2
        test_params["r_cut"] = 0.9
        test_params["accuracy"] = 1e-1
        test_params["mesh"] = [10, 10, 10]
        test_params["epsilon"] = 20.0
        test_params["alpha"] = 1.1
        test_params["tune"] = False   # keep the supplied values untouched

        p3m = P3MGPU(**test_params)
        es.actors.add(p3m)
        # Every parameter we set must round-trip through get_params().
        self.assertTrue(params_match(test_params, p3m.get_params()))


if __name__ == "__main__":
    ut.main()
|
psci2195/espresso-ffans
|
testsuite/python/p3m_gpu.py
|
Python
|
gpl-3.0
| 1,621
|
[
"ESPResSo"
] |
b6c033757af26dcdf58764ae46d06113eaa43de9a3b915f85e563e9a02ad71a0
|
#
# Copyright (c) 2009-2015, Jack Poulson
# All rights reserved.
#
# This file is part of Elemental and is under the BSD 2-Clause License,
# which can be found in the LICENSE file in the root directory, or at
# http://opensource.org/licenses/BSD-2-Clause
#
import El
import time
# Problem dimensions: n primal variables, m equality rows, k inequality rows.
m = 1000
n = 2000
k = 1500
# Which interior-point solvers to exercise below.
testMehrotra = True
testIPF = False
testADMM = False    # NOTE(review): never consulted below — verify before removing
manualInit = False  # if True, start the solvers from random initial iterates
display = False
progress = True

worldRank = El.mpi.WorldRank()
worldSize = El.mpi.WorldSize()
# Make a semidefinite matrix
def Semidefinite(height):
    """Return a height x height identity matrix (trivially semidefinite)."""
    mat = El.DistMatrix()
    El.Identity(mat, height, height)
    return mat
# Make a dense matrix
def RectangDense(height, width):
    """Return a height x width matrix with Gaussian random entries."""
    dense = El.DistMatrix()
    El.Gaussian(dense, height, width)
    return dense
Q = Semidefinite(n)
A = RectangDense(m,n)
G = RectangDense(k,n)

# Generate a (b,h) which implies a primal feasible (x,s)
# =====================================================
# b := A xGen
# -----------
xGen = El.DistMatrix()
El.Uniform(xGen,n,1,0.5,0.4999)
b = El.DistMatrix()
El.Zeros( b, m, 1 )
El.Gemv( El.NORMAL, 1., A, xGen, 0., b )
# h := G xGen + sGen
# ------------------
sGen = El.DistMatrix()
El.Uniform(sGen,k,1,0.5,0.5)
h = El.DistMatrix()
El.Copy( sGen, h )
El.Gemv( El.NORMAL, 1., G, xGen, 1., h )

# Generate a c which implies a dual feasible (y,z)
# ================================================
# c := -(Q xGen + A^T yGen + G^T zGen)
yGen = El.DistMatrix()
El.Gaussian(yGen,m,1)
zGen = El.DistMatrix()
El.Uniform(zGen,k,1,0.5,0.5)
c = El.DistMatrix()
El.Zeros(c,n,1)
El.Hemv( El.LOWER, -1, Q, xGen, 1., c )
El.Gemv( El.TRANSPOSE, -1., A, yGen, 1., c )
El.Gemv( El.TRANSPOSE, -1., G, zGen, 1., c )

if display:
    El.Display( Q, "Q" )
    El.Display( A, "A" )
    El.Display( G, "G" )
    El.Display( b, "b" )
    El.Display( c, "c" )
    El.Display( h, "h" )

# Set up the control structure (and possibly initial guesses)
# ===========================================================
ctrl = El.QPAffineCtrl_d()
xOrig = El.DistMatrix()
yOrig = El.DistMatrix()
zOrig = El.DistMatrix()
sOrig = El.DistMatrix()
if manualInit:
    El.Uniform(xOrig,n,1,0.5,0.4999)
    El.Uniform(yOrig,m,1,0.5,0.4999)
    El.Uniform(zOrig,k,1,0.5,0.4999)
    El.Uniform(sOrig,k,1,0.5,0.4999)
x = El.DistMatrix()
y = El.DistMatrix()
z = El.DistMatrix()
s = El.DistMatrix()

if testMehrotra:
    ctrl.approach = El.QP_MEHROTRA
    ctrl.mehrotraCtrl.primalInit = manualInit
    ctrl.mehrotraCtrl.dualInit = manualInit
    ctrl.mehrotraCtrl.progress = progress
    El.Copy( xOrig, x )
    El.Copy( yOrig, y )
    El.Copy( zOrig, z )
    El.Copy( sOrig, s )
    startMehrotra = time.clock()
    El.QPAffine(Q,A,G,b,c,h,x,y,z,s,ctrl)
    endMehrotra = time.clock()
    if worldRank == 0:
        print "Mehrotra time:", endMehrotra-startMehrotra

    if display:
        El.Display( x, "x Mehrotra" )
        El.Display( y, "y Mehrotra" )
        El.Display( z, "z Mehrotra" )
        El.Display( s, "s Mehrotra" )

    # Objective value: (1/2) x^T Q x + c^T x
    d = El.DistMatrix()
    El.Zeros( d, n, 1 )
    El.Hemv( El.LOWER, 1., Q, x, 0., d )
    obj = El.Dot(x,d)/2 + El.Dot(c,x)
    if worldRank == 0:
        print "Mehrotra (1/2) x^T Q x + c^T x =", obj

if testIPF:
    ctrl.approach = El.QP_IPF
    ctrl.ipfCtrl.primalInit = manualInit
    ctrl.ipfCtrl.dualInit = manualInit
    ctrl.ipfCtrl.progress = progress
    ctrl.ipfCtrl.lineSearchCtrl.progress = progress
    El.Copy( xOrig, x )
    El.Copy( yOrig, y )
    El.Copy( zOrig, z )
    El.Copy( sOrig, s )
    startIPF = time.clock()
    El.QPAffine(Q,A,G,b,c,h,x,y,z,s,ctrl)
    endIPF = time.clock()
    if worldRank == 0:
        print "IPF time:", endIPF-startIPF

    if display:
        El.Display( x, "x IPF" )
        El.Display( y, "y IPF" )
        El.Display( z, "z IPF" )
        El.Display( s, "s IPF" )

    # Objective value: (1/2) x^T Q x + c^T x
    d = El.DistMatrix()
    El.Zeros( d, n, 1 )
    El.Hemv( El.LOWER, 1., Q, x, 0., d )
    obj = El.Dot(x,d)/2 + El.Dot(c,x)
    if worldRank == 0:
        print "IPF c^T x =", obj

# Require the user to press a button before the figures are closed
El.Finalize()
if worldSize == 1:
    raw_input('Press Enter to exit')
|
justusc/Elemental
|
examples/interface/QPAffineDense.py
|
Python
|
bsd-3-clause
| 3,907
|
[
"Gaussian"
] |
b17c723bfa4d60d06489a277a3e9f291771a0b7f496f08d3e95c0c4a82949997
|
import unittest
from pyaria2 import PyAria2
from random import choice
from time import sleep
import os
import shutil
ENABLE_LONG_TESTS = False
ENABLE_VERY_LONG_TESTS = False
def setUpModule():
    # Create the scratch download directory; ignore "already exists" errors.
    try:
        os.mkdir("tests/trash")
    except OSError:
        pass

def tearDownModule():
    # Remove the scratch directory and everything downloaded into it.
    shutil.rmtree("tests/trash")
class Aria2TestCase(unittest.TestCase):
    # Base fixture: one aria2 RPC client without a secret and one with one,
    # each on its own port so tests don't clash.
    def setUp(self):
        self.known_ports = [1025, 1026, 1027]
        self.insecure_aria = PyAria2('localhost', self.known_ports[0])
        self.secure_aria = PyAria2('localhost', self.known_ports[1], rpcSecret={"useSecret":True, "secret":"welovemiyuki"})

    # def tearDown(self):
    #     self.insecure_aria.shutdown()
    #     self.secure_aria.shutdown()
class TestRunningAria(Aria2TestCase):
    """Constructor validation and liveness checks for the aria2 wrapper."""

    def test_constrainedPortNumber(self):
        # we just test > 65535 and < 1024: we have a lot of testing for correct ports :D
        self.assertRaises(AssertionError, PyAria2, ('localhost',), port=50)
        self.assertRaises(AssertionError, PyAria2, ('localhost',), port=100000)
        self.assertRaises(AssertionError, PyAria2, ('localhost',), port=50, rpcSecret={"useSecret":True, "secret":"mfw"})
        self.assertRaises(AssertionError, PyAria2, ('localhost',), port=100000, rpcSecret={"useSecret":True, "secret":"mfw"})

    # Fix: skipUnless takes a *reason string* as its second argument; the
    # original passed the literal True, which made the skip message "True".
    @unittest.skipUnless(ENABLE_VERY_LONG_TESTS, "very long tests disabled")
    def test_isRunning(self):
        def test_it(ariaObj):
            # Up after construction, down (after a grace period) once shut down.
            self.assertEqual(ariaObj.isAria2rpcRunning(), True)
            ariaObj.forceShutdown()
            sleep(3)
            self.assertEqual(ariaObj.isAria2rpcRunning(), False)
        test_it(PyAria2("localhost", self.known_ports[2]))
        test_it(PyAria2("localhost", self.known_ports[2],
                        rpcSecret={"useSecret":True, "secret":"asagiaqt"}))
class TestUri(Aria2TestCase):
    # HTTP URI downloads targeted at the scratch folder.
    def perform_UriAdditionWithFolder(self, ariaObj, uri):
        # Queue a single-URI download into tests/trash.
        return ariaObj.addUri((uri,), options={"dir":"tests/trash"})

    def test_addUriWithFolder(self):
        # FIXME: set big downloads here, we'll have the time to check active data!
        URI = ["http://alfa.moe/sc/0fh17b5", "http://alfa.moe/sc/pn2hzaj"]
        self.perform_UriAdditionWithFolder(self.insecure_aria, URI[0])
        self.perform_UriAdditionWithFolder(self.secure_aria, URI[1])
class TestTorrent(Aria2TestCase):
    # Torrent-file downloads targeted at the scratch folder.
    def perform_TorrentAdditionWithFolder(self, ariaObj, torrent):
        ariaObj.addTorrent(torrent, options={"dir":"tests/trash"})

    def test_addTorrentWithFolder(self):
        # FIXME: download these torrents via aria objects w
        URI = ["./tests/with_torrents/nisemono.torrent"]*2
        self.perform_TorrentAdditionWithFolder(self.insecure_aria, URI[0])
        self.perform_TorrentAdditionWithFolder(self.secure_aria, URI[1])
|
alfateam123/pyaria2
|
tests/test_pyaria2.py
|
Python
|
mit
| 2,870
|
[
"MOE"
] |
7533fc72a01234cf69ce6dbb063a5f5b2572c7ad0ed0c16a5091543dada74a75
|
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
import espressopp
def info(system, integrator, per_atom=False):
    """
    Report on the simulation progress.

    Writes one line to stdout per call: step, temperature, pressure,
    the Pxy component of the pressure tensor, kinetic energy, one
    potential-energy column per interaction, total energy, and box
    length.  A matching header line is written when integrator.step
    is 0.  With per_atom=True all energies are divided by the number
    of particles.
    """
    NPart = espressopp.analysis.NPart(system).compute()
    T = espressopp.analysis.Temperature(system).compute()
    P = espressopp.analysis.Pressure(system).compute()
    Pij = espressopp.analysis.PressureTensor(system).compute()
    step = integrator.step
    # Kinetic energy from equipartition: (3/2) N kB T (kB = 1 in these units).
    Ek = (3.0/2.0) * NPart * T
    # (Removed an unused 'Epot = []' accumulator that was never read.)
    Etotal = 0.0
    if per_atom:
        tot = '%5d %10.4f %10.6f %10.6f %12.8f' % (step, T, P, Pij[3], Ek/NPart)
    else:
        tot = '%5d %10.4f %10.6f %10.6f %12.3f' % (step, T, P, Pij[3], Ek)
    tt = ''
    # One energy column (and header cell) per registered interaction.
    for k in xrange(system.getNumberOfInteractions()):
        e = system.getInteraction(k).computeEnergy()
        Etotal += e
        if per_atom:
            tot += ' %12.8f' % (e/NPart)
            tt += ' e%i/N ' % k
        else:
            tot += ' %12.3f' % e
            tt += ' e%i ' % k
    if per_atom:
        tot += ' %12.8f' % (Etotal/NPart + Ek/NPart)
        tt += ' etotal/N '
    else:
        tot += ' %12.3f' % (Etotal + Ek)
        tt += ' etotal '
    tot += ' %12.8f\n' % system.bc.boxL[0]
    tt += ' boxL \n'
    # Print the column header only once, at the first step.
    if step == 0:
        if per_atom:
            sys.stdout.write(' step T P Pxy ekin/N ' + tt)
        else:
            sys.stdout.write(' step T P Pxy ekin ' + tt)
    sys.stdout.write(tot)
def final_info(system, integrator, vl, start_time, end_time):
    """
    Print closing statistics: timers, neighbor-list figures, step count,
    and wall time per CPU.
    """
    n_particles = espressopp.analysis.NPart(system).compute()
    espressopp.tools.timers.show(integrator.getTimers(), precision=3)
    out = sys.stdout
    out.write('Total # of neighbors = %d\n' % vl.totalSize())
    out.write('Ave neighs/atom = %.1f\n' % (vl.totalSize() / float(n_particles)))
    out.write('Neighbor list builds = %d\n' % vl.builds)
    out.write('Integration steps = %d\n' % integrator.step)
    out.write('CPUs = %i CPU time per CPU = %.5f\n' % (espressopp.MPI.COMM_WORLD.size, end_time - start_time))
|
kkreis/espressopp
|
src/tools/analyse.py
|
Python
|
gpl-3.0
| 2,918
|
[
"ESPResSo"
] |
04e45ab9357c3b7d840cb58f78e82d6696f5894f8c98a668847f0c36fdd38f81
|
# Copyright 2012 Patrick Varilly, Stefano Angioletti-Uberti
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
=======================================================================
Consistent units definitions for molecular systems (:mod:`dnacc.units`)
=======================================================================
.. currentmodule:: dnacc.units
The internal base units are GROMACS units, which are useful,
consistent, and internally yield numbers that are close to 1 in
most atomistic setups:
- Length = nm
- Mass = amu
- Time = ps (together yield natural energy unit of kJ/mol)
- Current = e / ps
- Temperature = K
Electrostatic formulas have all the usual SI prefactors. We don't
bother with the units relating to light (cd and related units) since
they are mostly irrelevant for condensed matter simulations.
Initially, we express the SI base units in terms of GROMACS units,
then derive all other units from them. For internal convenience,
we actually use g instead of kg for defining masses.
Almost all unit definitions also set up the related prefixed units
from femto-X to tera-X.
Whenever you read in a unitful quantity from the user, multiply
it by the relevant units. For example,
>>> myLength = fields[5] * units.nm
Whenever you output unitful quantities, divide by the units you want
to use. For example,
>>> print("Total energy = %.2f kcal/mol" %
(E / (units.kcal / units.mol)))
.. note::
The unit "Celsius" is not defined explicitly to not confuse it with
"Coulombs".
Base units
++++++++++
All of these can be prefixed with the usual SI prefixes, e.g. ``nm`` for
nanometer.
.. autodata:: m
.. autodata:: g
.. autodata:: s
.. autodata:: Ampere
.. autodata:: K
.. autodata:: mol
By default, the unit for Amperes is :data:`.Ampere`, not ``A`` as usual.
To change this behaviour, call :func:`.add_amperes_unit`.
.. autofunction:: add_amperes_unit
Derived units
+++++++++++++
Most of these can also be prefixed with the usual SI prefixes, e.g. ``pN``
for pico-Newton.
.. autodata:: Hz
.. autodata:: rad
.. autodata:: sr
.. autodata:: N
.. autodata:: Pa
.. autodata:: J
.. autodata:: W
.. autodata:: C
.. autodata:: V
.. autodata:: F
.. autodata:: Ohm
.. autodata:: S
.. autodata:: Wb
.. autodata:: T
.. autodata:: H
Non-SI units
++++++++++++
.. autodata:: min
.. autodata:: h
.. autodata:: d
.. autodata:: degree
.. autodata:: arcmin
.. autodata:: arcsec
.. autodata:: ha
.. autodata:: L
.. autodata:: t
Physics units
+++++++++++++
Some units which are more properly considered physical constants are
defined in the :mod:`.physics` module.
.. autodata:: G
.. autodata:: bar
.. autodata:: atm
.. autodata:: Torr
.. autodata:: mmHg
.. autodata:: P
Chemistry units
+++++++++++++++
Units that pop up regularly in chemistry.
.. autodata:: AA
.. autodata:: cal
.. autodata:: kcal
.. autodata:: M
.. autodata:: cc
"""
from math import pi
_GSL_CONST_MKSA_UNIFIED_ATOMIC_MASS = 1.660538782e-27 # kg
_GSL_CONST_MKSA_ELECTRON_CHARGE = 1.602176487e-19 # A s
_GSL_CONST_NUM_AVOGADRO = 6.02214199e23 # 1 / mol
_GSL_CONST_MKSA_GAUSS = 1e-4 # kg / A s^2
_GSL_CONST_MKSA_BAR = 1e5 # kg / m s^2
_GSL_CONST_MKSA_STD_ATMOSPHERE = 1.01325e5 # kg / m s^2
_GSL_CONST_MKSA_TORR = 1.33322368421e2 # kg / m s^2
_GSL_CONST_MKSA_METER_OF_MERCURY = 1.33322368421e5 # kg / m s^2
#: Meter
m = 1e9
#: Gram
g = 1e-3 / _GSL_CONST_MKSA_UNIFIED_ATOMIC_MASS
#: Second
s = 1e12
#: Ampere
Ampere = (1. / _GSL_CONST_MKSA_ELECTRON_CHARGE) / s
#: Kelvin
K = 1.0
#: mole
mol = _GSL_CONST_NUM_AVOGADRO
def add_amperes_unit():
    """Define ``A`` as the unit Ampere.
    By default, this definition is disabled to avoid accidental mixups with
    the unit for Angstroms (:data:`.AA`). This mixup can result in some
    thoroughly puzzling bugs."""
    # Opt-in: rebinding the module-level name makes ``units.A`` usable
    # everywhere after this call.
    global A
    A = Ampere
# Multiplier for each supported SI prefix (femto through tera);
# consumed by _add_prefixes() below.
_SI_prefixes = {
    'f': 1e-15,
    'p': 1e-12,
    'n': 1e-9,
    'u': 1e-6,
    'm': 1e-3,
    'c': 1e-2,
    'd': 1e-1,
    'da': 1e+1,
    'h': 1e+2,
    'k': 1e+3,
    'M': 1e+6,
    'G': 1e+9,
    'T': 1e+12
}
def _add_prefixes(name, realname=None):
    """Register prefixed variants (fX ... TX) of a base unit in this module.

    *name* is the suffix used for the generated names; *realname*, when
    given, is the module-level attribute holding the base value (e.g.
    prefixes of 'A' are derived from 'Ampere').
    """
    module_ns = globals()
    base_value = module_ns[realname or name]
    for prefix in _SI_prefixes:
        module_ns[prefix + name] = _SI_prefixes[prefix] * base_value
_add_prefixes('m')
_add_prefixes('g')
_add_prefixes('s')
_add_prefixes('A', 'Ampere')
_add_prefixes('K')
_add_prefixes('mol')
# Named derived units, after
# http://en.wikipedia.org/wiki/SI_derived_unit
#: Hertz
Hz = 1 / s
#: Radian
rad = m / m
#: Steradian
sr = m ** 2 / m ** 2
#: Newton
N = kg * m / s ** 2
#: Pascal
Pa = N / m ** 2
#: Joule
J = N * m
#: Watt
W = J / s
#: Coulomb
C = Ampere * s
#: Volt
V = J / C
#: Farad
F = C / V
#: Ohm
Ohm = V / Ampere
#: Siemens
S = 1 / Ohm
#: Weber
Wb = J / Ampere
#: Tesla
T = Wb / m ** 2
#: Henry
H = Wb / Ampere
# degree C is dangerous, confused with Coulombs
#degC = K # Celsius
# Some of the other units are irrelevant for what I do...
_add_prefixes('Hz')
_add_prefixes('N')
_add_prefixes('Pa')
_add_prefixes('J')
_add_prefixes('W')
_add_prefixes('C')
_add_prefixes('V')
_add_prefixes('F')
_add_prefixes('Ohm')
# Now some official non-SI units, after
# http://en.wikipedia.org/wiki/Non-SI_units_accepted_for_use_with_SI
#: minute
min = 60 * s
#: hour
h = 60 * min
#: day
d = 24 * h
#: degree of arc
degree = (pi / 180.0)
#: arc-minute
arcmin = degree / 60.0
#: arc-second
arcsec = arcmin / 60.0
#: hectare
ha = 10000 * m ** 2
#: litre
L = dm ** 3
#: tonne
t = 1e3 * kg
# Some physics-based units are defined in physics.py, some here
#: Gauss
G = _GSL_CONST_MKSA_GAUSS * T
#: bar (prefixed versions available, e.g. mbar = millibar)
bar = _GSL_CONST_MKSA_BAR * Pa
_add_prefixes('bar')
#: atmosphere
atm = _GSL_CONST_MKSA_STD_ATMOSPHERE * Pa
#: Torr
Torr = _GSL_CONST_MKSA_TORR * Pa
#: mmHg
mmHg = 1e-3 * _GSL_CONST_MKSA_METER_OF_MERCURY * Pa
# Viscosity
#: Poise (prefixed versions available, e.g. cP = centipoise)
P = 1 * g / (cm * s)
_add_prefixes('P')
# Some useful units in chemistry
# ==============================
#: Angstrom. See also :func:`.add_amperes_unit`.
AA = 1e-10 * m
cal = 4.184 * J
"""Thermochemical calorie
.. note::
The calorie here is the thermochemical calorie (4.184 J), not the
International Steam Table calorie (4.1868 J) used in GSL.
"""
#: kilocalorie
kcal = 1e3 * cal
#: molar (prefixed versions available, e.g. mM = millimolar)
M = mol / L
_add_prefixes('M')
#: cubic centimeter
cc = cm ** 3
|
patvarilly/DNACC
|
dnacc/units.py
|
Python
|
gpl-3.0
| 7,113
|
[
"Gromacs"
] |
7a82b3cdbcbeb74e0435d243b0ebc7ed87e15bc9a04d9a8473e41d6a7067a895
|
#
# Copyright (c) 2009-2015, Jack Poulson
# All rights reserved.
#
# This file is part of Elemental and is under the BSD 2-Clause License,
# which can be found in the LICENSE file in the root directory, or at
# http://opensource.org/licenses/BSD-2-Clause
#
import El
# Problem dimensions of the sparse LP: m constraints, n variables.
m = 2000
n = 4000
# Which interior-point solvers to exercise.
testMehrotra = True
testIPF = False
# Optional manual initial guesses and output switches.
manualInit = False
display = False
progress = True
worldRank = El.mpi.WorldRank()
worldSize = El.mpi.WorldSize()
# Make a sparse matrix with the last column dense
def Rectang(height,width):
    # Build a height-by-width distributed sparse matrix: a banded pattern
    # (diagonals at offsets -height, -1, 0, +1, +height) plus a dense last
    # column, so the LP solver is exercised on a structurally dense column.
    A = El.DistSparseMatrix()
    A.Resize(height,width)
    localHeight = A.LocalHeight()
    # Five potential entries per locally-owned row are reserved.
    A.Reserve(5*localHeight)
    for sLoc in xrange(localHeight):
        s = A.GlobalRow(sLoc)
        if s < width:
            A.QueueUpdate( s, s, 11 )
        if s >= 1 and s-1 < width:
            A.QueueUpdate( s, s-1, -1 )
        if s+1 < width:
            A.QueueUpdate( s, s+1, 2 )
        if s >= height and s-height < width:
            A.QueueUpdate( s, s-height, -3 )
        if s+height < width:
            A.QueueUpdate( s, s+height, 4 )
        # The dense last column
        # NOTE(review): this file is Python 2, so -5/height is *floor*
        # division and evaluates to -1 for any height > 5; confirm that a
        # float value -5.0/height was not intended instead.
        A.QueueUpdate( s, width-1, -5/height );
    A.ProcessQueues()
    return A
A = Rectang(m,n)
# Generate a b which implies a primal feasible x
# ==============================================
xGen = El.DistMultiVec()
El.Uniform(xGen,n,1,0.5,0.5)
b = El.DistMultiVec()
El.Zeros( b, m, 1 )
El.Multiply( El.NORMAL, 1., A, xGen, 0., b )
# Generate a c which implies a dual feasible (y,z)
# ================================================
yGen = El.DistMultiVec()
El.Gaussian(yGen,m,1)
c = El.DistMultiVec()
El.Uniform(c,n,1,0.5,0.5)
El.Multiply( El.TRANSPOSE, -1., A, yGen, 1., c )
if display:
El.Display( A, "A" )
El.Display( b, "b" )
El.Display( c, "c" )
# Set up the control structure (and possibly initial guesses)
# ===========================================================
ctrl = El.LPDirectCtrl_d(isSparse=True)
xOrig = El.DistMultiVec()
yOrig = El.DistMultiVec()
zOrig = El.DistMultiVec()
if manualInit:
El.Uniform(xOrig,n,1,0.5,0.4999)
El.Uniform(yOrig,m,1,0.5,0.4999)
El.Uniform(zOrig,n,1,0.5,0.4999)
x = El.DistMultiVec()
y = El.DistMultiVec()
z = El.DistMultiVec()
if testMehrotra:
ctrl.approach = El.LP_MEHROTRA
ctrl.mehrotraCtrl.primalInit = manualInit
ctrl.mehrotraCtrl.dualInit = manualInit
ctrl.mehrotraCtrl.progress = progress
El.Copy( xOrig, x )
El.Copy( yOrig, y )
El.Copy( zOrig, z )
startMehrotra = El.mpi.Time()
El.LPDirect(A,b,c,x,y,z,ctrl)
endMehrotra = El.mpi.Time()
if worldRank == 0:
print "Mehrotra time:", endMehrotra-startMehrotra
if display:
El.Display( x, "x Mehrotra" )
El.Display( y, "y Mehrotra" )
El.Display( z, "z Mehrotra" )
obj = El.Dot(c,x)
if worldRank == 0:
print "Mehrotra c^T x =", obj
if testIPF:
ctrl.approach = El.LP_IPF
ctrl.ipfCtrl.primalInit = manualInit
ctrl.ipfCtrl.dualInit = manualInit
ctrl.ipfCtrl.progress = progress
ctrl.ipfCtrl.lineSearchCtrl.progress = progress
El.Copy( xOrig, x )
El.Copy( yOrig, y )
El.Copy( zOrig, z )
startIPF = El.mpi.Time()
El.LPDirect(A,b,c,x,y,z,ctrl)
endIPF = El.mpi.Time()
if worldRank == 0:
print "IPF time:", endIPF-startIPF
if display:
El.Display( x, "x IPF" )
El.Display( y, "y IPF" )
El.Display( z, "z IPF" )
obj = El.Dot(c,x)
if worldRank == 0:
print "IPF c^T x =", obj
# Require the user to press a button before the figures are closed
El.Finalize()
if worldSize == 1:
raw_input('Press Enter to exit')
|
birm/Elemental
|
examples/interface/LPDirect.py
|
Python
|
bsd-3-clause
| 3,471
|
[
"Gaussian"
] |
0da09b6defe420afcf655ae32961d29a22cbf82129eec8a381a44ae425c5a006
|
'''
Arthur Glowacki
APS ANL
10/17/2014
'''
import vtk
from Model import Model
from random import random
class CubeModel(Model):
    """A unit cube centred at the origin."""

    def __init__(self):
        Model.__init__(self)
        cube = vtk.vtkCubeSource()
        cube.SetCenter(0.0, 0.0, 0.0)
        cube.SetXLength(1.0)
        cube.SetYLength(1.0)
        cube.SetZLength(1.0)
        cube.Update()
        self.source = cube
        self.Update()
class SphereModel(Model):
    """A sphere of radius 0.5 centred at the origin."""

    def __init__(self):
        Model.__init__(self)
        sphere = vtk.vtkSphereSource()
        sphere.SetCenter(0.0, 0.0, 0.0)
        sphere.SetRadius(0.5)
        sphere.Update()
        self.source = sphere
        self.Update()
class MultiSphereModel(Model):
    """A cluster of randomly placed spheres plus one sphere at the origin."""

    def __init__(self, amt, radius):
        Model.__init__(self)
        self.source = vtk.vtkAppendPolyData()
        for _ in range(amt):
            # One random() draw per axis (x, y, z order) decides the sign,
            # matching the original sampling sequence exactly.
            sign_x = -1.0 if random() > 0.5 else 1.0
            sign_y = -1.0 if random() > 0.5 else 1.0
            sign_z = -1.0 if random() > 0.5 else 1.0
            # Radius in [0.25, 0.5); position within +/- radius per axis.
            sphere_radius = 0.25 + (random() * 0.25)
            cx = float(random() * radius) * sign_x
            cy = float(random() * radius) * sign_y
            cz = float(random() * radius) * sign_z
            sphere = vtk.vtkSphereSource()
            sphere.SetCenter(cx, cy, cz)
            sphere.SetRadius(float(sphere_radius))
            sphere.Update()
            self.source.AddInput(sphere.GetOutput())
        # One extra sphere marks the centre of the cluster.
        centre = vtk.vtkSphereSource()
        centre.SetCenter(0.0, 0.0, 0.0)
        centre.SetRadius(0.5)
        centre.Update()
        self.source.AddInput(centre.GetOutput())
        self.Update()
|
aglowacki/ScanSimulator
|
PrimitiveModels.py
|
Python
|
gpl-2.0
| 1,340
|
[
"VTK"
] |
8dbbf19f3d86e416de37fe5f7efbb4482059a9a96f8766c82d207f76c3e43cef
|
symbols = [
["Lower-case Greek",
5,
r"""\alpha \beta \gamma \chi \delta \epsilon \eta \iota \kappa
\lambda \mu \nu \omega \phi \pi \psi \rho \sigma \tau \theta
\upsilon \xi \zeta \digamma \varepsilon \varkappa \varphi
\varpi \varrho \varsigma \vartheta"""],
["Upper-case Greek",
6,
r"""\Delta \Gamma \Lambda \Omega \Phi \Pi \Psi \Sigma \Theta
\Upsilon \Xi \mho \nabla"""],
["Hebrew",
4,
r"""\aleph \beth \daleth \gimel"""],
["Delimiters",
6,
r"""| \{ \lfloor / \Uparrow \llcorner \vert \} \rfloor \backslash
\uparrow \lrcorner \| \langle \lceil [ \Downarrow \ulcorner
\Vert \rangle \rceil ] \downarrow \urcorner"""],
["Big symbols",
5,
r"""\bigcap \bigcup \bigodot \bigoplus \bigotimes \biguplus
\bigvee \bigwedge \coprod \oint \prod \sum \int"""],
["Standard function names",
4,
r"""\arccos \csc \ker \min \arcsin \deg \lg \Pr \arctan \det \lim
\gcd \ln \sup \cot \hom \log \tan \coth \inf \max \tanh
\sec \arg \dim \liminf \sin \cos \exp \limsup \sinh \cosh"""],
["Binary operation and relation symbols",
3,
r"""\ast \pm \slash \cap \star \mp \cup \cdot \uplus
\triangleleft \circ \odot \sqcap \triangleright \bullet \ominus
\sqcup \bigcirc \oplus \wedge \diamond \oslash \vee
\bigtriangledown \times \otimes \dag \bigtriangleup \div \wr
\ddag \barwedge \veebar \boxplus \curlywedge \curlyvee \boxminus
\Cap \Cup \boxtimes \bot \top \dotplus \boxdot \intercal
\rightthreetimes \divideontimes \leftthreetimes \equiv \leq \geq
\perp \cong \prec \succ \mid \neq \preceq \succeq \parallel \sim
\ll \gg \bowtie \simeq \subset \supset \Join \approx \subseteq
\supseteq \ltimes \asymp \sqsubset \sqsupset \rtimes \doteq
\sqsubseteq \sqsupseteq \smile \propto \dashv \vdash \frown
\models \in \ni \notin \approxeq \leqq \geqq \lessgtr \leqslant
\geqslant \lesseqgtr \backsim \lessapprox \gtrapprox \lesseqqgtr
\backsimeq \lll \ggg \gtreqqless \triangleq \lessdot \gtrdot
\gtreqless \circeq \lesssim \gtrsim \gtrless \bumpeq \eqslantless
\eqslantgtr \backepsilon \Bumpeq \precsim \succsim \between
\doteqdot \precapprox \succapprox \pitchfork \Subset \Supset
\fallingdotseq \subseteqq \supseteqq \risingdotseq \sqsubset
\sqsupset \varpropto \preccurlyeq \succcurlyeq \Vdash \therefore
\curlyeqprec \curlyeqsucc \vDash \because \blacktriangleleft
\blacktriangleright \Vvdash \eqcirc \trianglelefteq
\trianglerighteq \neq \vartriangleleft \vartriangleright \ncong
\nleq \ngeq \nsubseteq \nmid \nsupseteq \nparallel \nless \ngtr
\nprec \nsucc \subsetneq \nsim \supsetneq \nVDash \precnapprox
\succnapprox \subsetneqq \nvDash \precnsim \succnsim \supsetneqq
\nvdash \lnapprox \gnapprox \ntriangleleft \ntrianglelefteq
\lneqq \gneqq \ntriangleright \lnsim \gnsim \ntrianglerighteq
\coloneq \eqsim \nequiv \napprox \nsupset \doublebarwedge \nVdash
\Doteq \nsubset \eqcolon \ne
"""],
["Arrow symbols",
2,
r"""\leftarrow \longleftarrow \uparrow \Leftarrow \Longleftarrow
\Uparrow \rightarrow \longrightarrow \downarrow \Rightarrow
\Longrightarrow \Downarrow \leftrightarrow \updownarrow
\longleftrightarrow \updownarrow \Leftrightarrow
\Longleftrightarrow \Updownarrow \mapsto \longmapsto \nearrow
\hookleftarrow \hookrightarrow \searrow \leftharpoonup
\rightharpoonup \swarrow \leftharpoondown \rightharpoondown
\nwarrow \rightleftharpoons \leadsto \dashrightarrow
\dashleftarrow \leftleftarrows \leftrightarrows \Lleftarrow
\Rrightarrow \twoheadleftarrow \leftarrowtail \looparrowleft
\leftrightharpoons \curvearrowleft \circlearrowleft \Lsh
\upuparrows \upharpoonleft \downharpoonleft \multimap
\leftrightsquigarrow \rightrightarrows \rightleftarrows
\rightrightarrows \rightleftarrows \twoheadrightarrow
\rightarrowtail \looparrowright \rightleftharpoons
\curvearrowright \circlearrowright \Rsh \downdownarrows
\upharpoonright \downharpoonright \rightsquigarrow \nleftarrow
\nrightarrow \nLeftarrow \nRightarrow \nleftrightarrow
\nLeftrightarrow \to \Swarrow \Searrow \Nwarrow \Nearrow
\leftsquigarrow
"""],
["Miscellaneous symbols",
3,
r"""\neg \infty \forall \wp \exists \bigstar \angle \partial
\nexists \measuredangle \eth \emptyset \sphericalangle \clubsuit
\varnothing \complement \diamondsuit \imath \Finv \triangledown
\heartsuit \jmath \Game \spadesuit \ell \hbar \vartriangle \cdots
\hslash \vdots \blacksquare \ldots \blacktriangle \ddots \sharp
\prime \blacktriangledown \Im \flat \backprime \Re \natural
\circledS \P \copyright \ss \circledR \S \yen \AA \checkmark \$
\iiint \iint \iint \oiiint"""]
]
def run(state_machine):
    """Build rst symbol tables for every category and feed them to docutils."""
    def get_n(n, l):
        # Yield successive chunks of up to n items; the final remainder is
        # yielded too, even when empty (the table layout relies on this).
        part = []
        for x in l:
            part.append(x)
            if len(part) == n:
                yield part
                part = []
        yield part
    lines = []
    for category, columns, syms in symbols:
        syms = syms.split()
        syms.sort()
        lines.append("**%s**" % category)
        lines.append('')
        # Cell width: longest symbol appears twice per cell (math + literal)
        # plus fixed markup overhead.
        max_width = 0
        for sym in syms:
            max_width = max(max_width, len(sym))
        max_width = max_width * 2 + 16
        header = " " + (('=' * max_width) + ' ') * columns
        format = '%%%ds' % max_width
        # Nested get_n: rows of `columns` cells, grouped 20 rows per table.
        for chunk in get_n(20, get_n(columns, syms)):
            lines.append(header)
            for part in chunk:
                line = []
                for sym in part:
                    line.append(format % (":math:`%s` ``%s``" % (sym, sym)))
                lines.append(" " + " ".join(line))
            lines.append(header)
            lines.append('')
    state_machine.insert_input(lines, "Symbol table")
    return []
# Register the directive with docutils.  Newer docutils exposes a Directive
# base class; very old versions only offer the _directives registry, so both
# registration styles are supported.
try:
    from docutils.parsers.rst import Directive
except ImportError:
    from docutils.parsers.rst.directives import _directives
    # Old-style function directive (pre-Directive docutils).
    def math_symbol_table_directive(name, arguments, options, content, lineno,
                                    content_offset, block_text, state, state_machine):
        return run(state_machine)
    math_symbol_table_directive.arguments = None
    math_symbol_table_directive.options = {}
    math_symbol_table_directive.content = False
    _directives['math_symbol_table'] = math_symbol_table_directive
else:
    # New-style class directive.
    class math_symbol_table_directive(Directive):
        has_content = False
        def run(self):
            return run(self.state_machine)
    from docutils.parsers.rst import directives
    directives.register_directive('math_symbol_table',
                                  math_symbol_table_directive)
if __name__ == "__main__":
# Do some verification of the tables
from matplotlib import _mathtext_data
print "SYMBOLS NOT IN STIX:"
all_symbols = {}
for category, columns, syms in symbols:
if category == "Standard Function Names":
continue
syms = syms.split()
for sym in syms:
if len(sym) > 1:
all_symbols[sym[1:]] = None
if sym[1:] not in _mathtext_data.tex2uni:
print sym
print "SYMBOLS NOT IN TABLE:"
for sym in _mathtext_data.tex2uni:
if sym not in all_symbols:
print sym
|
M-R-Houghton/euroscipy_2015
|
stats/_sphinxext/math_symbol_table.py
|
Python
|
mit
| 7,432
|
[
"Bowtie"
] |
813deaaf3d2eb277de295cb150a4b8d1e347d342e43c3df31d8187c7f94fe25c
|
#!/usr/bin/env python
#
# Appcelerator Titanium Module Packager
#
#
import os, sys, glob, string
import zipfile
from datetime import date
try:
import json
except:
import simplejson as json
cwd = os.path.abspath(os.path.dirname(sys._getframe(0).f_code.co_filename))
os.chdir(cwd)
# Keys every manifest must define before the module can be packaged.
# (Fixed: 'copyright' was listed twice.)
required_module_keys = ['name','version','moduleid','description','copyright','license','platform','minsdk']
# Placeholder values shipped with a freshly generated module; packaging
# warns when a manifest still carries one of these defaults.
module_defaults = {
    'description':'My module',
    'author': 'Your Name',
    'license' : 'Specify your license',
    'copyright' : 'Copyright (c) %s by Your Company' % str(date.today().year),
}
# Sentinel text used to detect an unedited LICENSE file.
module_license_default = "TODO: place your license here and we'll include it in the module distribution"
def find_sdk(config):
    """Return the configured Titanium SDK path with ~ and $VARs expanded."""
    return os.path.expandvars(os.path.expanduser(config['TITANIUM_SDK']))
def replace_vars(config,token):
    """Expand every $(KEY) placeholder in *token* using values from *config*.

    Expansion stops at the first unterminated '$(' or at a key missing
    from *config*.
    """
    start = token.find('$(')
    while start != -1:
        end = token.find(')', start + 2)
        if end == -1:
            break
        key = token[start + 2:end]
        if not config.has_key(key):
            break
        token = token.replace('$(%s)' % key, config[key])
        start = token.find('$(')
    return token
def read_ti_xcconfig():
    """Parse titanium.xcconfig into a dict, expanding $(VAR) references."""
    raw = open(os.path.join(cwd,'titanium.xcconfig')).read()
    config = {}
    for line in raw.splitlines(False):
        line = line.strip()
        # Skip '//' comment lines; keep anything of the form KEY=VALUE.
        if line[0:2] == '//':
            continue
        eq_pos = line.find('=')
        if eq_pos > 0:
            key = line[0:eq_pos].strip()
            config[key] = replace_vars(config, line[eq_pos + 1:].strip())
    return config
def generate_doc(config):
docdir = os.path.join(cwd,'documentation')
if not os.path.exists(docdir):
print "Couldn't find documentation file at: %s" % docdir
return None
sdk = find_sdk(config)
support_dir = os.path.join(sdk,'module','support')
sys.path.append(support_dir)
documentation = []
for file in os.listdir(docdir):
if file in ignoreFiles or os.path.isdir(os.path.join(docdir, file)):
continue
html = open(os.path.join(docdir,file)).read()
if file.lower().endswith('.md'):
html = markdown.markdown(html)
else:
documentation.append({file:html});
return documentation
def compile_js(manifest,config):
    # Precompile assets/bencoding.sms.js with the Titanium SDK's Compiler
    # and splice the generated function into BencodingSmsModuleAssets.m,
    # recording the exported symbols in metadata.json.
    js_file = os.path.join(cwd,'assets','bencoding.sms.js')
    # Nothing to do if the module ships no CommonJS asset.
    if not os.path.exists(js_file): return
    sdk = find_sdk(config)
    iphone_dir = os.path.join(sdk,'iphone')
    # The SDK's iphone dir provides the 'compiler' module imported below.
    sys.path.insert(0,iphone_dir)
    from compiler import Compiler
    path = os.path.basename(js_file)
    compiler = Compiler(cwd, manifest['moduleid'], manifest['name'], 'commonjs')
    metadata = compiler.make_function_from_file(path,js_file)
    exports = open('metadata.json','w')
    json.dump({'exports':compiler.exports }, exports)
    exports.close()
    method = metadata['method']
    eq = path.replace('.','_')
    method = ' return %s;' % method
    # Rewrite everything from the first 'return ' of the assets file with
    # the newly generated method body.
    f = os.path.join(cwd,'Classes','BencodingSmsModuleAssets.m')
    c = open(f).read()
    idx = c.find('return ')
    before = c[0:idx]
    after = """
}
@end
"""
    newc = before + method + after
    # Only touch the file when its content actually changed, so Xcode does
    # not rebuild unnecessarily.
    if newc!=c:
        x = open(f,'w')
        x.write(newc)
        x.close()
def die(msg):
    """Report a fatal packaging error and exit with status 1."""
    sys.stdout.write('%s\n' % msg)
    sys.exit(1)
def warn(msg):
    """Report a non-fatal packaging problem on stdout."""
    sys.stdout.write("[WARN] %s\n" % msg)
def validate_license():
    """Warn when LICENSE still contains the generated placeholder text."""
    license_text = open(os.path.join(cwd,'LICENSE')).read()
    if module_license_default in license_text:
        warn('please update the LICENSE file with your license text before distributing')
def validate_manifest():
    """Parse and sanity-check the module manifest.

    Returns (manifest_dict, manifest_path).  Dies when the manifest is
    missing or lacks a required key; warns when a key still carries its
    generated default value.
    """
    path = os.path.join(cwd,'manifest')
    # Fixed: check for existence *before* opening, so a missing manifest
    # produces the intended die() message rather than an IOError traceback.
    if not os.path.exists(path): die("missing %s" % path)
    f = open(path)
    manifest = {}
    for line in f.readlines():
        line = line.strip()
        if line[0:1]=='#': continue
        if line.find(':') < 0: continue
        # Fixed: split on the first ':' only, so values containing colons
        # (URLs, timestamps) no longer raise ValueError.
        key,value = line.split(':',1)
        manifest[key.strip()]=value.strip()
    for key in required_module_keys:
        if not manifest.has_key(key): die("missing required manifest key '%s'" % key)
        if module_defaults.has_key(key):
            defvalue = module_defaults[key]
            curvalue = manifest[key]
            if curvalue==defvalue: warn("please update the manifest key: '%s' to a non-default value" % key)
    return manifest,path
ignoreFiles = ['.DS_Store','.gitignore','libTitanium.a','titanium.jar','README','bencoding.sms.js']
ignoreDirs = ['.DS_Store','.svn','.git','CVSROOT']
def zip_dir(zf,dir,basepath,ignore=[]):
    """Recursively add *dir* to *zf*, rooted at *basepath* inside the archive.

    (The *ignore* parameter is kept for interface compatibility; pruning is
    driven by the module-level ignoreDirs/ignoreFiles lists.)
    """
    for root, dirs, files in os.walk(dir):
        # Prune ignored directory names in place so os.walk never descends
        # into them.
        for skipped in ignoreDirs:
            if skipped in dirs:
                dirs.remove(skipped)
        for filename in files:
            if filename in ignoreFiles:
                continue
            ext = os.path.splitext(filename)
            if len(ext) == 2 and ext[1] == '.pyc':
                continue
            src = os.path.join(root, filename)
            arcname = src.replace(dir, basepath, 1)
            zf.write(src, arcname)
def glob_libfiles():
    """Return the Release-configuration static libraries found under build/."""
    return [lib for lib in glob.glob('build/**/*.a') if lib.find('Release-') != -1]
def build_module(manifest,config):
    # Build the device and simulator static libraries in Release
    # configuration, then merge the per-arch libs into one fat library
    # with lipo.
    rc = os.system("xcodebuild -sdk iphoneos -configuration Release")
    if rc != 0:
        die("xcodebuild failed")
    rc = os.system("xcodebuild -sdk iphonesimulator -configuration Release")
    if rc != 0:
        die("xcodebuild failed")
    # build the merged library using lipo
    moduleid = manifest['moduleid']
    libpaths = ''
    for libfile in glob_libfiles():
        libpaths+='%s ' % libfile
    os.system("lipo %s -create -output build/lib%s.a" %(libpaths,moduleid))
def package_module(manifest,mf,config):
    # Assemble the distributable <moduleid>-iphone-<version>.zip containing
    # the manifest, the fat library, rendered documentation, asset/example/
    # platform folders, LICENSE, module.xcconfig and the JS export metadata.
    # NB: 'name' is computed but currently unused here.
    name = manifest['name'].lower()
    moduleid = manifest['moduleid'].lower()
    version = manifest['version']
    modulezip = '%s-iphone-%s.zip' % (moduleid,version)
    # Start from a clean archive.
    if os.path.exists(modulezip): os.remove(modulezip)
    zf = zipfile.ZipFile(modulezip, 'w', zipfile.ZIP_DEFLATED)
    modulepath = 'modules/iphone/%s/%s' % (moduleid,version)
    zf.write(mf,'%s/manifest' % modulepath)
    libname = 'lib%s.a' % moduleid
    zf.write('build/%s' % libname, '%s/%s' % (modulepath,libname))
    docs = generate_doc(config)
    if docs!=None:
        for doc in docs:
            for file, html in doc.iteritems():
                # Markdown sources were converted to HTML; rename accordingly.
                filename = string.replace(file,'.md','.html')
                zf.writestr('%s/documentation/%s'%(modulepath,filename),html)
    for dn in ('assets','example','platform'):
        if os.path.exists(dn):
            zip_dir(zf,dn,'%s/%s' % (modulepath,dn),['README'])
    zf.write('LICENSE','%s/LICENSE' % modulepath)
    zf.write('module.xcconfig','%s/module.xcconfig' % modulepath)
    exports_file = 'metadata.json'
    if os.path.exists(exports_file):
        zf.write(exports_file, '%s/%s' % (modulepath, exports_file))
    zf.close()
if __name__ == '__main__':
    # Packaging pipeline: validate manifest/license, read SDK config,
    # precompile the JS asset, run xcodebuild, and zip the module.
    manifest,mf = validate_manifest()
    validate_license()
    config = read_ti_xcconfig()
    compile_js(manifest,config)
    build_module(manifest,config)
    package_module(manifest,mf,config)
    sys.exit(0)
|
benbahrenburg/benCoding.SMS
|
build.py
|
Python
|
apache-2.0
| 6,402
|
[
"VisIt"
] |
286654d5dbf933a79307fe922d684b3926fc027e8163e1933b0828bfe6f90481
|
"""riboSeqR Galaxy unit tests"""
import unittest
from riboseqr import utils
class PrepareTestCase(unittest.TestCase):
    """Unit tests for riboseqr.utils.process_args type/format conversion."""

    def test_process_args(self):
        """Test processing arguments. """
        # "ATG" -> c("ATG")
        rs = utils.process_args('ATG', ret_mode='charvector')
        self.assertEqual(rs, 'c("ATG")','Return string as a character vector.')
        # stop codons "TAG,TAA,TGA" -> c("TAG", "TAA", "TGA"). Also
        # replicate names, seqnames.
        # NOTE(review): this case expects single-quoted elements while the
        # previous one expects double quotes — confirm that asymmetry is
        # intentional in process_args.
        rs = utils.process_args('TAG,TAA,TGA', ret_mode='charvector')
        self.assertEqual(
            rs, "c('TAG', 'TAA', 'TGA')",
            'Return comma separated strings as a character vector.')
        # "" -> None
        rs = utils.process_args('')
        self.assertIsNone(rs, 'Return empty string as None.')
        # "27,28" -> c(27, 28)
        rs = utils.process_args("27,28", ret_type='int', ret_mode='charvector')
        self.assertEqual(
            rs, "c(27, 28)", 'Return number strings as a character vector.')
        # "27,28" -> [27, 28]
        rs = utils.process_args("27,28", ret_type='int', ret_mode='list')
        self.assertEqual(rs, [27, 28], 'Return number strings as a list.')
        # "0,2" -> list(0,2)
        rs = utils.process_args("0,2", ret_type='int', ret_mode='listvector')
        self.assertEqual(
            rs, "list(0, 2)", 'Return number strings as a list vector.')
        # "50" -> 50
        rs = utils.process_args("50", ret_type='int')
        self.assertEqual(rs, 50, 'Return number string as a number.')
        # "-200" -> -200
        rs = utils.process_args("-200", ret_type='int')
        self.assertEqual(rs, -200, 'Return number string as a number.')
        # "TRUE" -> TRUE
        rs = utils.process_args("TRUE", ret_type='bool')
        self.assertEqual(rs, 'TRUE', 'Return bool string as bool.')
        # 'chlamy17,chlamy3' -> 'chlamy17,chlamy3' for ribo and rna names
        rs = utils.process_args('chlamy17,chlamy3')
        self.assertEqual(rs, 'chlamy17,chlamy3', 'Return csv string as string.')
        # 'chlamy17.idx, chlamy3.idx' -> ['chlamy17.idx', 'chlamy3.idx']
        rs = utils.process_args('chlamy17.idx, chlamy3.idx', ret_mode='list')
        self.assertEqual(rs, ['chlamy17.idx', 'chlamy3.idx'],
                         'Return files as a list.')
|
vimalkvn/riboseqr_wrapper
|
tests/test_riboseqr.py
|
Python
|
gpl-2.0
| 2,326
|
[
"Galaxy"
] |
5dcd0e18a12721c8f9b5cea53ed96b10e7a72c31bb200f2cc36d8fdc3999c575
|
#!/usr/bin/env python
# (c) 2012, Jan-Piet Mens <jpmens () gmail.com>
#
# This file is part of Ansible
#
# Modified to support stand-alone Galaxy documentation
# Copyright (c) 2014, Juniper Networks Inc.
# 2014, Rick Sherman
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
import os
import re
import sys
import datetime
import cgi
from jinja2 import Environment, FileSystemLoader
import ansible.utils
from ansible.utils import module_docs
#####################################################################################
# constants and paths
_ITALIC = re.compile(r"I\(([^)]+)\)")
_BOLD = re.compile(r"B\(([^)]+)\)")
_MODULE = re.compile(r"M\(([^)]+)\)")
_URL = re.compile(r"U\(([^)]+)\)")
_CONST = re.compile(r"C\(([^)]+)\)")
MODULEDIR = "../library/"
OUTPUTDIR = "."
#####################################################################################
def rst_ify(text):
    ''' convert symbols like I(this is in italics) to valid restructured text '''
    out = _ITALIC.sub(r"*\1*", text)
    out = _BOLD.sub(r"**\1**", out)
    out = _MODULE.sub(r"``\1``", out)
    out = _URL.sub(r"\1", out)
    out = _CONST.sub(r"``\1``", out)
    return out
#####################################################################################
def html_ify(text):
    ''' convert symbols like I(this is in italics) to valid HTML '''
    out = cgi.escape(text)
    out = _ITALIC.sub(r"<em>\1</em>", out)
    out = _BOLD.sub(r"<b>\1</b>", out)
    out = _MODULE.sub(r"<span class='module'>\1</span>", out)
    out = _URL.sub(r"<a href='\1'>\1</a>", out)
    out = _CONST.sub(r"<code>\1</code>", out)
    return out
#####################################################################################
def rst_fmt(text, fmt):
    ''' helper for Jinja2 to do format strings '''
    return fmt % text
#####################################################################################
def rst_xline(width, char="="):
    ''' return a restructured text line of a given length '''
    return "".join([char] * width)
#####################################################################################
def write_data(text, outputname, module, output_dir=None):
''' dumps module output to a file or the screen, as requested '''
if output_dir is not None:
f = open(os.path.join(output_dir, outputname % module), 'w')
f.write(text.encode('utf-8'))
f.close()
else:
print text
#####################################################################################
def jinja2_environment(template_dir, typ):
    # Build the Jinja2 environment used to render module docs.  The
    # '@{ ... }@' variable delimiters avoid clashing with Ansible's own
    # '{{ ... }}' templating inside documented examples.
    env = Environment(loader=FileSystemLoader(template_dir),
                      variable_start_string="@{",
                      variable_end_string="}@",
                      trim_blocks=True,
                      )
    env.globals['xline'] = rst_xline
    # Only rst output is supported; expose the markup helpers as filters.
    if typ == 'rst':
        env.filters['convert_symbols_to_format'] = rst_ify
        env.filters['html_ify'] = html_ify
        env.filters['fmt'] = rst_fmt
        env.filters['xline'] = rst_xline
        template = env.get_template('rst.j2')
        outputname = "%s.rst"
    else:
        raise Exception("unknown module format type: %s" % typ)
    return env, template, outputname
#####################################################################################
def process_module(fname, template, outputname):
print MODULEDIR + fname
doc, examples, returndocs = module_docs.get_docstring(MODULEDIR + fname)
all_keys = []
if 'version_added' not in doc:
sys.stderr.write("*** ERROR: missing version_added in: %s ***\n".format(fname))
sys.exit(1)
added = 0
if doc['version_added'] == 'historical':
del doc['version_added']
else:
added = doc['version_added']
# don't show version added information if it's too old to be called out
if added:
added_tokens = str(added).split(".")
added = added_tokens[0] + "." + added_tokens[1]
added_float = float(added)
for (k, v) in doc['options'].iteritems():
all_keys.append(k)
all_keys = sorted(all_keys)
doc['option_keys'] = all_keys
doc['filename'] = fname
doc['docuri'] = doc['module'].replace('_', '-')
doc['now_date'] = datetime.date.today().strftime('%Y-%m-%d')
doc['plainexamples'] = examples # plain text
# here is where we build the table of contents...
text = template.render(doc)
write_data(text, outputname, fname, OUTPUTDIR)
#####################################################################################
def main():
    '''Render docs for every junos_* module and write the toctree index page.'''
    env, template, outputname = jinja2_environment('.', 'rst')
    modules = []
    for module in os.listdir(MODULEDIR):
        if module.startswith("junos_"):
            process_module(module, template, outputname)
            modules.append(module)
    index_file_path = os.path.join(OUTPUTDIR, "index.rst")
    # FIX: the original never closed index_file; a context manager
    # guarantees the buffer is flushed and the handle released
    with open(index_file_path, "w") as index_file:
        index_file.write('Juniper.junos Ansible Modules\n')
        index_file.write('=================================================\n')
        index_file.write('\n')
        index_file.write('Contents:\n')
        index_file.write('\n')
        index_file.write('.. toctree::\n')
        index_file.write(' :maxdepth: 1\n')
        index_file.write('\n')
        for module in modules:
            index_file.write(' %s\n' % module)
if __name__ == '__main__':
main()
|
lamoni/ansible-junos-stdlib
|
docs/ansible2rst.py
|
Python
|
apache-2.0
| 6,013
|
[
"Galaxy"
] |
813ea9743d80a1821681f66dd51e5ec18039d61c4598eaf4e6ee0c17f2f5a8d4
|
#!/usr/bin/env python3
########################################################################
# Solving this with Biopython, because I might as well get used to
# the libraries here.
from Bio.SeqUtils.ProtParam import ProteinAnalysis
import sys
if __name__=="__main__":
    # NOTE: Biopython's weight table differs from the Rosalind problem's
    # monoisotopic table, so this script is informational only.
    print("The Biopython values are different from the problem values,")
    print("so this won't work for an answer to the problem.")
    print("-"*60)
    for line in sys.stdin:
        # one protein string per input line; strip the trailing newline
        print(ProteinAnalysis(line.strip()).molecular_weight())
    # FIX: use sys.exit() -- the bare exit() builtin is injected by the
    # site module and is not guaranteed in all execution contexts
    sys.exit(0)
|
andrew-quinn/rosalind-exercises
|
problems/level-5/prtm/prtm-biopython.py
|
Python
|
mit
| 532
|
[
"Biopython"
] |
89db8c4e4c3b6c3823d4654cb65b7c0133d5c5b857592e5d1d9f357fad5c4319
|
# -*- coding: UTF-8 -*-
"""
Based on ``behave tutorial``
Scenario Outline: Blenders
Given I put <thing> in a blender
When I switch the blender on
Then it should transform into <other thing>
Examples: Amphibians
| thing | other thing |
| Red Tree Frog | mush |
| apples | apple juice |
Examples: Consumer Electronics
| thing | other thing |
| iPhone | toxic waste |
| Galaxy Nexus | toxic waste |
"""
# @mark.domain_model
# -----------------------------------------------------------------------------
# DOMAIN-MODEL:
# -----------------------------------------------------------------------------
class Blender(object):
    """Toy domain model: blending a known thing yields its mapped product,
    anything unknown becomes "DIRT"."""

    # thing -> blended product lookup table
    TRANSFORMATION_MAP = {
        "Red Tree Frog": "mush",
        "apples": "apple juice",
        "iPhone": "toxic waste",
        "Galaxy Nexus": "toxic waste",
    }

    def __init__(self):
        # nothing loaded and nothing blended yet
        self.thing = None
        self.result = None

    @classmethod
    def select_result_for(cls, thing):
        """Return what *thing* transforms into; unknown things become DIRT."""
        return cls.TRANSFORMATION_MAP.get(thing, "DIRT")

    def add(self, thing):
        """Put *thing* into the blender (replacing any previous content)."""
        self.thing = thing

    def switch_on(self):
        """Blend the current content and record the transformation result."""
        self.result = self.select_result_for(self.thing)
|
raulpush/monitorizare-site
|
bin/features/steps/blender.py
|
Python
|
apache-2.0
| 1,217
|
[
"Galaxy"
] |
cd6d529f23fdc6ecf159a88970d29f5f010df0f0aeac21add76b78ab6e42986e
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 08 15:06:17 2016
@author: Pedro Leal
"""
import matplotlib.pyplot as plt
import scipy.signal as signal
import scipy.interpolate as interpolate
import numpy as np
from xfoil_module import output_reader
stress = "50MPa"
raw_data = output_reader("flexinol_isobaric_trained_" + stress + ".csv", separator=",",
rows_to_skip=4, header = ['Time', 'Extension',
'Load', "Temperature",
"Strain", "Stress"],)
#first_data = output_reader("flexinol_untrained_cyclic_temperature_0p.csv", separator=",",
# rows_to_skip=4, header = ['Time', 'Extension',
# 'Load', "Strain",
# "Stress", "Temperature"],)
##Ignore final data
#for i in range(len(first_data['Time'])):
# if first_data['Time'][i] == 2908.75300: #120745.90000: 5958.89500
# print 'ho'
# break
#
#old_data = first_data
#data = {}
#for key in first_data:
# data[key] = first_data[key][:i+1]
#
#first_data = data
#
## Ignore inital data
#for i in range(len(first_data['Time'])):
# if first_data['Time'][i] == 1130.15300:
# print 'hi'
# break
#
#data = {}
#for key in first_data:
# data[key] = first_data[key][i:]
#data = first_data
#==============================================================================
# Filtering data
#==============================================================================
#xx = np.linspace(data['Time'][0], data['Time'][-1],500)
#
#f = interpolate.interp1d(data['Time'],data['Strain'])
#eps_interp = f(xx)
#window = signal.gaussian(1, 1)
#smoothed_eps = signal.convolve(eps_interp, window/window.sum(), mode = 'same')
#f = interpolate.interp1d(data['Time'],data['Temperature'])
#T_interp = f(xx)
#window = signal.gaussian(1, 1)
#smoothed_T = signal.convolve(T_interp, window/window.sum(), mode = 'same')
#f = interpolate.interp1d(data['Time'],data['Stress'])
#sigma_interp = f(xx)
#window = signal.gaussian(1, 1)
#smoothed_sigma = signal.convolve(sigma_interp, window/window.sum(), mode = 'same')
#==============================================================================
# Formatting data
#==============================================================================
#for i in range(len(data["Stress"])):
# if data["Stress"] > 100.:
# break
#eps_0 = data["Strain"][i]
#
#eps_min = min(eps_interp)
#eps_interp = eps_interp - eps_min
#==============================================================================
# Plotting
#==============================================================================
plt.figure()
# BUG FIX: the preprocessing that defined `data` is entirely commented out
# above, so the original `data[...]` plotting calls raised NameError at
# runtime.  Plot the raw readings instead (presumably `raw_data` was the
# intended source -- confirm against the commented-out pipeline).
plt.plot(raw_data["Temperature"], raw_data["Strain"], label = 'First training')
#plt.plot(second_data["Temperature"],second_data["Strain"], label = 'Second training')
#plt.scatter(second_data["Temperature"][0],second_data["Strain"][0])
plt.xlabel("Temperature (C)")
plt.ylabel("Strain (m/m)")
plt.grid()
#plt.legend()
#plt.figure()
#plt.plot(data["Temperature"],data["Strain"])
#plt.xlabel("Temperature (C)")
#plt.ylabel("Strain (m/m)")
#plt.grid()
#
#plt.figure()
#plt.plot(first_data["Strain"], first_data["Stress"], label = 'First training')
#plt.plot(second_data["Strain"], second_data["Stress"], label = 'Second training')
#plt.xlabel("Strain (m/m)")
#plt.ylabel("Stress (MPa)")
#plt.grid()
#plt.legend()
#
plt.figure()
plt.plot(raw_data["Temperature"], raw_data["Stress"], label = 'First training')
#plt.plot(second_data["Temperature"], second_data["Stress"], label = 'Second training')
#plt.plot(smoothed_T, smoothed_sigma, 'g')
#plt.plot(T_interp, sigma_interp, 'r')
plt.xlabel("Temperature (C)")
plt.ylabel("Stress (MPa)")
plt.grid()
#plt.legend()
#
#plt.figure()
#plt.plot( np.array(first_data["Time"]) - data["Time"][0], first_data["Temperature"], label = 'First training')
#plt.plot( np.array(second_data["Time"]) - first_data["Time"][0], second_data["Temperature"], label = 'Second training')
##plt.plot(xx, smoothed_T, 'g')
##plt.plot(xx - xx[0], T_interp, 'r')
#plt.xlabel("Time (t)")
#plt.ylabel("Temperature (C)")
#plt.grid()
#plt.legend()
#
#plt.figure()
#plt.plot(first_data["Time"], first_data["Strain"], label = 'First training')
#plt.plot(second_data["Time"], second_data["Strain"], label = 'Second training')
##plt.plot(xx, smoothed_eps, 'g')
##plt.plot(xx - xx[0], eps_interp, 'r')
#plt.xlabel("Time (t)")
#plt.ylabel("Strain")
#plt.grid()
#plt.legend(loc=4)
#plt.figure()
##plt.plot(data["Temperature"],data["Strain"])
#plt.plot( smoothed_T, smoothed_eps, 'g')
#plt.plot(T_interp, eps_interp, 'r')
#plt.grid()
#plt.xlabel("Temperature")
#plt.ylabel("Strain")
#
##==============================================================================
## Stroing data
##==============================================================================
#data = np.array([T_interp, eps_interp, sigma_interp])
#try:
# np.savetxt("filtered_data_"+ stress+".txt", data.T,fmt='%.18f')
#except:
# print "No output file"
|
Mecanon/morphing_wing
|
experimental/sma_database/read_data.py
|
Python
|
mit
| 5,064
|
[
"Gaussian"
] |
260f600c8737b93be3232d51a03615d111a36a0541acadfcfde4890e804855dc
|
#!/usr/bin/env python
import vtk
# RGB color triples (0-255 unsigned char components)
red = [255, 0, 0]
green = [0, 255, 0]
blue = [0, 0, 255]
# Setup the colors array
colors = vtk.vtkUnsignedCharArray()
colors.SetNumberOfComponents(3)
colors.SetName("Colors")
# Add the colors we created to the colors array
# (one tuple per point, in point-insertion order: ten colors for ten points)
colors.InsertNextTypedTuple(red)
colors.InsertNextTypedTuple(red)
colors.InsertNextTypedTuple(blue)
colors.InsertNextTypedTuple(blue)
colors.InsertNextTypedTuple(blue)
colors.InsertNextTypedTuple(green)
colors.InsertNextTypedTuple(green)
colors.InsertNextTypedTuple(green)
colors.InsertNextTypedTuple(red)
colors.InsertNextTypedTuple(blue)
# Create four points (must be in counter clockwise order)
# NOTE(review): ten points are created in total; p6/p7 repeat p2/p3 and
# p8 repeats p0 -- intentional for the second quad / line endpoints?
p0 = [0.0, 0.0, 0.0]
p1 = [1.0, 0.0, 0.0]
p2 = [1.0, 1.0, 0.0]
p3 = [0.0, 1.0, 0.0]
p4 = [0.0, 0.0, 1.0]
p5 = [1.0, 0.0, 1.0]
p6 = [1.0, 1.0, 0.0]
p7 = [0.0, 1.0, 0.0]
p8 = [0.0, 0.0, 0.0]
p9 = [1, 1.0, 1.0]
# Add the points to a vtkPoints object
points = vtk.vtkPoints()
pid = points.InsertNextPoint(p0)
points.InsertNextPoint(p1)
points.InsertNextPoint(p2)
points.InsertNextPoint(p3)
points.InsertNextPoint(p4)
points.InsertNextPoint(p5)
points.InsertNextPoint(p6)
points.InsertNextPoint(p7)
# Create a quad on the four points
quad = vtk.vtkQuad()
quad.GetPointIds().SetId(0, 0)
quad.GetPointIds().SetId(1, 1)
quad.GetPointIds().SetId(2, 2)
quad.GetPointIds().SetId(3, 3)
# second quad uses point ids 4-7
quad2 = vtk.vtkQuad()
quad2.GetPointIds().SetId(0, 4)
quad2.GetPointIds().SetId(1, 5)
quad2.GetPointIds().SetId(2, 6)
quad2.GetPointIds().SetId(3, 7)
# two extra points (ids 8 and 9) used as line endpoints
points.InsertNextPoint(p8)
pid = points.InsertNextPoint(p9)
line = vtk.vtkLine()
line.GetPointIds().SetId(0, 8)
line.GetPointIds().SetId(1, 9)
# Create a cell array to store the quad in
vertices = vtk.vtkCellArray()
lines = vtk.vtkCellArray()
quads = vtk.vtkCellArray()
# NOTE(review): `vertices` is populated but never attached to the polydata
# (SetVerts below is commented out)
vertices.InsertNextCell(1)
vertices.InsertCellPoint(pid)
lines.InsertNextCell(line)
quads.InsertNextCell(quad)
quads.InsertNextCell(quad2)
# Create a polydata to store everything in
polydata = vtk.vtkPolyData()
# Add the points and quads to the dataset
polydata.SetPoints(points)
# polydata.SetVerts(vertices)
polydata.SetPolys(quads)
polydata.SetLines(lines)
# per-point colors drive the surface shading
polydata.GetPointData().SetScalars(colors)
# Setup actor and mapper
mapper = vtk.vtkPolyDataMapper()
# VTK 6 replaced SetInput with SetInputData; support both API generations
if vtk.VTK_MAJOR_VERSION <= 5:
    mapper.SetInput(polydata)
else:
    mapper.SetInputData(polydata)
actor = vtk.vtkActor()
actor.SetMapper(mapper)
actor.GetProperty().SetPointSize(20)
# Setup render window, renderer, and interactor
renderer = vtk.vtkRenderer()
renderWindow = vtk.vtkRenderWindow()
WIDTH = 1640
HEIGHT = 1480
renderWindow.SetSize(WIDTH, HEIGHT)
renderWindow.AddRenderer(renderer)
renderWindowInteractor = vtk.vtkRenderWindowInteractor()
renderWindowInteractor.SetRenderWindow(renderWindow)
renderer.AddActor(actor)
# blocks in the interactor's event loop until the window is closed
renderWindow.Render()
renderWindowInteractor.Start()
|
tjssmy/CuviewerPy
|
vtkTests/quad.py
|
Python
|
mit
| 2,817
|
[
"VTK"
] |
efe81608bddb1dab401e1463f8ed7ee1fc8b93ec4c5120fbdc3309c866681e5e
|
import sys
from ase import Atoms
from gpaw import GPAW, FermiDirac
from gpaw import KohnShamConvergenceError
from gpaw.utilities import devnull, compiled_with_sl
from ase.structure import molecule
# Calculates energy and forces for various parallelizations
tolerance = 4e-5
parallel = dict()
basekwargs = dict(mode='fd',
maxiter=3,
#basis='dzp',
#nbands=18,
nbands=6,
kpts=(4,4,4), # 8 kpts in the IBZ
parallel=parallel)
Eref = None
Fref_av = None
def run(formula='H2O', vacuum=1.5, cell=None, pbc=1, **morekwargs):
    # Run a (deliberately unconverged, maxiter=3) GPAW calculation for the
    # given molecule and compare total energy and forces against reference
    # values captured on the first call; raises AssertionError on mismatch,
    # so every parallelization layout must produce identical physics.
    print formula, parallel
    system = molecule(formula)
    kwargs = dict(basekwargs)
    kwargs.update(morekwargs)
    calc = GPAW(**kwargs)
    system.set_calculator(calc)
    system.center(vacuum)
    # NOTE(review): when cell is None the system is centered a second time
    # below -- the call above looks redundant; confirm intended.
    if cell is None:
        system.center(vacuum)
    else:
        system.set_cell(cell)
    system.set_pbc(pbc)
    try:
        system.get_potential_energy()
    except KohnShamConvergenceError:
        # convergence is not expected with maxiter=3; the partial result
        # below is what gets compared
        pass
    E = calc.hamiltonian.Etot
    F_av = calc.forces.calculate(calc.wfs, calc.density,
                                 calc.hamiltonian)
    # the first invocation records the reference values
    global Eref, Fref_av
    if Eref is None:
        Eref = E
        Fref_av = F_av
    eerr = abs(E - Eref)
    ferr = abs(F_av - Fref_av).max()
    if calc.wfs.world.rank == 0:
        print 'Energy', E
        print
        print 'Forces'
        print F_av
        print
        print 'Errs', eerr, ferr
    if eerr > tolerance or ferr > tolerance:
        # only the rank-0 process writes diagnostics; others discard them
        if calc.wfs.world.rank == 0:
            stderr = sys.stderr
        else:
            stderr = devnull
        if eerr > tolerance:
            print >> stderr, 'Failed!'
            print >> stderr, 'E = %f, Eref = %f' % (E, Eref)
            msg = 'Energy err larger than tolerance: %f' % eerr
        if ferr > tolerance:
            print >> stderr, 'Failed!'
            print >> stderr, 'Forces:'
            print >> stderr, F_av
            print >> stderr
            print >> stderr, 'Ref forces:'
            print >> stderr, Fref_av
            print >> stderr
            msg = 'Force err larger than tolerance: %f' % ferr
        print >> stderr
        print >> stderr, 'Args:'
        print >> stderr, formula, vacuum, cell, pbc, morekwargs
        print >> stderr, parallel
        raise AssertionError(msg)
# reference:
# kpt-parallelization = 8,
# state-parallelization = 1,
# domain-decomposition = (1,1,1)
run()
# kpt-parallelization = 2,
# state-parallelization = 2,
# domain-decomposition = (1,2,1)
parallel['band'] = 2
parallel['domain'] = (1, 2, 1)
run()
if compiled_with_sl():
# kpt-parallelization = 2,
# state-parallelization = 2,
# domain-decomposition = (1,2,1)
# with blacs
parallel['sl_default'] = (2, 2, 2)
run()
# perform spin polarization test
parallel = dict()
basekwargs = dict(mode='fd',
maxiter=3,
nbands=6,
kpts=(4,4,4), # 8 kpts in the IBZ
parallel=parallel)
Eref = None
Fref_av = None
OH_kwargs = dict(formula='NH2', vacuum=1.5, pbc=1, spinpol=1,
occupations=FermiDirac(width=0.1))
# reference:
# kpt-parallelization = 4,
# spin-polarization = 2,
# state-parallelization = 1,
# domain-decomposition = (1,1,1)
run(**OH_kwargs)
# kpt-parallelization = 2,
# spin-polarization = 2,
# domain-decomposition = (1, 2, 1)
parallel['domain'] = (1, 2, 1)
run(**OH_kwargs)
# kpt-parallelization = 2,
# spin-polarization = 2,
# state-parallelization = 2,
# domain-decomposition = (1, 1, 1)
del parallel['domain']
parallel['band'] = 2
run(**OH_kwargs)
# do last test plus buffer_size keyword
parallel['buffer_size'] = 150
run(**OH_kwargs)
if compiled_with_sl():
# kpt-parallelization = 2,
# spin-polarization = 2,
# state-parallelization = 2,
# domain-decomposition = (1, 2, 1)
# with blacs
del parallel['buffer_size']
parallel['domain'] = (1, 2, 1)
parallel['sl_default'] = (2, 1, 2)
run(**OH_kwargs)
# kpt-parallelization = 2,
# state-parallelization = 2,
# domain-decomposition = (1, 2, 1)
# with blacs
parallel['sl_default'] = (2, 2, 2)
run(**OH_kwargs)
# do last test plus buffer_size keyword
parallel['buffer_size'] = 150
run(**OH_kwargs)
|
ajylee/gpaw-rtxs
|
gpaw/test/parallel/fd_parallel_kpt.py
|
Python
|
gpl-3.0
| 4,339
|
[
"ASE",
"GPAW"
] |
993593669a572e94cc962f3b19750dcbd4ff33601a5186ef04e6c728a3c1e003
|
# -*- coding: utf-8 -*-
#
# Copyright © 2016 Mark Wolf
#
# This file is part of Scimap.
#
# Scimap is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Scimap is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Scimap. If not, see <http://www.gnu.org/licenses/>.
"""Define some classes for more fine-grained control over exception
handling."""
class DeprecationError(Exception):
    """This feature has been replaced by a new method."""
    pass
#############################
# Electrochemistry exceptions
#############################
class ReadCurrentError(Exception):
    """Cannot read a current from electrochemistry cycling file."""
    pass
#####################
# Mapping exceptions
#####################
class MappingFileNotFoundError(FileNotFoundError):
    """A required mapping data file could not be found on disk."""
    pass
##############################
# X-Ray Diffraction exceptions
##############################
class UnitCellError(ValueError):
    """The given unit-cell parameters are invalid for the crystal
    system.
    """
    pass
class HKLFormatError(ValueError):
    """The given hkl index is vague or malformed."""
    pass
class RefinementError(Exception):
    """Generic error: we tried to refine something but it didn't work."""
    pass
class RefinementWarning(RuntimeWarning):
    """We tried to refine something but it didn't work."""
    pass
class PeakFitError(RefinementError):
    """Tried to fit curves to a peak but could not find a local minimum."""
    pass
class EmptyRefinementError(RefinementError):
    """There were no parameters to refine."""
    pass
class SingularMatrixError(RefinementError):
    """FullProf refinement reached an invalid Hessian and was not able to
    continue."""

    def __init__(self, param):
        # remember which parameter was being refined when the matrix
        # became singular, for the message below
        self.param = param

    def __str__(self):
        return "Singular matrix while refining {param}".format(param=self.param)
# FullProf-specific refinement failure modes
class DivergenceError(RefinementError):
    """Fullprof refinement did not reach a local maximum."""
    pass
class PCRFileError(RefinementError):
    """Tried to read a Fullprof PCR file but it did not conform to the
    expected format."""
    pass
class NoReflectionsError(RefinementError):
    """The refinement has I(obs) = 0. (Do you really have reflections?)"""

    def __str__(self):
        # append the constructor arguments to aid debugging
        return "I(obs) = 0. Do you really have reflections? {}".format(self.args)
class UnknownFileTypeError(ValueError):
    """This file does not have a handler registered."""
    pass
class FileFormatError(ValueError):
    """This file is not formatted as expected."""
    pass
class DataNotFoundError(FileNotFoundError):
    """Expected a directory containing data but found none."""
    pass
class FrameFileNotFound(IOError):
    """Expected to load a TXM frame file but it doesn't exist."""
    pass
class GroupKeyError(KeyError):
    """Tried to load an HDF group but the group doesn't exist or is
    ambiguous."""
    pass
class DataFormatError(RuntimeError):
    """The raw data are arranged in a way that the importers or TXM classes do
    not understand.
    """
    pass
class HDFScopeError(ValueError):
    """Tried to pass an HDF scope that is not recognized."""
    pass
class HDFAttrsError(ValueError):
    """Class uses hdf_attrs decorator but doesn't have hdfattrs attribute."""
    pass
# NOTE(review): this shadows Python 3's builtin FileExistsError within this
# module (and for star-importers) -- consider renaming.
class FileExistsError(IOError):
    """Tried to import a TXM frameset but the corresponding HDF file
    already exists."""
    pass
class CreateGroupError(ValueError):
    """Tried to import a TXM frameset into a group but the corresponding
    HDF group already exists.
    """
    pass
class FilenameParseError(ValueError):
    """The parameters in the filename do not match the naming scheme
    associated with this flavor."""
    pass
class DatasetExistsError(RuntimeError):
    """Trying to save a new dataset but one already exists with the given
    path."""
    pass
class NoParticleError(Exception):
    # NOTE(review): no docstring in the original; presumably raised when no
    # particle can be located -- confirm semantics before documenting.
    pass
class ChemicalFormulaError(ValueError):
    """The given chemical formula cannot be properly understood or
    parsed.
    """
    pass
|
m3wolf/scimap
|
scimap/exceptions.py
|
Python
|
gpl-3.0
| 4,504
|
[
"CRYSTAL"
] |
6c996c9830049808634db0bdd53c5c773c3e83c91a6480dcab74206cbc60554f
|
import discord
from .nodes.upgrades import upgrade_list
def calculate_upgrade(up_id, level):
    """Return {'amount': value, 'end': unit} for upgrade *up_id* at *level*.

    Raises KeyError for an unknown upgrade id (same as the original
    table lookup).
    """
    def _time_reduction(base, per_level):
        # negative number of seconds shaved off a *base*-second cooldown;
        # each level trims (per_level)% of the base, truncated to an int
        return -(base - (int(base - ((base / 100) * (level * per_level)))))

    table = {
        'stamina': {'amount': _time_reduction(60, 0.5), 'end': 'Seconds'},
        'luck': {'amount': level * 250, 'end': 'Bonus'},
        'storage': {'amount': 64 + (level * 8), 'end': 'Spaces'},
        'oven': {'amount': _time_reduction(3600, 0.2), 'end': 'Seconds'},
        'casino': {'amount': _time_reduction(60, 0.5), 'end': 'Seconds'},
    }
    return table[up_id]
async def upgrades(cmd, message, args):
    """Reply with an embed listing a member's Sigma upgrade levels and the
    computed effect (amount/unit) of each upgrade at that level."""
    # target the first mentioned user, defaulting to the message author
    if message.mentions:
        target = message.mentions[0]
    else:
        target = message.author
    # lazily create the per-user upgrade document on first access
    upgrade_file = cmd.db[cmd.db.db_cfg.database].Upgrades.find_one({'UserID': target.id})
    if upgrade_file is None:
        cmd.db[cmd.db.db_cfg.database].Upgrades.insert_one({'UserID': target.id})
        upgrade_file = {}
    upgrade_text = ''
    upgrade_index = 0
    for upgrade in upgrade_list:
        upgrade_index += 1  # NOTE(review): counted but never used
        upgrade_id = upgrade['id']
        # a missing key means the upgrade was never purchased (level 0)
        if upgrade_id in upgrade_file:
            upgrade_level = upgrade_file[upgrade_id]
        else:
            upgrade_level = 0
        up_data = calculate_upgrade(upgrade_id, upgrade_level)
        upgrade_text += f'\n**Level {upgrade_level}** {upgrade["name"]}: **{up_data["amount"]} {up_data["end"]}**'
    upgrade_list_embed = discord.Embed(color=0xF9F9F9, title=f'🛍 {target.display_name}\'s Sigma Upgrades')
    upgrade_list_embed.description = upgrade_text
    await message.channel.send(embed=upgrade_list_embed)
|
AXAz0r/apex-sigma-core
|
sigma/modules/minigames/professions/upgrades.py
|
Python
|
gpl-3.0
| 1,781
|
[
"CASINO"
] |
d59456ded9e44afe25e4a26fd34847b8e671f9af23780734beaa3320ea2e8128
|
from pyseeyou.grammar import ICUMessageFormat
from pyseeyou.node_visitor import ICUNodeVisitor
def format(msg, values, lang):
    """Parse the ICU message *msg* and render it with *values*, applying the
    pluralisation rules for language code *lang*."""
    parsed = ICUMessageFormat.parse(msg)
    return format_tree(parsed, values, lang)
def format_tree(ast, values, lang):
    """Render an already-parsed ICU message tree with *values*, applying the
    pluralisation rules for language code *lang*."""
    visitor = ICUNodeVisitor(values, lang)
    return visitor.visit(ast)
|
rolepoint/pyseeyou
|
pyseeyou/__init__.py
|
Python
|
mit
| 842
|
[
"VisIt"
] |
c0dc4b71d93fd9b934ddb710af71555594c855cb1ff77516e7dd4279c46a64d7
|
#
# Parse tree nodes for expressions
#
from __future__ import absolute_import
import cython
cython.declare(error=object, warning=object, warn_once=object, InternalError=object,
CompileError=object, UtilityCode=object, TempitaUtilityCode=object,
StringEncoding=object, operator=object,
Naming=object, Nodes=object, PyrexTypes=object, py_object_type=object,
list_type=object, tuple_type=object, set_type=object, dict_type=object,
unicode_type=object, str_type=object, bytes_type=object, type_type=object,
Builtin=object, Symtab=object, Utils=object, find_coercion_error=object,
debug_disposal_code=object, debug_temp_alloc=object, debug_coercion=object,
bytearray_type=object, slice_type=object, _py_int_types=object,
IS_PYTHON3=cython.bint)
import sys
import copy
import os.path
import operator
from .Errors import error, warning, warn_once, InternalError, CompileError
from .Errors import hold_errors, release_errors, held_errors, report_error
from .Code import UtilityCode, TempitaUtilityCode
from . import StringEncoding
from . import Naming
from . import Nodes
from .Nodes import Node, utility_code_for_imports
from . import PyrexTypes
from .PyrexTypes import py_object_type, c_long_type, typecast, error_type, \
unspecified_type
from . import TypeSlots
from .Builtin import list_type, tuple_type, set_type, dict_type, type_type, \
unicode_type, str_type, bytes_type, bytearray_type, basestring_type, slice_type
from . import Builtin
from . import Symtab
from .. import Utils
from .Annotate import AnnotationItem
from . import Future
from ..Debugging import print_call_chain
from .DebugFlags import debug_disposal_code, debug_temp_alloc, \
debug_coercion
try:
from __builtin__ import basestring
except ImportError:
# Python 3
basestring = str
any_string_type = (bytes, str)
else:
# Python 2
any_string_type = (bytes, unicode)
if sys.version_info[0] >= 3:
IS_PYTHON3 = True
_py_int_types = int
else:
IS_PYTHON3 = False
_py_int_types = (int, long)
class NotConstant(object):
    # Singleton sentinel meaning "this expression's constant value is not
    # known/not constant" -- distinct from None, which is a valid constant.
    _obj = None

    def __new__(cls):
        # create the one shared instance lazily on first construction
        inst = NotConstant._obj
        if inst is None:
            inst = super(NotConstant, cls).__new__(cls)
            NotConstant._obj = inst
        return inst

    def __repr__(self):
        return "<NOT CONSTANT>"

not_a_constant = NotConstant()
constant_value_not_set = object()  # sentinel: constant value not computed yet
# error messages when coercing from key[0] to key[1]
coercion_error_dict = {
# string related errors
(unicode_type, str_type): ("Cannot convert Unicode string to 'str' implicitly."
" This is not portable and requires explicit encoding."),
(unicode_type, bytes_type): "Cannot convert Unicode string to 'bytes' implicitly, encoding required.",
(unicode_type, PyrexTypes.c_char_ptr_type): "Unicode objects only support coercion to Py_UNICODE*.",
(unicode_type, PyrexTypes.c_const_char_ptr_type): "Unicode objects only support coercion to Py_UNICODE*.",
(unicode_type, PyrexTypes.c_uchar_ptr_type): "Unicode objects only support coercion to Py_UNICODE*.",
(unicode_type, PyrexTypes.c_const_uchar_ptr_type): "Unicode objects only support coercion to Py_UNICODE*.",
(bytes_type, unicode_type): "Cannot convert 'bytes' object to unicode implicitly, decoding required",
(bytes_type, str_type): "Cannot convert 'bytes' object to str implicitly. This is not portable to Py3.",
(bytes_type, basestring_type): ("Cannot convert 'bytes' object to basestring implicitly."
" This is not portable to Py3."),
(bytes_type, PyrexTypes.c_py_unicode_ptr_type): "Cannot convert 'bytes' object to Py_UNICODE*, use 'unicode'.",
(bytes_type, PyrexTypes.c_const_py_unicode_ptr_type): (
"Cannot convert 'bytes' object to Py_UNICODE*, use 'unicode'."),
(basestring_type, bytes_type): "Cannot convert 'basestring' object to bytes implicitly. This is not portable.",
(str_type, unicode_type): ("str objects do not support coercion to unicode,"
" use a unicode string literal instead (u'')"),
(str_type, bytes_type): "Cannot convert 'str' to 'bytes' implicitly. This is not portable.",
(str_type, PyrexTypes.c_char_ptr_type): "'str' objects do not support coercion to C types (use 'bytes'?).",
(str_type, PyrexTypes.c_const_char_ptr_type): "'str' objects do not support coercion to C types (use 'bytes'?).",
(str_type, PyrexTypes.c_uchar_ptr_type): "'str' objects do not support coercion to C types (use 'bytes'?).",
(str_type, PyrexTypes.c_const_uchar_ptr_type): "'str' objects do not support coercion to C types (use 'bytes'?).",
(str_type, PyrexTypes.c_py_unicode_ptr_type): "'str' objects do not support coercion to C types (use 'unicode'?).",
(str_type, PyrexTypes.c_const_py_unicode_ptr_type): (
"'str' objects do not support coercion to C types (use 'unicode'?)."),
(PyrexTypes.c_char_ptr_type, unicode_type): "Cannot convert 'char*' to unicode implicitly, decoding required",
(PyrexTypes.c_const_char_ptr_type, unicode_type): (
"Cannot convert 'char*' to unicode implicitly, decoding required"),
(PyrexTypes.c_uchar_ptr_type, unicode_type): "Cannot convert 'char*' to unicode implicitly, decoding required",
(PyrexTypes.c_const_uchar_ptr_type, unicode_type): (
"Cannot convert 'char*' to unicode implicitly, decoding required"),
}
def find_coercion_error(type_tuple, default, env):
    """Look up the error message for coercing type_tuple[0] -> type_tuple[1],
    falling back to *default* when no special message applies."""
    err = coercion_error_dict.get(type_tuple)
    if err is None:
        return default
    char_ptr_types = (PyrexTypes.c_char_ptr_type, PyrexTypes.c_uchar_ptr_type,
                      PyrexTypes.c_const_char_ptr_type, PyrexTypes.c_const_uchar_ptr_type)
    if env.directives['c_string_encoding'] and any(t in type_tuple for t in char_ptr_types):
        # char* coercions become legal when an encoding directive is active,
        # except for non-ascii/non-default encodings into C types
        if type_tuple[1].is_pyobject or env.directives['c_string_encoding'] in ('ascii', 'default'):
            return default
        return "'%s' objects do not support coercion to C types with non-ascii or non-default c_string_encoding" % type_tuple[0].name
    return err
def default_str_type(env):
    """Map the 'c_string_type' directive to the matching builtin type object
    (None for an unrecognized directive value)."""
    requested = env.directives['c_string_type']
    if requested == 'bytes':
        return bytes_type
    if requested == 'bytearray':
        return bytearray_type
    if requested == 'str':
        return str_type
    if requested == 'unicode':
        return unicode_type
    return None
def check_negative_indices(*nodes):
    """
    Raise a warning on nodes that are known to have negative numeric values.
    Used to find (potential) bugs inside of "wraparound=False" sections.
    """
    for node in nodes:
        # skip nodes whose constant result is unknown or non-numeric
        if node is None or (
                not isinstance(node.constant_result, _py_int_types) and
                not isinstance(node.constant_result, float)):
            continue
        if node.constant_result < 0:
            warning(node.pos,
                    "the result of using negative indices inside of "
                    "code sections marked as 'wraparound=False' is "
                    "undefined", level=1)
def infer_sequence_item_type(env, seq_node, index_node=None, seq_type=None):
    # Infer the element type of *seq_node*, if possible: follows a single
    # assignment for tuples, uses a constant index when available, and
    # otherwise requires all items to share one inferred type.  Returns
    # None when no item type can be determined.
    if not seq_node.is_sequence_constructor:
        if seq_type is None:
            seq_type = seq_node.infer_type(env)
        if seq_type is tuple_type:
            # tuples are immutable => we can safely follow assignments
            if seq_node.cf_state and len(seq_node.cf_state) == 1:
                try:
                    seq_node = seq_node.cf_state[0].rhs
                except AttributeError:
                    pass
    if seq_node is not None and seq_node.is_sequence_constructor:
        if index_node is not None and index_node.has_constant_result():
            try:
                item = seq_node.args[index_node.constant_result]
            except (ValueError, TypeError, IndexError):
                # fall through to the common-type scan below
                pass
            else:
                return item.infer_type(env)
        # if we're lucky, all items have the same type
        item_types = set([item.infer_type(env) for item in seq_node.args])
        if len(item_types) == 1:
            return item_types.pop()
    return None
def get_exception_handler(exception_value):
    """Return the C++ catch-block code that converts the active C++
    exception into a Python exception, per the declared *exception_value*."""
    if exception_value is None:
        # no explicit handler declared: use the generic translator
        return "__Pyx_CppExn2PyErr();"
    if exception_value.type.is_pyobject:
        # raise the declared Python exception object, preserving what()
        cname = exception_value.entry.cname
        return ('try { throw; } catch(const std::exception& exn) { PyErr_SetString(%s, exn.what()); } catch(...) { PyErr_SetNone(%s); }'
                % (cname, cname))
    # call the declared C handler; fall back to RuntimeError if it set nothing
    return ('%s(); if (!PyErr_Occurred()) PyErr_SetString(PyExc_RuntimeError , "Error converting c++ exception.");'
            % exception_value.entry.cname)
def translate_cpp_exception(code, pos, inside, exception_value, nogil):
    # Wrap the C++ statement *inside* in a try/catch that converts any C++
    # exception into a Python one (per *exception_value*'s declaration),
    # re-acquiring the GIL around the conversion when *nogil* is set.
    raise_py_exception = get_exception_handler(exception_value)
    code.putln("try {")
    code.putln("%s" % inside)
    code.putln("} catch(...) {")
    if nogil:
        code.put_ensure_gil(declare_gilstate=True)
    code.putln(raise_py_exception)
    if nogil:
        code.put_release_ensured_gil()
    code.putln(code.error_goto(pos))
    code.putln("}")
# Used to handle the case where an lvalue expression and an overloaded assignment
# both have an exception declaration.
def translate_double_cpp_exception(code, pos, lhs_type, lhs_code, rhs_code,
                                   lhs_exc_val, assign_exc_val, nogil):
    # Emit nested try/catch blocks: the inner one guards the overloaded
    # assignment (RHS), the outer one guards evaluation of the LHS lvalue.
    handle_lhs_exc = get_exception_handler(lhs_exc_val)
    handle_assignment_exc = get_exception_handler(assign_exc_val)
    code.putln("try {")
    code.putln(lhs_type.declaration_code("__pyx_local_lvalue = %s;" % lhs_code))
    code.putln("try {")
    code.putln("__pyx_local_lvalue = %s;" % rhs_code)
    # Catch any exception from the overloaded assignment.
    code.putln("} catch(...) {")
    if nogil:
        code.put_ensure_gil(declare_gilstate=True)
    code.putln(handle_assignment_exc)
    if nogil:
        code.put_release_ensured_gil()
    code.putln(code.error_goto(pos))
    code.putln("}")
    # Catch any exception from evaluating lhs.
    code.putln("} catch(...) {")
    if nogil:
        code.put_ensure_gil(declare_gilstate=True)
    code.putln(handle_lhs_exc)
    if nogil:
        code.put_release_ensured_gil()
    code.putln(code.error_goto(pos))
    code.putln('}')
class ExprNode(Node):
    """Abstract base class for all expression nodes in the parse tree.

    Subclasses implement the analysis and code-generation protocol
    described in the phase-overview comments below.  The `subexprs`
    class attribute names the attributes holding child expression nodes.
    """
    #  subexprs     [string]   Class var holding names of subexpr node attrs
    #  type         PyrexType  Type of the result
    #  result_code  string     Code fragment
    #  result_ctype string     C type of result_code if different from type
    #  is_temp      boolean    Result is in a temporary variable
    #  is_sequence_constructor
    #               boolean    Is a list or tuple constructor expression
    #  is_starred   boolean    Is a starred expression (e.g. '*a')
    #  saved_subexpr_nodes
    #               [ExprNode or [ExprNode or None] or None]
    #                          Cached result of subexpr_nodes()
    #  use_managed_ref boolean use ref-counted temps/assignments/etc.
    #  result_is_used  boolean indicates that the result will be dropped and the
    #                  result_code/temp_result can safely be set to None
    result_ctype = None
    type = None
    temp_code = None
    old_temp = None  # error checker for multiple frees etc.
    use_managed_ref = True  # can be set by optimisation transforms
    result_is_used = True
    #  The Analyse Expressions phase for expressions is split
    #  into two sub-phases:
    #
    #    Analyse Types
    #      Determines the result type of the expression based
    #      on the types of its sub-expressions, and inserts
    #      coercion nodes into the expression tree where needed.
    #      Marks nodes which will need to have temporary variables
    #      allocated.
    #
    #    Allocate Temps
    #      Allocates temporary variables where needed, and fills
    #      in the result_code field of each node.
    #
    #  ExprNode provides some convenience routines which
    #  perform both of the above phases. These should only
    #  be called from statement nodes, and only when no
    #  coercion nodes need to be added around the expression
    #  being analysed. In that case, the above two phases
    #  should be invoked separately.
    #
    #  Framework code in ExprNode provides much of the common
    #  processing for the various phases. It makes use of the
    #  'subexprs' class attribute of ExprNodes, which should
    #  contain a list of the names of attributes which can
    #  hold sub-nodes or sequences of sub-nodes.
    #
    #  The framework makes use of a number of abstract methods.
    #  Their responsibilities are as follows.
    #
    #    Declaration Analysis phase
    #
    #      analyse_target_declaration
    #        Called during the Analyse Declarations phase to analyse
    #        the LHS of an assignment or argument of a del statement.
    #        Nodes which cannot be the LHS of an assignment need not
    #        implement it.
    #
    #    Expression Analysis phase
    #
    #      analyse_types
    #        - Call analyse_types on all sub-expressions.
    #        - Check operand types, and wrap coercion nodes around
    #          sub-expressions where needed.
    #        - Set the type of this node.
    #        - If a temporary variable will be required for the
    #          result, set the is_temp flag of this node.
    #
    #      analyse_target_types
    #        Called during the Analyse Types phase to analyse
    #        the LHS of an assignment or argument of a del
    #        statement. Similar responsibilities to analyse_types.
    #
    #      target_code
    #        Called by the default implementation of allocate_target_temps.
    #        Should return a C lvalue for assigning to the node. The default
    #        implementation calls calculate_result_code.
    #
    #      check_const
    #        - Check that this node and its subnodes form a
    #          legal constant expression. If so, do nothing,
    #          otherwise call not_const.
    #
    #        The default implementation of check_const
    #        assumes that the expression is not constant.
    #
    #      check_const_addr
    #        - Same as check_const, except check that the
    #          expression is a C lvalue whose address is
    #          constant. Otherwise, call addr_not_const.
    #
    #        The default implementation of calc_const_addr
    #        assumes that the expression is not a constant
    #        lvalue.
    #
    #   Code Generation phase
    #
    #      generate_evaluation_code
    #        - Call generate_evaluation_code for sub-expressions.
    #        - Perform the functions of generate_result_code
    #          (see below).
    #        - If result is temporary, call generate_disposal_code
    #          on all sub-expressions.
    #
    #        A default implementation of generate_evaluation_code
    #        is provided which uses the following abstract methods:
    #
    #          generate_result_code
    #            - Generate any C statements necessary to calculate
    #              the result of this node from the results of its
    #              sub-expressions.
    #
    #          calculate_result_code
    #            - Should return a C code fragment evaluating to the
    #              result. This is only called when the result is not
    #              a temporary.
    #
    #      generate_assignment_code
    #        Called on the LHS of an assignment.
    #        - Call generate_evaluation_code for sub-expressions.
    #        - Generate code to perform the assignment.
    #        - If the assignment absorbed a reference, call
    #          generate_post_assignment_code on the RHS,
    #          otherwise call generate_disposal_code on it.
    #
    #      generate_deletion_code
    #        Called on an argument of a del statement.
    #        - Call generate_evaluation_code for sub-expressions.
    #        - Generate code to perform the deletion.
    #        - Call generate_disposal_code on all sub-expressions.
    #
    #
    is_sequence_constructor = False
    is_dict_literal = False
    is_set_literal = False
    is_string_literal = False
    is_attribute = False
    is_subscript = False
    is_slice = False
    is_buffer_access = False
    is_memview_index = False
    is_memview_slice = False
    is_memview_broadcast = False
    is_memview_copy_assignment = False
    saved_subexpr_nodes = None
    is_temp = False
    is_target = False
    is_starred = False
    constant_result = constant_value_not_set
    # Child attributes are exactly the sub-expression attributes.
    child_attrs = property(fget=operator.attrgetter('subexprs'))
    def not_implemented(self, method_name):
        # Report an abstract method that a subclass failed to override.
        print_call_chain(method_name, "not implemented") ###
        raise InternalError(
            "%s.%s not implemented" %
                (self.__class__.__name__, method_name))
    def is_lvalue(self):
        return 0
    def is_addressable(self):
        return self.is_lvalue() and not self.type.is_memoryviewslice
    def is_ephemeral(self):
        #  An ephemeral node is one whose result is in
        #  a Python temporary and we suspect there are no
        #  other references to it. Certain operations are
        #  disallowed on such values, since they are
        #  likely to result in a dangling pointer.
        return self.type.is_pyobject and self.is_temp
    def subexpr_nodes(self):
        #  Extract a list of subexpression nodes based
        #  on the contents of the subexprs class attribute.
        nodes = []
        for name in self.subexprs:
            item = getattr(self, name)
            if item is not None:
                if type(item) is list:
                    nodes.extend(item)
                else:
                    nodes.append(item)
        return nodes
    def result(self):
        # Return the C expression holding this node's result:
        # the allocated temp name, or the computed result code.
        if self.is_temp:
            #if not self.temp_code:
            #    pos = (os.path.basename(self.pos[0].get_description()),) + self.pos[1:] if self.pos else '(?)'
            #    raise RuntimeError("temp result name not set in %s at %r" % (
            #        self.__class__.__name__, pos))
            return self.temp_code
        else:
            return self.calculate_result_code()
    def is_c_result_required(self):
        """
        Subtypes may return False here if result temp allocation can be skipped.
        """
        return True
    def result_as(self, type = None):
        #  Return the result code cast to the specified C type.
        if (self.is_temp and self.type.is_pyobject and
                type != py_object_type):
            # Allocated temporaries are always PyObject *, which may not
            # reflect the actual type (e.g. an extension type)
            return typecast(type, py_object_type, self.result())
        return typecast(type, self.ctype(), self.result())
    def py_result(self):
        #  Return the result code cast to PyObject *.
        return self.result_as(py_object_type)
    def ctype(self):
        #  Return the native C type of the result (i.e. the
        #  C type of the result_code expression).
        return self.result_ctype or self.type
    def get_constant_c_result_code(self):
        # Return the constant value of this node as a result code
        # string, or None if the node is not constant.  This method
        # can be called when the constant result code is required
        # before the code generation phase.
        #
        # The return value is a string that can represent a simple C
        # value, a constant C name or a constant C expression.  If the
        # node type depends on Python code, this must return None.
        return None
    def calculate_constant_result(self):
        # Calculate the constant compile time result value of this
        # expression and store it in ``self.constant_result``.  Does
        # nothing by default, thus leaving ``self.constant_result``
        # unknown.  If valid, the result can be an arbitrary Python
        # value.
        #
        # This must only be called when it is assured that all
        # sub-expressions have a valid constant_result value.  The
        # ConstantFolding transform will do this.
        pass
    def has_constant_result(self):
        return self.constant_result is not constant_value_not_set and \
               self.constant_result is not not_a_constant
    def compile_time_value(self, denv):
        #  Return value of compile-time expression, or report error.
        error(self.pos, "Invalid compile-time expression")
    def compile_time_value_error(self, e):
        error(self.pos, "Error in compile-time expression: %s: %s" % (
            e.__class__.__name__, e))
    # ------------- Declaration Analysis ----------------
    def analyse_target_declaration(self, env):
        error(self.pos, "Cannot assign to or delete this")
    # ------------- Expression Analysis ----------------
    def analyse_const_expression(self, env):
        #  Called during the analyse_declarations phase of a
        #  constant expression. Analyses the expression's type,
        #  checks whether it is a legal const expression,
        #  and determines its value.
        node = self.analyse_types(env)
        node.check_const()
        return node
    def analyse_expressions(self, env):
        #  Convenience routine performing both the Type
        #  Analysis and Temp Allocation phases for a whole
        #  expression.
        return self.analyse_types(env)
    def analyse_target_expression(self, env, rhs):
        #  Convenience routine performing both the Type
        #  Analysis and Temp Allocation phases for the LHS of
        #  an assignment.
        return self.analyse_target_types(env)
    def analyse_boolean_expression(self, env):
        #  Analyse expression and coerce to a boolean.
        node = self.analyse_types(env)
        # NOTE(review): local name shadows the builtin `bool`; harmless here
        # but worth renaming if this method ever grows.
        bool = node.coerce_to_boolean(env)
        return bool
    def analyse_temp_boolean_expression(self, env):
        #  Analyse boolean expression and coerce result into
        #  a temporary. This is used when a branch is to be
        #  performed on the result and we won't have an
        #  opportunity to ensure disposal code is executed
        #  afterwards. By forcing the result into a temporary,
        #  we ensure that all disposal has been done by the
        #  time we get the result.
        node = self.analyse_types(env)
        return node.coerce_to_boolean(env).coerce_to_simple(env)
    # --------------- Type Inference -----------------
    def type_dependencies(self, env):
        # Returns the list of entries whose types must be determined
        # before the type of self can be inferred.
        if hasattr(self, 'type') and self.type is not None:
            return ()
        return sum([node.type_dependencies(env) for node in self.subexpr_nodes()], ())
    def infer_type(self, env):
        # Attempt to deduce the type of self.
        # Differs from analyse_types as it avoids unnecessary
        # analysis of subexpressions, but can assume everything
        # in self.type_dependencies() has been resolved.
        if hasattr(self, 'type') and self.type is not None:
            return self.type
        elif hasattr(self, 'entry') and self.entry is not None:
            return self.entry.type
        else:
            self.not_implemented("infer_type")
    def nonlocally_immutable(self):
        # Returns whether this variable is a safe reference, i.e.
        # can't be modified as part of globals or closures.
        return self.is_literal or self.is_temp or self.type.is_array or self.type.is_cfunction
    def inferable_item_node(self, index=0):
        """
        Return a node that represents the (type) result of an indexing operation,
        e.g. for tuple unpacking or iteration.
        """
        return IndexNode(self.pos, base=self, index=IntNode(
            self.pos, value=str(index), constant_result=index, type=PyrexTypes.c_py_ssize_t_type))
    # --------------- Type Analysis ------------------
    def analyse_as_module(self, env):
        # If this node can be interpreted as a reference to a
        # cimported module, return its scope, else None.
        return None
    def analyse_as_type(self, env):
        # If this node can be interpreted as a reference to a
        # type, return that type, else None.
        return None
    def analyse_as_extension_type(self, env):
        # If this node can be interpreted as a reference to an
        # extension type or builtin type, return its type, else None.
        return None
    def analyse_types(self, env):
        self.not_implemented("analyse_types")
    def analyse_target_types(self, env):
        return self.analyse_types(env)
    def nogil_check(self, env):
        # By default, any expression based on Python objects is
        # prevented in nogil environments.  Subtypes must override
        # this if they can work without the GIL.
        if self.type and self.type.is_pyobject:
            self.gil_error()
    def gil_assignment_check(self, env):
        if env.nogil and self.type.is_pyobject:
            error(self.pos, "Assignment of Python object not allowed without gil")
    def check_const(self):
        self.not_const()
        return False
    def not_const(self):
        error(self.pos, "Not allowed in a constant expression")
    def check_const_addr(self):
        self.addr_not_const()
        return False
    def addr_not_const(self):
        error(self.pos, "Address is not constant")
    # ----------------- Result Allocation -----------------
    def result_in_temp(self):
        #  Return true if result is in a temporary owned by
        #  this node or one of its subexpressions. Overridden
        #  by certain nodes which can share the result of
        #  a subnode.
        return self.is_temp
    def target_code(self):
        #  Return code fragment for use as LHS of a C assignment.
        return self.calculate_result_code()
    def calculate_result_code(self):
        self.not_implemented("calculate_result_code")
#    def release_target_temp(self, env):
#        #  Release temporaries used by LHS of an assignment.
#        self.release_subexpr_temps(env)
    def allocate_temp_result(self, code):
        # Allocate (at most once) the temp that will hold this node's result.
        # Py objects are always stored as generic PyObject *; nodes whose C
        # result is unused may skip allocation entirely.
        if self.temp_code:
            raise RuntimeError("Temp allocated multiple times in %r: %r" % (self.__class__.__name__, self.pos))
        type = self.type
        if not type.is_void:
            if type.is_pyobject:
                type = PyrexTypes.py_object_type
            elif not (self.result_is_used or type.is_memoryviewslice or self.is_c_result_required()):
                self.temp_code = None
                return
            self.temp_code = code.funcstate.allocate_temp(
                type, manage_ref=self.use_managed_ref)
        else:
            self.temp_code = None
    def release_temp_result(self, code):
        # Return the temp to the function's pool; `old_temp` records the
        # released name so a double release can be diagnosed precisely.
        if not self.temp_code:
            if not self.result_is_used:
                # not used anyway, so ignore if not set up
                return
            pos = (os.path.basename(self.pos[0].get_description()),) + self.pos[1:] if self.pos else '(?)'
            if self.old_temp:
                raise RuntimeError("temp %s released multiple times in %s at %r" % (
                    self.old_temp, self.__class__.__name__, pos))
            else:
                raise RuntimeError("no temp, but release requested in %s at %r" % (
                    self.__class__.__name__, pos))
        code.funcstate.release_temp(self.temp_code)
        self.old_temp = self.temp_code
        self.temp_code = None
    # ---------------- Code Generation -----------------
    def make_owned_reference(self, code):
        """
        If result is a pyobject, make sure we own a reference to it.
        If the result is in a temp, it is already a new reference.
        """
        if self.type.is_pyobject and not self.result_in_temp():
            code.put_incref(self.result(), self.ctype())
    def make_owned_memoryviewslice(self, code):
        """
        Make sure we own the reference to this memoryview slice.
        """
        if not self.result_in_temp():
            code.put_incref_memoryviewslice(self.result(),
                have_gil=self.in_nogil_context)
    def generate_evaluation_code(self, code):
        #  Generate code to evaluate this node and
        #  its sub-expressions, and dispose of any
        #  temporary results of its sub-expressions.
        self.generate_subexpr_evaluation_code(code)
        code.mark_pos(self.pos)
        if self.is_temp:
            self.allocate_temp_result(code)
        self.generate_result_code(code)
        if self.is_temp and not (self.type.is_string or self.type.is_pyunicode_ptr):
            # If we are temp we do not need to wait until this node is disposed
            # before disposing children.
            self.generate_subexpr_disposal_code(code)
            self.free_subexpr_temps(code)
    def generate_subexpr_evaluation_code(self, code):
        for node in self.subexpr_nodes():
            node.generate_evaluation_code(code)
    def generate_result_code(self, code):
        self.not_implemented("generate_result_code")
    def generate_disposal_code(self, code):
        if self.is_temp:
            if self.type.is_string or self.type.is_pyunicode_ptr:
                # postponed from self.generate_evaluation_code()
                self.generate_subexpr_disposal_code(code)
                self.free_subexpr_temps(code)
            if self.result():
                if self.type.is_pyobject:
                    code.put_decref_clear(self.result(), self.ctype())
                elif self.type.is_memoryviewslice:
                    code.put_xdecref_memoryviewslice(
                        self.result(), have_gil=not self.in_nogil_context)
        else:
            # Already done if self.is_temp
            self.generate_subexpr_disposal_code(code)
    def generate_subexpr_disposal_code(self, code):
        #  Generate code to dispose of temporary results
        #  of all sub-expressions.
        for node in self.subexpr_nodes():
            node.generate_disposal_code(code)
    def generate_post_assignment_code(self, code):
        # Called on an RHS whose reference was absorbed by the assignment:
        # clear the temp without decref'ing (ownership was transferred).
        if self.is_temp:
            if self.type.is_string or self.type.is_pyunicode_ptr:
                # postponed from self.generate_evaluation_code()
                self.generate_subexpr_disposal_code(code)
                self.free_subexpr_temps(code)
            elif self.type.is_pyobject:
                code.putln("%s = 0;" % self.result())
            elif self.type.is_memoryviewslice:
                code.putln("%s.memview = NULL;" % self.result())
                code.putln("%s.data = NULL;" % self.result())
        else:
            self.generate_subexpr_disposal_code(code)
    def generate_assignment_code(self, rhs, code, overloaded_assignment=False,
        exception_check=None, exception_value=None):
        #  Stub method for nodes which are not legal as
        #  the LHS of an assignment. An error will have
        #  been reported earlier.
        pass
    def generate_deletion_code(self, code, ignore_nonexisting=False):
        #  Stub method for nodes that are not legal as
        #  the argument of a del statement. An error
        #  will have been reported earlier.
        pass
    def free_temps(self, code):
        if self.is_temp:
            if not self.type.is_void:
                self.release_temp_result(code)
        else:
            self.free_subexpr_temps(code)
    def free_subexpr_temps(self, code):
        for sub in self.subexpr_nodes():
            sub.free_temps(code)
    def generate_function_definitions(self, env, code):
        pass
    # ---------------- Annotation ---------------------
    def annotate(self, code):
        for node in self.subexpr_nodes():
            node.annotate(code)
    # ----------------- Coercion ----------------------
    def coerce_to(self, dst_type, env):
        #  Coerce the result so that it can be assigned to
        #  something of type dst_type. If processing is necessary,
        #  wraps this node in a coercion node and returns that.
        #  Otherwise, returns this node unchanged.
        #
        #  This method is called during the analyse_expressions
        #  phase of the src_node's processing.
        #
        #  Note that subclasses that override this (especially
        #  ConstNodes) must not (re-)set their own .type attribute
        #  here.  Since expression nodes may turn up in different
        #  places in the tree (e.g. inside of CloneNodes in cascaded
        #  assignments), this method must return a new node instance
        #  if it changes the type.
        #
        src = self
        src_type = self.type
        if self.check_for_coercion_error(dst_type, env):
            return self
        used_as_reference = dst_type.is_reference
        if used_as_reference and not src_type.is_reference:
            dst_type = dst_type.ref_base_type
        if src_type.is_const:
            src_type = src_type.const_base_type
        if src_type.is_fused or dst_type.is_fused:
            # See if we are coercing a fused function to a pointer to a
            # specialized function
            if (src_type.is_cfunction and not dst_type.is_fused and
                    dst_type.is_ptr and dst_type.base_type.is_cfunction):
                dst_type = dst_type.base_type
                for signature in src_type.get_all_specialized_function_types():
                    if signature.same_as(dst_type):
                        src.type = signature
                        src.entry = src.type.entry
                        src.entry.used = True
                        return self
            if src_type.is_fused:
                error(self.pos, "Type is not specialized")
            else:
                error(self.pos, "Cannot coerce to a type that is not specialized")
            self.type = error_type
            return self
        if self.coercion_type is not None:
            # This is purely for error checking purposes!
            node = NameNode(self.pos, name='', type=self.coercion_type)
            node.coerce_to(dst_type, env)
        if dst_type.is_memoryviewslice:
            from . import MemoryView
            if not src.type.is_memoryviewslice:
                if src.type.is_pyobject:
                    src = CoerceToMemViewSliceNode(src, dst_type, env)
                elif src.type.is_array:
                    src = CythonArrayNode.from_carray(src, env).coerce_to(dst_type, env)
                elif not src_type.is_error:
                    error(self.pos,
                          "Cannot convert '%s' to memoryviewslice" % (src_type,))
            elif not src.type.conforms_to(dst_type, broadcast=self.is_memview_broadcast,
                                          copying=self.is_memview_copy_assignment):
                if src.type.dtype.same_as(dst_type.dtype):
                    msg = "Memoryview '%s' not conformable to memoryview '%s'."
                    tup = src.type, dst_type
                else:
                    msg = "Different base types for memoryviews (%s, %s)"
                    tup = src.type.dtype, dst_type.dtype
                error(self.pos, msg % tup)
        elif dst_type.is_pyobject:
            if not src.type.is_pyobject:
                if dst_type is bytes_type and src.type.is_int:
                    src = CoerceIntToBytesNode(src, env)
                else:
                    src = CoerceToPyTypeNode(src, env, type=dst_type)
            if not src.type.subtype_of(dst_type):
                if src.constant_result is not None:
                    src = PyTypeTestNode(src, dst_type, env)
        elif src.type.is_pyobject:
            if used_as_reference and dst_type.is_cpp_class:
                warning(
                    self.pos,
                    "Cannot pass Python object as C++ data structure reference (%s &), will pass by copy." % dst_type)
            src = CoerceFromPyTypeNode(dst_type, src, env)
        elif (dst_type.is_complex
              and src_type != dst_type
              and dst_type.assignable_from(src_type)):
            src = CoerceToComplexNode(src, dst_type, env)
        else: # neither src nor dst are py types
            # Added the string comparison, since for c types that
            # is enough, but Cython gets confused when the types are
            # in different pxi files.
            if not (str(src.type) == str(dst_type) or dst_type.assignable_from(src_type)):
                self.fail_assignment(dst_type)
        return src
    def fail_assignment(self, dst_type):
        error(self.pos, "Cannot assign type '%s' to '%s'" % (self.type, dst_type))
    def check_for_coercion_error(self, dst_type, env, fail=False, default=None):
        if fail and not default:
            default = "Cannot assign type '%(FROM)s' to '%(TO)s'"
        message = find_coercion_error((self.type, dst_type), default, env)
        if message is not None:
            error(self.pos, message % {'FROM': self.type, 'TO': dst_type})
            return True
        if fail:
            self.fail_assignment(dst_type)
            return True
        return False
    def coerce_to_pyobject(self, env):
        return self.coerce_to(PyrexTypes.py_object_type, env)
    def coerce_to_boolean(self, env):
        #  Coerce result to something acceptable as
        #  a boolean value.
        # if it's constant, calculate the result now
        if self.has_constant_result():
            bool_value = bool(self.constant_result)
            return BoolNode(self.pos, value=bool_value,
                            constant_result=bool_value)
        type = self.type
        if type.is_enum or type.is_error:
            return self
        elif type.is_pyobject or type.is_int or type.is_ptr or type.is_float:
            return CoerceToBooleanNode(self, env)
        elif type.is_cpp_class:
            # C++ classes are tested via their 'operator bool'.
            return SimpleCallNode(
                self.pos,
                function=AttributeNode(
                    self.pos, obj=self, attribute='operator bool'),
                args=[]).analyse_types(env)
        elif type.is_ctuple:
            bool_value = len(type.components) == 0
            return BoolNode(self.pos, value=bool_value,
                            constant_result=bool_value)
        else:
            error(self.pos, "Type '%s' not acceptable as a boolean" % type)
            return self
    def coerce_to_integer(self, env):
        # If not already some C integer type, coerce to longint.
        if self.type.is_int:
            return self
        else:
            return self.coerce_to(PyrexTypes.c_long_type, env)
    def coerce_to_temp(self, env):
        #  Ensure that the result is in a temporary.
        if self.result_in_temp():
            return self
        else:
            return CoerceToTempNode(self, env)
    def coerce_to_simple(self, env):
        #  Ensure that the result is simple (see is_simple).
        if self.is_simple():
            return self
        else:
            return self.coerce_to_temp(env)
    def is_simple(self):
        #  A node is simple if its result is something that can
        #  be referred to without performing any operations, e.g.
        #  a constant, local var, C global var, struct member
        #  reference, or temporary.
        return self.result_in_temp()
    def may_be_none(self):
        if self.type and not (self.type.is_pyobject or
                              self.type.is_memoryviewslice):
            return False
        if self.has_constant_result():
            return self.constant_result is not None
        return True
    def as_cython_attribute(self):
        return None
    def as_none_safe_node(self, message, error="PyExc_TypeError", format_args=()):
        # Wraps the node in a NoneCheckNode if it is not known to be
        # not-None (e.g. because it is a Python literal).
        if self.may_be_none():
            return NoneCheckNode(self, error, message, format_args)
        else:
            return self
    @classmethod
    def from_node(cls, node, **kwargs):
        """Instantiate this node class from another node, properly
        copying over all attributes that one would forget otherwise.
        """
        attributes = "cf_state cf_maybe_null cf_is_null constant_result".split()
        for attr_name in attributes:
            if attr_name in kwargs:
                continue
            try:
                value = getattr(node, attr_name)
            except AttributeError:
                pass
            else:
                kwargs[attr_name] = value
        return cls(node.pos, **kwargs)
class AtomicExprNode(ExprNode):
    """Abstract base class for expression nodes which have
    no sub-expressions.
    """
    subexprs = []
    #  Override to optimize -- we know we have no children
    def generate_subexpr_evaluation_code(self, code):
        pass
    def generate_subexpr_disposal_code(self, code):
        pass
class PyConstNode(AtomicExprNode):
    """Abstract base class for constant Python values (e.g. None, Ellipsis).

    The `value` class attribute names the C-level singleton; no code is
    needed to produce the result.
    """
    is_literal = 1
    type = py_object_type
    def is_simple(self):
        return 1
    def may_be_none(self):
        return False
    def analyse_types(self, env):
        return self
    def calculate_result_code(self):
        return self.value
    def generate_result_code(self, code):
        pass
class NoneNode(PyConstNode):
    """The constant value None (C singleton Py_None)."""
    is_none = 1
    value = "Py_None"
    constant_result = None
    nogil_check = None  # None is safe to reference without the GIL
    def compile_time_value(self, denv):
        return None
    def may_be_none(self):
        # This node *is* None, so it overrides PyConstNode's False.
        return True
class EllipsisNode(PyConstNode):
    """'...' in a subscript list (C singleton Py_Ellipsis)."""
    value = "Py_Ellipsis"
    constant_result = Ellipsis
    def compile_time_value(self, denv):
        return Ellipsis
class ConstNode(AtomicExprNode):
    """Abstract base type for literal constant nodes.

    value   string   C code fragment
    """
    is_literal = 1
    nogil_check = None  # constants are always safe without the GIL
    def is_simple(self):
        return 1
    def nonlocally_immutable(self):
        return 1
    def may_be_none(self):
        return False
    def analyse_types(self, env):
        return self  # Types are held in class variables
    def check_const(self):
        return True
    def get_constant_c_result_code(self):
        return self.calculate_result_code()
    def calculate_result_code(self):
        return str(self.value)
    def generate_result_code(self, code):
        pass
class BoolNode(ConstNode):
    """Literal boolean constant (the Python value True or False)."""
    type = PyrexTypes.c_bint_type

    def calculate_constant_result(self):
        self.constant_result = self.value

    def compile_time_value(self, denv):
        return self.value

    def calculate_result_code(self):
        # Python-level results use the singleton objects; C level is 0/1.
        if self.type.is_pyobject:
            return 'Py_True' if self.value else 'Py_False'
        return str(int(self.value))

    def coerce_to(self, dst_type, env):
        # A bool literal converts freely between its C and Python forms;
        # anything else is handled by the generic ConstNode coercion.
        c_to_py = dst_type.is_pyobject and self.type.is_int
        py_to_c = dst_type.is_int and self.type.is_pyobject
        if c_to_py or py_to_c:
            target_type = Builtin.bool_type if c_to_py else PyrexTypes.c_bint_type
            return BoolNode(
                self.pos, value=self.value,
                constant_result=self.constant_result,
                type=target_type)
        return ConstNode.coerce_to(self, dst_type, env)
class NullNode(ConstNode):
    """The C NULL pointer constant."""
    type = PyrexTypes.c_null_ptr_type
    value = "NULL"
    constant_result = 0
    def get_constant_c_result_code(self):
        return self.value
class CharNode(ConstNode):
    """A single C character literal; its compile-time value is the ordinal."""
    type = PyrexTypes.c_char_type
    def calculate_constant_result(self):
        self.constant_result = ord(self.value)
    def compile_time_value(self, denv):
        return ord(self.value)
    def calculate_result_code(self):
        # Emit a C char literal, escaping as needed (e.g. quotes, backslash).
        return "'%s'" % StringEncoding.escape_char(self.value)
class IntNode(ConstNode):
    """Integer literal constant.

    unsigned       "" or "U"
    longness       "" or "L" or "LL"
    is_c_literal   True/False/None   creator considers this a C integer literal
    """
    unsigned = ""
    longness = ""
    is_c_literal = None # unknown
    def __init__(self, pos, **kwds):
        ExprNode.__init__(self, pos, **kwds)
        # Pick a result type from the literal's value unless one was given.
        if 'type' not in kwds:
            self.type = self.find_suitable_type_for_value()
    def find_suitable_type_for_value(self):
        """Choose a C or Python type for the literal based on its
        suffixes and magnitude (32-bit split for plain literals)."""
        if self.constant_result is constant_value_not_set:
            try:
                self.calculate_constant_result()
            except ValueError:
                pass
        # we ignore 'is_c_literal = True' and instead map signed 32bit
        # integers as C long values
        if self.is_c_literal or \
               not self.has_constant_result() or \
               self.unsigned or self.longness == 'LL':
            # clearly a C literal
            rank = (self.longness == 'LL') and 2 or 1
            suitable_type = PyrexTypes.modifiers_and_name_to_type[not self.unsigned, rank, "int"]
            if self.type:
                suitable_type = PyrexTypes.widest_numeric_type(suitable_type, self.type)
        else:
            # C literal or Python literal - split at 32bit boundary
            if -2**31 <= self.constant_result < 2**31:
                if self.type and self.type.is_int:
                    suitable_type = self.type
                else:
                    suitable_type = PyrexTypes.c_long_type
            else:
                suitable_type = PyrexTypes.py_object_type
        return suitable_type
    def coerce_to(self, dst_type, env):
        if self.type is dst_type:
            return self
        elif dst_type.is_float:
            if self.has_constant_result():
                return FloatNode(self.pos, value='%d.0' % int(self.constant_result), type=dst_type,
                                 constant_result=float(self.constant_result))
            else:
                return FloatNode(self.pos, value=self.value, type=dst_type,
                                 constant_result=not_a_constant)
        if dst_type.is_numeric and not dst_type.is_complex:
            node = IntNode(self.pos, value=self.value, constant_result=self.constant_result,
                           type=dst_type, is_c_literal=True,
                           unsigned=self.unsigned, longness=self.longness)
            return node
        elif dst_type.is_pyobject:
            node = IntNode(self.pos, value=self.value, constant_result=self.constant_result,
                           type=PyrexTypes.py_object_type, is_c_literal=False,
                           unsigned=self.unsigned, longness=self.longness)
        else:
            # FIXME: not setting the type here to keep it working with
            # complex numbers. Should they be special cased?
            node = IntNode(self.pos, value=self.value, constant_result=self.constant_result,
                           unsigned=self.unsigned, longness=self.longness)
        # We still need to perform normal coerce_to processing on the
        # result, because we might be coercing to an extension type,
        # in which case a type test node will be needed.
        return ConstNode.coerce_to(node, dst_type, env)
    def coerce_to_boolean(self, env):
        return IntNode(
            self.pos, value=self.value,
            constant_result=self.constant_result,
            type=PyrexTypes.c_bint_type,
            unsigned=self.unsigned, longness=self.longness)
    def generate_evaluation_code(self, code):
        if self.type.is_pyobject:
            # pre-allocate a Python version of the number
            plain_integer_string = str(Utils.str_to_number(self.value))
            self.result_code = code.get_py_int(plain_integer_string, self.longness)
        else:
            self.result_code = self.get_constant_c_result_code()
    def get_constant_c_result_code(self):
        return self.value_as_c_integer_string() + self.unsigned + self.longness
    def value_as_c_integer_string(self):
        """Normalize the literal text into something a C compiler accepts
        with the intended signedness (octal/binary/negative-hex rewrites)."""
        value = self.value
        if len(value) <= 2:
            # too short to go wrong (and simplifies code below)
            return value
        neg_sign = ''
        if value[0] == '-':
            neg_sign = '-'
            value = value[1:]
        if value[0] == '0':
            literal_type = value[1] # 0'o' - 0'b' - 0'x'
            # 0x123 hex literals and 0123 octal literals work nicely in C
            # but C-incompatible Py3 oct/bin notations need conversion
            if neg_sign and literal_type in 'oOxX0123456789' and value[2:].isdigit():
                # negative hex/octal literal => prevent C compiler from using
                # unsigned integer types by converting to decimal (see C standard 6.4.4.1)
                value = str(Utils.str_to_number(value))
            elif literal_type in 'oO':
                value = '0' + value[2:] # '0o123' => '0123'
            elif literal_type in 'bB':
                value = str(int(value[2:], 2))
        elif value.isdigit() and not self.unsigned and not self.longness:
            if not neg_sign:
                # C compilers do not consider unsigned types for decimal literals,
                # but they do for hex (see C standard 6.4.4.1)
                value = '0x%X' % int(value)
        return neg_sign + value
    def calculate_result_code(self):
        return self.result_code
    def calculate_constant_result(self):
        self.constant_result = Utils.str_to_number(self.value)
    def compile_time_value(self, denv):
        return Utils.str_to_number(self.value)
class FloatNode(ConstNode):
    """Float literal constant (a C double unless coerced elsewhere)."""
    type = PyrexTypes.c_double_type

    def calculate_constant_result(self):
        self.constant_result = float(self.value)

    def compile_time_value(self, denv):
        return float(self.value)

    def coerce_to(self, dst_type, env):
        # A float literal converts freely between its C and Python forms;
        # everything else goes through the generic ConstNode path.
        common = dict(value=self.value, constant_result=self.constant_result)
        if dst_type.is_pyobject and self.type.is_float:
            return FloatNode(self.pos, type=Builtin.float_type, **common)
        if dst_type.is_float and self.type.is_pyobject:
            return FloatNode(self.pos, type=dst_type, **common)
        return ConstNode.coerce_to(self, dst_type, env)

    def calculate_result_code(self):
        return self.result_code

    def get_constant_c_result_code(self):
        strval = self.value
        assert isinstance(strval, basestring)
        # Special float values have no portable C literal spelling; map
        # them (keyed by repr(float(...))) to equivalent C expressions.
        special_values = {
            'nan': "(Py_HUGE_VAL * 0)",
            'inf': "Py_HUGE_VAL",
            '-inf': "(-Py_HUGE_VAL)",
        }
        cmpval = repr(float(strval))
        return special_values.get(cmpval, strval)

    def generate_evaluation_code(self, code):
        c_value = self.get_constant_c_result_code()
        if self.type.is_pyobject:
            # Pre-allocate the Python float object for reuse.
            self.result_code = code.get_py_float(self.value, c_value)
        else:
            self.result_code = c_value
def _analyse_name_as_type(name, pos, env):
    """Try to interpret the string `name` as a type.

    First attempts a plain basic-type parse; failing that, compiles a
    throw-away ``sizeof(name)`` fragment (with errors held/suppressed)
    and, if it analyses to a SizeofTypeNode, returns its argument type.
    Returns None if `name` does not denote a type.
    """
    type = PyrexTypes.parse_basic_type(name)
    if type is not None:
        return type
    hold_errors()
    from .TreeFragment import TreeFragment
    # Shift the position left by the length of "sizeof(" so errors point
    # at the original name.
    pos = (pos[0], pos[1], pos[2]-7)
    try:
        declaration = TreeFragment(u"sizeof(%s)" % name, name=pos[0].filename, initial_pos=pos)
    except CompileError:
        sizeof_node = None
    else:
        sizeof_node = declaration.root.stats[0].expr
        sizeof_node = sizeof_node.analyse_types(env)
    release_errors(ignore=True)
    if isinstance(sizeof_node, SizeofTypeNode):
        return sizeof_node.arg_type
    return None
class BytesNode(ConstNode):
    # A char* or bytes literal
    #
    # value      BytesLiteral
    is_string_literal = True
    # start off as Python 'bytes' to support len() in O(1)
    type = bytes_type
    def calculate_constant_result(self):
        self.constant_result = self.value
    def as_sliced_node(self, start, stop, step=None):
        # Build a new literal node for a compile-time constant slice.
        value = StringEncoding.bytes_literal(self.value[start:stop:step], self.value.encoding)
        return BytesNode(self.pos, value=value, constant_result=value)
    def compile_time_value(self, denv):
        return self.value.byteencode()
    def analyse_as_type(self, env):
        # A bytes literal may spell out a (C) type name, e.g. in casts.
        return _analyse_name_as_type(self.value.decode('ISO8859-1'), self.pos, env)
    def can_coerce_to_char_literal(self):
        return len(self.value) == 1
    def coerce_to_boolean(self, env):
        # This is special because testing a C char* for truth directly
        # would yield the wrong result.
        bool_value = bool(self.value)
        return BoolNode(self.pos, value=bool_value, constant_result=bool_value)
    def coerce_to(self, dst_type, env):
        """Coerce this literal to a C int (single character literals
        only), Python bytes, or one of the C char pointer types.
        """
        if self.type == dst_type:
            return self
        if dst_type.is_int:
            if not self.can_coerce_to_char_literal():
                error(self.pos, "Only single-character string literals can be coerced into ints.")
                return self
            if dst_type.is_unicode_char:
                error(self.pos, "Bytes literals cannot coerce to Py_UNICODE/Py_UCS4, use a unicode literal instead.")
                return self
            return CharNode(self.pos, value=self.value,
                            constant_result=ord(self.value))
        node = BytesNode(self.pos, value=self.value, constant_result=self.constant_result)
        if dst_type.is_pyobject:
            if dst_type in (py_object_type, Builtin.bytes_type):
                node.type = Builtin.bytes_type
            else:
                self.check_for_coercion_error(dst_type, env, fail=True)
            return node
        elif dst_type in (PyrexTypes.c_char_ptr_type, PyrexTypes.c_const_char_ptr_type):
            node.type = dst_type
            return node
        elif dst_type in (PyrexTypes.c_uchar_ptr_type, PyrexTypes.c_const_uchar_ptr_type, PyrexTypes.c_void_ptr_type):
            # keep the char* type for the literal and insert an explicit
            # C-level cast to the requested pointer type
            node.type = (PyrexTypes.c_const_char_ptr_type if dst_type == PyrexTypes.c_const_uchar_ptr_type
                         else PyrexTypes.c_char_ptr_type)
            return CastNode(node, dst_type)
        elif dst_type.assignable_from(PyrexTypes.c_char_ptr_type):
            node.type = dst_type
            return node
        # We still need to perform normal coerce_to processing on the
        # result, because we might be coercing to an extension type,
        # in which case a type test node will be needed.
        return ConstNode.coerce_to(node, dst_type, env)
    def generate_evaluation_code(self, code):
        if self.type.is_pyobject:
            result = code.get_py_string_const(self.value)
        elif self.type.is_const:
            result = code.get_string_const(self.value)
        else:
            # not const => use plain C string literal and cast to mutable type
            literal = self.value.as_c_string_literal()
            # C++ may require a cast
            result = typecast(self.type, PyrexTypes.c_void_ptr_type, literal)
        self.result_code = result
    def get_constant_c_result_code(self):
        return None # FIXME
    def calculate_result_code(self):
        return self.result_code
class UnicodeNode(ConstNode):
    # A Py_UNICODE* or unicode literal
    #
    # value        EncodedString
    # bytes_value  BytesLiteral    the literal parsed as bytes string
    #                              ('-3' unicode literals only)
    is_string_literal = True
    bytes_value = None
    type = unicode_type
    def calculate_constant_result(self):
        self.constant_result = self.value
    def analyse_as_type(self, env):
        return _analyse_name_as_type(self.value, self.pos, env)
    def as_sliced_node(self, start, stop, step=None):
        # Refuse to constant-fold slices that touch surrogate pairs:
        # the result can differ between narrow/wide unicode runtimes.
        if StringEncoding.string_contains_surrogates(self.value[:stop]):
            # this is unsafe as it may give different results
            # in different runtimes
            return None
        value = StringEncoding.EncodedString(self.value[start:stop:step])
        value.encoding = self.value.encoding
        if self.bytes_value is not None:
            bytes_value = StringEncoding.bytes_literal(
                self.bytes_value[start:stop:step], self.bytes_value.encoding)
        else:
            bytes_value = None
        return UnicodeNode(
            self.pos, value=value, bytes_value=bytes_value,
            constant_result=value)
    def coerce_to(self, dst_type, env):
        """Coerce to Py_UCS4/Py_UNICODE (single characters), to a C
        char* (only for '-3' literals that carry a bytes_value), to
        Py_UNICODE*, or to compatible Python object types.
        """
        if dst_type is self.type:
            pass
        elif dst_type.is_unicode_char:
            if not self.can_coerce_to_char_literal():
                error(self.pos,
                      "Only single-character Unicode string literals or "
                      "surrogate pairs can be coerced into Py_UCS4/Py_UNICODE.")
                return self
            int_value = ord(self.value)
            return IntNode(self.pos, type=dst_type, value=str(int_value),
                           constant_result=int_value)
        elif not dst_type.is_pyobject:
            if dst_type.is_string and self.bytes_value is not None:
                # special case: '-3' enforced unicode literal used in a
                # C char* context
                return BytesNode(self.pos, value=self.bytes_value
                                 ).coerce_to(dst_type, env)
            if dst_type.is_pyunicode_ptr:
                node = UnicodeNode(self.pos, value=self.value)
                node.type = dst_type
                return node
            error(self.pos,
                  "Unicode literals do not support coercion to C types other "
                  "than Py_UNICODE/Py_UCS4 (for characters) or Py_UNICODE* "
                  "(for strings).")
        elif dst_type not in (py_object_type, Builtin.basestring_type):
            self.check_for_coercion_error(dst_type, env, fail=True)
        return self
    def can_coerce_to_char_literal(self):
        return len(self.value) == 1
        ## or (len(self.value) == 2
        ##     and (0xD800 <= self.value[0] <= 0xDBFF)
        ##     and (0xDC00 <= self.value[1] <= 0xDFFF))
    def coerce_to_boolean(self, env):
        bool_value = bool(self.value)
        return BoolNode(self.pos, value=bool_value, constant_result=bool_value)
    def contains_surrogates(self):
        return StringEncoding.string_contains_surrogates(self.value)
    def generate_evaluation_code(self, code):
        if self.type.is_pyobject:
            if self.contains_surrogates():
                # surrogates are not really portable and cannot be
                # decoded by the UTF-8 codec in Py3.3
                # => build the constant at module init time from its
                #    Py_UNICODE[] data instead of a decoded string
                self.result_code = code.get_py_const(py_object_type, 'ustring')
                data_cname = code.get_pyunicode_ptr_const(self.value)
                # NOTE: rebinds 'code' to the cached-constants writer;
                # everything below emits into the module init section
                code = code.get_cached_constants_writer()
                code.mark_pos(self.pos)
                code.putln(
                    "%s = PyUnicode_FromUnicode(%s, (sizeof(%s) / sizeof(Py_UNICODE))-1); %s" % (
                        self.result_code,
                        data_cname,
                        data_cname,
                        code.error_goto_if_null(self.result_code, self.pos)))
                code.put_error_if_neg(
                    self.pos, "__Pyx_PyUnicode_READY(%s)" % self.result_code)
            else:
                self.result_code = code.get_py_string_const(self.value)
        else:
            self.result_code = code.get_pyunicode_ptr_const(self.value)
    def calculate_result_code(self):
        return self.result_code
    def compile_time_value(self, env):
        return self.value
class StringNode(PyConstNode):
    # A Python str object, i.e. a byte string in Python 2.x and a
    # unicode string in Python 3.x
    #
    # value          BytesLiteral (or EncodedString with ASCII content)
    # unicode_value  EncodedString or None
    # is_identifier  boolean
    type = str_type
    is_string_literal = True
    is_identifier = None
    unicode_value = None
    def calculate_constant_result(self):
        if self.unicode_value is not None:
            # only the Unicode value is portable across Py2/3
            self.constant_result = self.unicode_value
    def analyse_as_type(self, env):
        return _analyse_name_as_type(self.unicode_value or self.value.decode('ISO8859-1'), self.pos, env)
    def as_sliced_node(self, start, stop, step=None):
        # Constant-fold a slice of the literal, keeping value and
        # unicode_value (if any) in sync.
        value = type(self.value)(self.value[start:stop:step])
        value.encoding = self.value.encoding
        if self.unicode_value is not None:
            if StringEncoding.string_contains_surrogates(self.unicode_value[:stop]):
                # this is unsafe as it may give different results in different runtimes
                return None
            unicode_value = StringEncoding.EncodedString(
                self.unicode_value[start:stop:step])
        else:
            unicode_value = None
        return StringNode(
            self.pos, value=value, unicode_value=unicode_value,
            constant_result=value, is_identifier=self.is_identifier)
    def coerce_to(self, dst_type, env):
        if dst_type is not py_object_type and not str_type.subtype_of(dst_type):
            # if dst_type is Builtin.bytes_type:
            #     # special case: bytes = 'str literal'
            #     return BytesNode(self.pos, value=self.value)
            if not dst_type.is_pyobject:
                # C target types go through the bytes literal machinery
                return BytesNode(self.pos, value=self.value).coerce_to(dst_type, env)
            if dst_type is not Builtin.basestring_type:
                self.check_for_coercion_error(dst_type, env, fail=True)
        return self
    def can_coerce_to_char_literal(self):
        return not self.is_identifier and len(self.value) == 1
    def generate_evaluation_code(self, code):
        self.result_code = code.get_py_string_const(
            self.value, identifier=self.is_identifier, is_str=True,
            unicode_value=self.unicode_value)
    def get_constant_c_result_code(self):
        return None
    def calculate_result_code(self):
        return self.result_code
    def compile_time_value(self, env):
        if self.value.is_unicode:
            return self.value
        if not IS_PYTHON3:
            # use plain str/bytes object in Py2
            return self.value.byteencode()
        # in Py3, always return a Unicode string
        if self.unicode_value is not None:
            return self.unicode_value
        return self.value.decode('iso8859-1')
class IdentifierStringNode(StringNode):
    # A special str value that represents an identifier (bytes in Py2,
    # unicode in Py3).  The flag is passed through to
    # code.get_py_string_const() in StringNode.generate_evaluation_code().
    is_identifier = True
class ImagNode(AtomicExprNode):
    # Imaginary number literal
    #
    # value   float    imaginary part
    type = PyrexTypes.c_double_complex_type
    def calculate_constant_result(self):
        self.constant_result = complex(0.0, self.value)
    def compile_time_value(self, denv):
        return complex(0.0, self.value)
    def analyse_types(self, env):
        # Make sure the C-level complex type helpers are available.
        self.type.create_declaration_utility_code(env)
        return self
    def may_be_none(self):
        # A complex literal is never None.
        return False
    def coerce_to(self, dst_type, env):
        if self.type is dst_type:
            return self
        node = ImagNode(self.pos, value=self.value)
        if dst_type.is_pyobject:
            node.is_temp = 1
            node.type = PyrexTypes.py_object_type
        # We still need to perform normal coerce_to processing on the
        # result, because we might be coercing to an extension type,
        # in which case a type test node will be needed.
        return AtomicExprNode.coerce_to(node, dst_type, env)
    gil_message = "Constructing complex number"
    def calculate_result_code(self):
        if self.type.is_pyobject:
            return self.result()
        else:
            # C complex built from its parts; the real part is 0.
            return "%s(0, %r)" % (self.type.from_parts, float(self.value))
    def generate_result_code(self, code):
        if self.type.is_pyobject:
            code.putln(
                "%s = PyComplex_FromDoubles(0.0, %r); %s" % (
                    self.result(),
                    float(self.value),
                    code.error_goto_if_null(self.result(), self.pos)))
            code.put_gotref(self.py_result())
class NewExprNode(AtomicExprNode):
    # C++ new statement
    #
    # cppclass   node   c++ class to create
    type = None
    def infer_type(self, env):
        """Resolve the target class and its constructor; sets self.type
        to the constructor's function type (or error_type on failure).
        """
        type = self.cppclass.analyse_as_type(env)
        if type is None or not type.is_cpp_class:
            error(self.pos, "new operator can only be applied to a C++ class")
            self.type = error_type
            return
        self.cpp_check(env)
        constructor = type.scope.lookup(u'<init>')
        if constructor is None:
            # no constructor declared => declare an implicit default one
            # ('+' exception check: C++ exceptions are translated)
            func_type = PyrexTypes.CFuncType(type, [], exception_check='+')
            type.scope.declare_cfunction(u'<init>', func_type, self.pos)
            constructor = type.scope.lookup(u'<init>')
        self.class_type = type
        self.entry = constructor
        self.type = constructor.type
        return self.type
    def analyse_types(self, env):
        if self.type is None:
            self.infer_type(env)
        return self
    def may_be_none(self):
        return False
    def generate_result_code(self, code):
        pass
    def calculate_result_code(self):
        return "new " + self.class_type.empty_declaration_code()
class NameNode(AtomicExprNode):
    # Reference to a local or global variable name.
    #
    # name          string    Python name of the variable
    # entry         Entry     Symbol table entry
    # type_entry    Entry     For extension type names, the original type entry
    # cf_is_null    boolean   Is uninitialized before this node
    # cf_maybe_null boolean   Maybe uninitialized before this node
    # allow_null    boolean   Don't raise UnboundLocalError
    # nogil         boolean   Whether it is used in a nogil context
    is_name = True
    is_cython_module = False
    cython_attribute = None
    lhs_of_first_assignment = False # TODO: remove me
    is_used_as_rvalue = 0
    entry = None
    type_entry = None
    cf_maybe_null = True
    cf_is_null = False
    allow_null = False
    nogil = False
    inferred_type = None
    def as_cython_attribute(self):
        return self.cython_attribute
    def type_dependencies(self, env):
        # This node's type depends on itself while the entry's type is
        # still unspecified (resolved later by type inference).
        if self.entry is None:
            self.entry = env.lookup(self.name)
        if self.entry is not None and self.entry.type.is_unspecified:
            return (self,)
        else:
            return ()
    def infer_type(self, env):
        """Best-effort static type of this name, used by type inference."""
        if self.entry is None:
            self.entry = env.lookup(self.name)
        if self.entry is None or self.entry.type is unspecified_type:
            if self.inferred_type is not None:
                return self.inferred_type
            return py_object_type
        elif (self.entry.type.is_extension_type or self.entry.type.is_builtin_type) and \
                self.name == self.entry.type.name:
            # Unfortunately the type attribute of type objects
            # is used for the pointer to the type they represent.
            return type_type
        elif self.entry.type.is_cfunction:
            if self.entry.scope.is_builtin_scope:
                # special case: optimised builtin functions must be treated as Python objects
                return py_object_type
            else:
                # special case: referring to a C function must return its pointer
                return PyrexTypes.CPtrType(self.entry.type)
        else:
            # If entry is inferred as pyobject it's safe to use local
            # NameNode's inferred_type.
            if self.entry.type.is_pyobject and self.inferred_type:
                # Overflow may happen if integer
                if not (self.inferred_type.is_int and self.entry.might_overflow):
                    return self.inferred_type
            return self.entry.type
    def compile_time_value(self, denv):
        try:
            return denv.lookup(self.name)
        except KeyError:
            error(self.pos, "Compile-time name '%s' not defined" % self.name)
    def get_constant_c_result_code(self):
        if not self.entry or self.entry.type.is_pyobject:
            return None
        return self.entry.cname
    def coerce_to(self, dst_type, env):
        # If coercing to a generic pyobject and this is a builtin
        # C function with a Python equivalent, manufacture a NameNode
        # referring to the Python builtin.
        #print "NameNode.coerce_to:", self.name, dst_type ###
        if dst_type is py_object_type:
            entry = self.entry
            if entry and entry.is_cfunction:
                var_entry = entry.as_variable
                if var_entry:
                    if var_entry.is_builtin and var_entry.is_const:
                        var_entry = env.declare_builtin(var_entry.name, self.pos)
                    node = NameNode(self.pos, name = self.name)
                    node.entry = var_entry
                    node.analyse_rvalue_entry(env)
                    return node
        return super(NameNode, self).coerce_to(dst_type, env)
    def analyse_as_module(self, env):
        # Try to interpret this as a reference to a cimported module.
        # Returns the module scope, or None.
        entry = self.entry
        if not entry:
            entry = env.lookup(self.name)
        if entry and entry.as_module:
            return entry.as_module
        return None
    def analyse_as_type(self, env):
        # Try to interpret this name as a type; returns the type or None.
        if self.cython_attribute:
            type = PyrexTypes.parse_basic_type(self.cython_attribute)
        else:
            type = PyrexTypes.parse_basic_type(self.name)
        if type:
            return type
        entry = self.entry
        if not entry:
            entry = env.lookup(self.name)
        if entry and entry.is_type:
            return entry.type
        else:
            return None
    def analyse_as_extension_type(self, env):
        # Try to interpret this as a reference to an extension type.
        # Returns the extension type, or None.
        entry = self.entry
        if not entry:
            entry = env.lookup(self.name)
        if entry and entry.is_type:
            if entry.type.is_extension_type or entry.type.is_builtin_type:
                return entry.type
        return None
    def analyse_target_declaration(self, env):
        # Declare the name in the current scope if it is assigned to
        # without a prior declaration (implicit declaration).
        if not self.entry:
            self.entry = env.lookup_here(self.name)
        if not self.entry:
            if env.directives['warn.undeclared']:
                warning(self.pos, "implicit declaration of '%s'" % self.name, 1)
            if env.directives['infer_types'] != False:
                type = unspecified_type
            else:
                type = py_object_type
            self.entry = env.declare_var(self.name, type, self.pos)
        if self.entry.is_declared_generic:
            self.result_ctype = py_object_type
    def analyse_types(self, env):
        self.initialized_check = env.directives['initializedcheck']
        if self.entry is None:
            self.entry = env.lookup(self.name)
        if not self.entry:
            # fall back to a (possibly unknown) builtin
            self.entry = env.declare_builtin(self.name, self.pos)
        if not self.entry:
            self.type = PyrexTypes.error_type
            return self
        entry = self.entry
        if entry:
            entry.used = 1
            if entry.type.is_buffer:
                from . import Buffer
                Buffer.used_buffer_aux_vars(entry)
        self.analyse_rvalue_entry(env)
        return self
    def analyse_target_types(self, env):
        # Analyse this name as the target of an assignment and check
        # that it is actually assignable.
        self.analyse_entry(env, is_target=True)
        if self.entry.is_cfunction and self.entry.as_variable:
            if self.entry.is_overridable or not self.is_lvalue() and self.entry.fused_cfunction:
                # We need this for assigning to cpdef names and for the fused 'def' TreeFragment
                self.entry = self.entry.as_variable
                self.type = self.entry.type
        if self.type.is_const:
            error(self.pos, "Assignment to const '%s'" % self.name)
        if self.type.is_reference:
            error(self.pos, "Assignment to reference '%s'" % self.name)
        if not self.is_lvalue():
            error(self.pos, "Assignment to non-lvalue '%s'" % self.name)
            self.type = PyrexTypes.error_type
        self.entry.used = 1
        if self.entry.type.is_buffer:
            from . import Buffer
            Buffer.used_buffer_aux_vars(self.entry)
        return self
    def analyse_rvalue_entry(self, env):
        #print "NameNode.analyse_rvalue_entry:", self.name ###
        #print "Entry:", self.entry.__dict__ ###
        self.analyse_entry(env)
        entry = self.entry
        if entry.is_declared_generic:
            self.result_ctype = py_object_type
        if entry.is_pyglobal or entry.is_builtin:
            if entry.is_builtin and entry.is_const:
                self.is_temp = 0
            else:
                self.is_temp = 1
            self.is_used_as_rvalue = 1
        elif entry.type.is_memoryviewslice:
            self.is_temp = False
            self.is_used_as_rvalue = True
            self.use_managed_ref = True
        return self
    def nogil_check(self, env):
        # Reading Python globals/builtins requires the GIL (except for
        # cached builtin constants).
        self.nogil = True
        if self.is_used_as_rvalue:
            entry = self.entry
            if entry.is_builtin:
                if not entry.is_const: # cached builtins are ok
                    self.gil_error()
            elif entry.is_pyglobal:
                self.gil_error()
    gil_message = "Accessing Python global or builtin"
    def analyse_entry(self, env, is_target=False):
        #print "NameNode.analyse_entry:", self.name ###
        self.check_identifier_kind()
        entry = self.entry
        type = entry.type
        if (not is_target and type.is_pyobject and self.inferred_type and
                self.inferred_type.is_builtin_type):
            # assume that type inference is smarter than the static entry
            type = self.inferred_type
        self.type = type
    def check_identifier_kind(self):
        # Check that this is an appropriate kind of name for use in an
        # expression.  Also finds the variable entry associated with
        # an extension type.
        entry = self.entry
        if entry.is_type and entry.type.is_extension_type:
            self.type_entry = entry
        if entry.is_type and entry.type.is_enum:
            py_entry = Symtab.Entry(self.name, None, py_object_type)
            py_entry.is_pyglobal = True
            py_entry.scope = self.entry.scope
            self.entry = py_entry
        elif not (entry.is_const or entry.is_variable
                  or entry.is_builtin or entry.is_cfunction
                  or entry.is_cpp_class):
            if self.entry.as_variable:
                self.entry = self.entry.as_variable
            else:
                error(self.pos,
                      "'%s' is not a constant, variable or function identifier" % self.name)
    def is_simple(self):
        # If it's not a C variable, it'll be in a temp.
        return 1
    def may_be_none(self):
        if self.cf_state and self.type and (self.type.is_pyobject or
                                            self.type.is_memoryviewslice):
            # guard against infinite recursion on self-dependencies
            if getattr(self, '_none_checking', False):
                # self-dependency - either this node receives a None
                # value from *another* node, or it can not reference
                # None at this point => safe to assume "not None"
                return False
            self._none_checking = True
            # evaluate control flow state to see if there were any
            # potential None values assigned to the node so far
            may_be_none = False
            for assignment in self.cf_state:
                if assignment.rhs.may_be_none():
                    may_be_none = True
                    break
            del self._none_checking
            return may_be_none
        return super(NameNode, self).may_be_none()
    def nonlocally_immutable(self):
        if ExprNode.nonlocally_immutable(self):
            return True
        entry = self.entry
        if not entry or entry.in_closure:
            return False
        return entry.is_local or entry.is_arg or entry.is_builtin or entry.is_readonly
    def calculate_target_results(self, env):
        pass
    def check_const(self):
        entry = self.entry
        if entry is not None and not (entry.is_const or entry.is_cfunction or entry.is_builtin):
            self.not_const()
            return False
        return True
    def check_const_addr(self):
        entry = self.entry
        if not (entry.is_cglobal or entry.is_cfunction or entry.is_builtin):
            self.addr_not_const()
            return False
        return True
    def is_lvalue(self):
        # Assignable: non-readonly variables and overridable (cpdef)
        # C functions.
        return (
            self.entry.is_variable and
            not self.entry.is_readonly
        ) or (
            self.entry.is_cfunction and
            self.entry.is_overridable
        )
    def is_addressable(self):
        return self.entry.is_variable and not self.type.is_memoryviewslice
    def is_ephemeral(self):
        #  Name nodes are never ephemeral, even if the
        #  result is in a temporary.
        return 0
    def calculate_result_code(self):
        entry = self.entry
        if not entry:
            return "<error>" # There was an error earlier
        return entry.cname
    def generate_result_code(self, code):
        """Emit the C code that looks up the name at runtime, depending
        on where the entry lives (class namespace, builtins, module
        globals, or local/closure variables).
        """
        assert hasattr(self, 'entry')
        entry = self.entry
        if entry is None:
            return # There was an error earlier
        if entry.is_builtin and entry.is_const:
            return # Lookup already cached
        elif entry.is_pyclass_attr:
            assert entry.type.is_pyobject, "Python global or builtin not a Python object"
            interned_cname = code.intern_identifier(self.entry.name)
            if entry.is_builtin:
                namespace = Naming.builtins_cname
            else: # entry.is_pyglobal
                namespace = entry.scope.namespace_cname
            if not self.cf_is_null:
                # try the class namespace dict first, then fall back to
                # the module globals / builtins below
                code.putln(
                    '%s = PyObject_GetItem(%s, %s);' % (
                        self.result(),
                        namespace,
                        interned_cname))
                code.putln('if (unlikely(!%s)) {' % self.result())
                code.putln('PyErr_Clear();')
            code.globalstate.use_utility_code(
                UtilityCode.load_cached("GetModuleGlobalName", "ObjectHandling.c"))
            code.putln(
                '%s = __Pyx_GetModuleGlobalName(%s);' % (
                    self.result(),
                    interned_cname))
            if not self.cf_is_null:
                code.putln("}")
            code.putln(code.error_goto_if_null(self.result(), self.pos))
            code.put_gotref(self.py_result())
        elif entry.is_builtin and not entry.scope.is_module_scope:
            # known builtin
            assert entry.type.is_pyobject, "Python global or builtin not a Python object"
            interned_cname = code.intern_identifier(self.entry.name)
            code.globalstate.use_utility_code(
                UtilityCode.load_cached("GetBuiltinName", "ObjectHandling.c"))
            code.putln(
                '%s = __Pyx_GetBuiltinName(%s); %s' % (
                    self.result(),
                    interned_cname,
                    code.error_goto_if_null(self.result(), self.pos)))
            code.put_gotref(self.py_result())
        elif entry.is_pyglobal or (entry.is_builtin and entry.scope.is_module_scope):
            # name in class body, global name or unknown builtin
            assert entry.type.is_pyobject, "Python global or builtin not a Python object"
            interned_cname = code.intern_identifier(self.entry.name)
            if entry.scope.is_module_scope:
                code.globalstate.use_utility_code(
                    UtilityCode.load_cached("GetModuleGlobalName", "ObjectHandling.c"))
                code.putln(
                    '%s = __Pyx_GetModuleGlobalName(%s); %s' % (
                        self.result(),
                        interned_cname,
                        code.error_goto_if_null(self.result(), self.pos)))
            else:
                # FIXME: is_pyglobal is also used for class namespace
                code.globalstate.use_utility_code(
                    UtilityCode.load_cached("GetNameInClass", "ObjectHandling.c"))
                code.putln(
                    '%s = __Pyx_GetNameInClass(%s, %s); %s' % (
                        self.result(),
                        entry.scope.namespace_cname,
                        interned_cname,
                        code.error_goto_if_null(self.result(), self.pos)))
            code.put_gotref(self.py_result())
        elif entry.is_local or entry.in_closure or entry.from_closure or entry.type.is_memoryviewslice:
            # Raise UnboundLocalError for objects and memoryviewslices
            raise_unbound = (
                (self.cf_maybe_null or self.cf_is_null) and not self.allow_null)
            null_code = entry.type.check_for_null_code(entry.cname)
            memslice_check = entry.type.is_memoryviewslice and self.initialized_check
            if null_code and raise_unbound and (entry.type.is_pyobject or memslice_check):
                code.put_error_if_unbound(self.pos, entry, self.in_nogil_context)
    def generate_assignment_code(self, rhs, code, overloaded_assignment=False,
                                 exception_check=None, exception_value=None):
        """Emit the C code that assigns *rhs* to this name, handling
        Python globals/class attributes, memoryviews, buffers and plain
        C/Python variables (with the necessary refcounting).
        """
        #print "NameNode.generate_assignment_code:", self.name ###
        entry = self.entry
        if entry is None:
            return # There was an error earlier
        if (self.entry.type.is_ptr and isinstance(rhs, ListNode)
                and not self.lhs_of_first_assignment and not rhs.in_module_scope):
            error(self.pos, "Literal list must be assigned to pointer at time of declaration")
        # is_pyglobal seems to be True for module level-globals only.
        # We use this to access class->tp_dict if necessary.
        if entry.is_pyglobal:
            assert entry.type.is_pyobject, "Python global or builtin not a Python object"
            interned_cname = code.intern_identifier(self.entry.name)
            namespace = self.entry.scope.namespace_cname
            if entry.is_member:
                # if the entry is a member we have to cheat: SetAttr does not work
                # on types, so we create a descriptor which is then added to tp_dict
                setter = 'PyDict_SetItem'
                namespace = '%s->tp_dict' % namespace
            elif entry.scope.is_module_scope:
                setter = 'PyDict_SetItem'
                namespace = Naming.moddict_cname
            elif entry.is_pyclass_attr:
                setter = 'PyObject_SetItem'
            else:
                assert False, repr(entry)
            code.put_error_if_neg(
                self.pos,
                '%s(%s, %s, %s)' % (
                    setter,
                    namespace,
                    interned_cname,
                    rhs.py_result()))
            if debug_disposal_code:
                print("NameNode.generate_assignment_code:")
                print("...generating disposal code for %s" % rhs)
            rhs.generate_disposal_code(code)
            rhs.free_temps(code)
            if entry.is_member:
                # in Py2.6+, we need to invalidate the method cache
                code.putln("PyType_Modified(%s);" %
                           entry.scope.parent_type.typeptr_cname)
        else:
            if self.type.is_memoryviewslice:
                self.generate_acquire_memoryviewslice(rhs, code)
            elif self.type.is_buffer:
                # Generate code for doing the buffer release/acquisition.
                # This might raise an exception in which case the assignment (done
                # below) will not happen.
                #
                # The reason this is not in a typetest-like node is because the
                # variables that the acquired buffer info is stored to is allocated
                # per entry and coupled with it.
                self.generate_acquire_buffer(rhs, code)
            assigned = False
            if self.type.is_pyobject:
                #print "NameNode.generate_assignment_code: to", self.name ###
                #print "...from", rhs ###
                #print "...LHS type", self.type, "ctype", self.ctype() ###
                #print "...RHS type", rhs.type, "ctype", rhs.ctype() ###
                if self.use_managed_ref:
                    rhs.make_owned_reference(code)
                    is_external_ref = entry.is_cglobal or self.entry.in_closure or self.entry.from_closure
                    if is_external_ref:
                        if not self.cf_is_null:
                            if self.cf_maybe_null:
                                code.put_xgotref(self.py_result())
                            else:
                                code.put_gotref(self.py_result())
                    assigned = True
                    if entry.is_cglobal:
                        code.put_decref_set(
                            self.result(), rhs.result_as(self.ctype()))
                    else:
                        if not self.cf_is_null:
                            if self.cf_maybe_null:
                                code.put_xdecref_set(
                                    self.result(), rhs.result_as(self.ctype()))
                            else:
                                code.put_decref_set(
                                    self.result(), rhs.result_as(self.ctype()))
                        else:
                            assigned = False
                    if is_external_ref:
                        code.put_giveref(rhs.py_result())
            if not self.type.is_memoryviewslice:
                if not assigned:
                    if overloaded_assignment:
                        result = rhs.result()
                        if exception_check == '+':
                            translate_cpp_exception(code, self.pos, '%s = %s;' % (self.result(), result), exception_value, self.in_nogil_context)
                        else:
                            code.putln('%s = %s;' % (self.result(), result))
                    else:
                        result = rhs.result_as(self.ctype())
                        code.putln('%s = %s;' % (self.result(), result))
                if debug_disposal_code:
                    print("NameNode.generate_assignment_code:")
                    print("...generating post-assignment code for %s" % rhs)
                rhs.generate_post_assignment_code(code)
            elif rhs.result_in_temp():
                rhs.generate_post_assignment_code(code)
            rhs.free_temps(code)
    def generate_acquire_memoryviewslice(self, rhs, code):
        """
        Slices, coercions from objects, return values etc are new references.
        We have a borrowed reference in case of dst = src
        """
        from . import MemoryView
        MemoryView.put_acquire_memoryviewslice(
            lhs_cname=self.result(),
            lhs_type=self.type,
            lhs_pos=self.pos,
            rhs=rhs,
            code=code,
            have_gil=not self.in_nogil_context,
            first_assignment=self.cf_is_null)
    def generate_acquire_buffer(self, rhs, code):
        # rhstmp is only used in case the rhs is a complicated expression leading to
        # the object, to avoid repeating the same C expression for every reference
        # to the rhs. It does NOT hold a reference.
        pretty_rhs = isinstance(rhs, NameNode) or rhs.is_temp
        if pretty_rhs:
            rhstmp = rhs.result_as(self.ctype())
        else:
            rhstmp = code.funcstate.allocate_temp(self.entry.type, manage_ref=False)
            code.putln('%s = %s;' % (rhstmp, rhs.result_as(self.ctype())))
        from . import Buffer
        Buffer.put_assign_to_buffer(self.result(), rhstmp, self.entry,
                                    is_initialized=not self.lhs_of_first_assignment,
                                    pos=self.pos, code=code)
        if not pretty_rhs:
            code.putln("%s = 0;" % rhstmp)
            code.funcstate.release_temp(rhstmp)
    def generate_deletion_code(self, code, ignore_nonexisting=False):
        """Emit the C code for 'del name' (class attribute, module
        global, or local Python object / memoryview variable).
        """
        if self.entry is None:
            return # There was an error earlier
        elif self.entry.is_pyclass_attr:
            namespace = self.entry.scope.namespace_cname
            interned_cname = code.intern_identifier(self.entry.name)
            if ignore_nonexisting:
                key_error_code = 'PyErr_Clear(); else'
            else:
                # minor hack: fake a NameError on KeyError
                key_error_code = (
                    '{ PyErr_Clear(); PyErr_Format(PyExc_NameError, "name \'%%s\' is not defined", "%s"); }' %
                    self.entry.name)
            code.putln(
                'if (unlikely(PyObject_DelItem(%s, %s) < 0)) {'
                ' if (likely(PyErr_ExceptionMatches(PyExc_KeyError))) %s'
                ' %s '
                '}' % (namespace, interned_cname,
                       key_error_code,
                       code.error_goto(self.pos)))
        elif self.entry.is_pyglobal:
            code.globalstate.use_utility_code(
                UtilityCode.load_cached("PyObjectSetAttrStr", "ObjectHandling.c"))
            interned_cname = code.intern_identifier(self.entry.name)
            del_code = '__Pyx_PyObject_DelAttrStr(%s, %s)' % (
                Naming.module_cname, interned_cname)
            if ignore_nonexisting:
                code.putln(
                    'if (unlikely(%s < 0)) {'
                    ' if (likely(PyErr_ExceptionMatches(PyExc_AttributeError))) PyErr_Clear(); else %s '
                    '}' % (del_code, code.error_goto(self.pos)))
            else:
                code.put_error_if_neg(self.pos, del_code)
        elif self.entry.type.is_pyobject or self.entry.type.is_memoryviewslice:
            if not self.cf_is_null:
                if self.cf_maybe_null and not ignore_nonexisting:
                    code.put_error_if_unbound(self.pos, self.entry)
                if self.entry.type.is_pyobject:
                    if self.entry.in_closure:
                        # generator
                        if ignore_nonexisting and self.cf_maybe_null:
                            code.put_xgotref(self.result())
                        else:
                            code.put_gotref(self.result())
                    if ignore_nonexisting and self.cf_maybe_null:
                        code.put_xdecref(self.result(), self.ctype())
                    else:
                        code.put_decref(self.result(), self.ctype())
                    code.putln('%s = NULL;' % self.result())
                else:
                    code.put_xdecref_memoryviewslice(self.entry.cname,
                                                     have_gil=not self.nogil)
        else:
            error(self.pos, "Deletion of C names not supported")
    def annotate(self, code):
        # Mark called names in the annotated HTML output as Python or C
        # calls.
        if hasattr(self, 'is_called') and self.is_called:
            pos = (self.pos[0], self.pos[1], self.pos[2] - len(self.name) - 1)
            if self.type.is_pyobject:
                style, text = 'py_call', 'python function (%s)'
            else:
                style, text = 'c_call', 'c function (%s)'
            code.annotate(pos, AnnotationItem(style, text % self.type, size=len(self.name)))
class BackquoteNode(ExprNode):
    # Python 2 backquote (repr) expression:  `expr`
    #
    # arg   ExprNode
    type = py_object_type
    subexprs = ['arg']
    gil_message = "Backquote expression"

    def analyse_types(self, env):
        # The argument must be a Python object for PyObject_Repr().
        arg = self.arg.analyse_types(env)
        self.arg = arg.coerce_to_pyobject(env)
        self.is_temp = 1
        return self

    def calculate_constant_result(self):
        self.constant_result = repr(self.arg.constant_result)

    def generate_result_code(self, code):
        result = self.result()
        code.putln(
            "%s = PyObject_Repr(%s); %s" % (
                result,
                self.arg.py_result(),
                code.error_goto_if_null(result, self.pos)))
        code.put_gotref(self.py_result())
class ImportNode(ExprNode):
    # Used as part of import statement implementation.
    # Implements result =
    #    __import__(module_name, globals(), None, name_list, level)
    #
    # module_name   StringNode         dotted name of module. Empty module
    #                      name means importing the parent package according
    #                      to level
    # name_list     ListNode or None   list of names to be imported
    # level         int                relative import level:
    #               -1: attempt both relative import and absolute import;
    #                0: absolute import;
    #               >0: the number of parent directories to search
    #                   relative to the current module.
    #             None: decide the level according to language level and
    #                   directives
    type = py_object_type
    subexprs = ['module_name', 'name_list']
    def analyse_types(self, env):
        if self.level is None:
            # Choose the default import level based on the 'py2_import'
            # directive and the 'absolute_import' future directive.
            if (env.directives['py2_import'] or
                    Future.absolute_import not in env.global_scope().context.future_directives):
                self.level = -1
            else:
                self.level = 0
        module_name = self.module_name.analyse_types(env)
        self.module_name = module_name.coerce_to_pyobject(env)
        if self.name_list:
            name_list = self.name_list.analyse_types(env)
            self.name_list = name_list.coerce_to_pyobject(env)
        self.is_temp = 1
        return self
    gil_message = "Python import"
    def generate_result_code(self, code):
        if self.name_list:
            name_list_code = self.name_list.py_result()
        else:
            name_list_code = "0"
        code.globalstate.use_utility_code(UtilityCode.load_cached("Import", "ImportExport.c"))
        import_code = "__Pyx_Import(%s, %s, %d)" % (
            self.module_name.py_result(),
            name_list_code,
            self.level)
        if (self.level <= 0 and
                self.module_name.is_string_literal and
                self.module_name.value in utility_code_for_imports):
            # some modules need extra helper code after import
            # (see utility_code_for_imports): wrap the import call
            helper_func, code_name, code_file = utility_code_for_imports[self.module_name.value]
            code.globalstate.use_utility_code(UtilityCode.load_cached(code_name, code_file))
            import_code = '%s(%s)' % (helper_func, import_code)
        code.putln("%s = %s; %s" % (
            self.result(),
            import_code,
            code.error_goto_if_null(self.result(), self.pos)))
        code.put_gotref(self.py_result())
class IteratorNode(ExprNode):
    #  Used as part of for statement implementation.
    #
    #  Implements result = iter(sequence)
    #
    #  sequence   ExprNode
    #
    #  Supports three iteration strategies, chosen by sequence type:
    #   * C arrays / pointers: transformed away later (see analyse_types);
    #   * C++ classes: begin()/end() iterator protocol;
    #   * Python objects: tp_iternext, with an inlined fast path for
    #     exact list/tuple objects indexed by a counter temp.
    type = py_object_type
    iter_func_ptr = None
    counter_cname = None
    cpp_iterator_cname = None
    reversed = False      # currently only used for list/tuple types (see Optimize.py)
    is_async = False

    subexprs = ['sequence']

    def analyse_types(self, env):
        self.sequence = self.sequence.analyse_types(env)
        if (self.sequence.type.is_array or self.sequence.type.is_ptr) and \
                not self.sequence.type.is_string:
            # C array iteration will be transformed later on
            self.type = self.sequence.type
        elif self.sequence.type.is_cpp_class:
            self.analyse_cpp_types(env)
        else:
            self.sequence = self.sequence.coerce_to_pyobject(env)
            if self.sequence.type in (list_type, tuple_type):
                # None check done here (not in a helper) for the fast path.
                self.sequence = self.sequence.as_none_safe_node("'NoneType' object is not iterable")
        self.is_temp = 1
        return self

    gil_message = "Iterating over Python object"

    # C type of a tp_iternext slot: PyObject *(*)(PyObject *).
    _func_iternext_type = PyrexTypes.CPtrType(PyrexTypes.CFuncType(
        PyrexTypes.py_object_type, [
            PyrexTypes.CFuncTypeArg("it", PyrexTypes.py_object_type, None),
            ]))

    def type_dependencies(self, env):
        return self.sequence.type_dependencies(env)

    def infer_type(self, env):
        # Mirrors the type decisions of analyse_types() without side effects.
        sequence_type = self.sequence.infer_type(env)
        if sequence_type.is_array or sequence_type.is_ptr:
            return sequence_type
        elif sequence_type.is_cpp_class:
            begin = sequence_type.scope.lookup("begin")
            if begin is not None:
                return begin.type.return_type
        elif sequence_type.is_pyobject:
            return sequence_type
        return py_object_type

    def analyse_cpp_types(self, env):
        # Validate the C++ iteration protocol on the sequence type:
        # zero-argument begin()/end() member functions whose result
        # supports operator!=, operator++ and operator* (for class-type
        # iterators) or is a raw pointer.  Sets self.type to the iterator
        # type, or error_type with a diagnostic on failure.
        sequence_type = self.sequence.type
        if sequence_type.is_ptr:
            sequence_type = sequence_type.base_type
        begin = sequence_type.scope.lookup("begin")
        end = sequence_type.scope.lookup("end")
        if (begin is None
                or not begin.type.is_cfunction
                or begin.type.args):
            error(self.pos, "missing begin() on %s" % self.sequence.type)
            self.type = error_type
            return
        if (end is None
                or not end.type.is_cfunction
                or end.type.args):
            error(self.pos, "missing end() on %s" % self.sequence.type)
            self.type = error_type
            return
        iter_type = begin.type.return_type
        if iter_type.is_cpp_class:
            if env.lookup_operator_for_types(
                    self.pos,
                    "!=",
                    [iter_type, end.type.return_type]) is None:
                error(self.pos, "missing operator!= on result of begin() on %s" % self.sequence.type)
                self.type = error_type
                return
            if env.lookup_operator_for_types(self.pos, '++', [iter_type]) is None:
                error(self.pos, "missing operator++ on result of begin() on %s" % self.sequence.type)
                self.type = error_type
                return
            if env.lookup_operator_for_types(self.pos, '*', [iter_type]) is None:
                error(self.pos, "missing operator* on result of begin() on %s" % self.sequence.type)
                self.type = error_type
                return
            self.type = iter_type
        elif iter_type.is_ptr:
            if not (iter_type == end.type.return_type):
                error(self.pos, "incompatible types for begin() and end()")
            self.type = iter_type
        else:
            error(self.pos, "result type of begin() on %s must be a C++ class or pointer" % self.sequence.type)
            self.type = error_type
            return

    def generate_result_code(self, code):
        # Emits the iterator setup code.  For possible-sequence objects this
        # opens an `if (list-or-tuple) { ... } else { ... }` C block whose
        # branches are completed here and consumed later by
        # generate_iter_next_result_code().
        sequence_type = self.sequence.type
        if sequence_type.is_cpp_class:
            if self.sequence.is_name:
                # safe: C++ won't allow you to reassign to class references
                begin_func = "%s.begin" % self.sequence.result()
            else:
                # Take the address once into a temp so begin() is called on
                # the same object instance as the later end() call.
                sequence_type = PyrexTypes.c_ptr_type(sequence_type)
                self.cpp_iterator_cname = code.funcstate.allocate_temp(sequence_type, manage_ref=False)
                code.putln("%s = &%s;" % (self.cpp_iterator_cname, self.sequence.result()))
                begin_func = "%s->begin" % self.cpp_iterator_cname
            # TODO: Limit scope.
            code.putln("%s = %s();" % (self.result(), begin_func))
            return
        if sequence_type.is_array or sequence_type.is_ptr:
            raise InternalError("for in carray slice not transformed")
        is_builtin_sequence = sequence_type in (list_type, tuple_type)
        if not is_builtin_sequence:
            # reversed() not currently optimised (see Optimize.py)
            assert not self.reversed, "internal error: reversed() only implemented for list/tuple objects"
        self.may_be_a_sequence = not sequence_type.is_builtin_type
        if self.may_be_a_sequence:
            code.putln(
                "if (likely(PyList_CheckExact(%s)) || PyTuple_CheckExact(%s)) {" % (
                    self.sequence.py_result(),
                    self.sequence.py_result()))
        if is_builtin_sequence or self.may_be_a_sequence:
            # Fast path: keep a Py_ssize_t index; start at size-1 when
            # iterating in reverse, else at 0.
            self.counter_cname = code.funcstate.allocate_temp(
                PyrexTypes.c_py_ssize_t_type, manage_ref=False)
            if self.reversed:
                if sequence_type is list_type:
                    init_value = 'PyList_GET_SIZE(%s) - 1' % self.result()
                else:
                    init_value = 'PyTuple_GET_SIZE(%s) - 1' % self.result()
            else:
                init_value = '0'
            code.putln("%s = %s; __Pyx_INCREF(%s); %s = %s;" % (
                self.result(),
                self.sequence.py_result(),
                self.result(),
                self.counter_cname,
                init_value))
        if not is_builtin_sequence:
            self.iter_func_ptr = code.funcstate.allocate_temp(self._func_iternext_type, manage_ref=False)
            if self.may_be_a_sequence:
                # NULL iter_func_ptr marks "took the sequence fast path".
                code.putln("%s = NULL;" % self.iter_func_ptr)
                code.putln("} else {")
                code.put("%s = -1; " % self.counter_cname)
            code.putln("%s = PyObject_GetIter(%s); %s" % (
                self.result(),
                self.sequence.py_result(),
                code.error_goto_if_null(self.result(), self.pos)))
            code.put_gotref(self.py_result())
            # PyObject_GetIter() fails if "tp_iternext" is not set, but the check below
            # makes it visible to the C compiler that the pointer really isn't NULL, so that
            # it can distinguish between the special cases and the generic case
            code.putln("%s = Py_TYPE(%s)->tp_iternext; %s" % (
                self.iter_func_ptr, self.py_result(),
                code.error_goto_if_null(self.iter_func_ptr, self.pos)))
        if self.may_be_a_sequence:
            code.putln("}")

    def generate_next_sequence_item(self, test_name, result_name, code):
        # Emits one fast-path fetch for an exact list or tuple.
        # test_name is 'List' or 'Tuple' (used to build the C macro names).
        assert self.counter_cname, "internal error: counter_cname temp not prepared"
        final_size = 'Py%s_GET_SIZE(%s)' % (test_name, self.py_result())
        if self.sequence.is_sequence_constructor:
            # Literal sequence: the length is known at compile time, so the
            # bound can be a constant instead of a GET_SIZE() call.
            item_count = len(self.sequence.args)
            if self.sequence.mult_factor is None:
                final_size = item_count
            elif isinstance(self.sequence.mult_factor.constant_result, _py_int_types):
                final_size = item_count * self.sequence.mult_factor.constant_result
        code.putln("if (%s >= %s) break;" % (self.counter_cname, final_size))
        if self.reversed:
            inc_dec = '--'
        else:
            inc_dec = '++'
        # GET_ITEM returns a borrowed reference, so INCREF in CPython;
        # other implementations go through PySequence_ITEM (new reference).
        code.putln("#if CYTHON_COMPILING_IN_CPYTHON")
        code.putln(
            "%s = Py%s_GET_ITEM(%s, %s); __Pyx_INCREF(%s); %s%s; %s" % (
                result_name,
                test_name,
                self.py_result(),
                self.counter_cname,
                result_name,
                self.counter_cname,
                inc_dec,
                # use the error label to avoid C compiler warnings if we only use it below
                code.error_goto_if_neg('0', self.pos)
                ))
        code.putln("#else")
        code.putln(
            "%s = PySequence_ITEM(%s, %s); %s%s; %s" % (
                result_name,
                self.py_result(),
                self.counter_cname,
                self.counter_cname,
                inc_dec,
                code.error_goto_if_null(result_name, self.pos)))
        code.put_gotref(result_name)
        code.putln("#endif")

    def generate_iter_next_result_code(self, result_name, code):
        # Emits the per-loop-iteration fetch; a C `break;` terminates the
        # surrounding loop when the iterator is exhausted.
        sequence_type = self.sequence.type
        if self.reversed:
            code.putln("if (%s < 0) break;" % self.counter_cname)
        if sequence_type.is_cpp_class:
            if self.cpp_iterator_cname:
                end_func = "%s->end" % self.cpp_iterator_cname
            else:
                end_func = "%s.end" % self.sequence.result()
            # TODO: Cache end() call?
            code.putln("if (!(%s != %s())) break;" % (
                self.result(),
                end_func))
            code.putln("%s = *%s;" % (
                result_name,
                self.result()))
            code.putln("++%s;" % self.result())
            return
        elif sequence_type is list_type:
            self.generate_next_sequence_item('List', result_name, code)
            return
        elif sequence_type is tuple_type:
            self.generate_next_sequence_item('Tuple', result_name, code)
            return
        if self.may_be_a_sequence:
            # Runtime dispatch: NULL iter_func_ptr means the fast path won.
            code.putln("if (likely(!%s)) {" % self.iter_func_ptr)
            code.putln("if (likely(PyList_CheckExact(%s))) {" % self.py_result())
            self.generate_next_sequence_item('List', result_name, code)
            code.putln("} else {")
            self.generate_next_sequence_item('Tuple', result_name, code)
            code.putln("}")
            code.put("} else ")
        code.putln("{")
        code.putln(
            "%s = %s(%s);" % (
                result_name,
                self.iter_func_ptr,
                self.py_result()))
        # tp_iternext signals exhaustion either by returning NULL without
        # an exception, or by raising StopIteration; both end the loop.
        code.putln("if (unlikely(!%s)) {" % result_name)
        code.putln("PyObject* exc_type = PyErr_Occurred();")
        code.putln("if (exc_type) {")
        code.putln("if (likely(exc_type == PyExc_StopIteration ||"
                   " PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();")
        code.putln("else %s" % code.error_goto(self.pos))
        code.putln("}")
        code.putln("break;")
        code.putln("}")
        code.put_gotref(result_name)
        code.putln("}")

    def free_temps(self, code):
        # Release the manually managed temps allocated in
        # generate_result_code(); counter/cpp temps are not reset to None
        # here (only iter_func_ptr is), matching long-standing behaviour.
        if self.counter_cname:
            code.funcstate.release_temp(self.counter_cname)
        if self.iter_func_ptr:
            code.funcstate.release_temp(self.iter_func_ptr)
            self.iter_func_ptr = None
        if self.cpp_iterator_cname:
            code.funcstate.release_temp(self.cpp_iterator_cname)
        ExprNode.free_temps(self, code)
class NextNode(AtomicExprNode):
    """Fetch the next item from an iterator (for-statement helper).

    Implements ``result = next(iterator)``.  Created during the
    analyse_types phase; the iterator node is shared, not owned by this
    node.

    iterator   IteratorNode
    """

    def __init__(self, iterator):
        AtomicExprNode.__init__(self, iterator.pos)
        self.iterator = iterator

    def nogil_check(self, env):
        # Nothing to report: the IteratorNode already raised any errors.
        pass

    def type_dependencies(self, env):
        return self.iterator.type_dependencies(env)

    def infer_type(self, env, iterator_type=None):
        it_type = iterator_type
        if it_type is None:
            it_type = self.iterator.infer_type(env)
        # C pointer / array iteration yields the element type directly.
        if it_type.is_ptr or it_type.is_array:
            return it_type.base_type
        if it_type.is_cpp_class:
            # Dereference via operator* and strip reference/const wrappers.
            deref_op = env.lookup_operator_for_types(self.pos, "*", [it_type])
            item_type = deref_op.type.return_type
            if item_type.is_reference:
                item_type = item_type.ref_base_type
            if item_type.is_const:
                item_type = item_type.const_base_type
            return item_type
        # Python iteration: delegate to IndexNode's inference on the
        # underlying sequence to avoid duplicating its complicated logic.
        probe_index = IntNode(self.pos, value='PY_SSIZE_T_MAX',
                              type=PyrexTypes.c_py_ssize_t_type)
        probe_node = IndexNode(
            self.pos,
            base=self.iterator.sequence,
            index=probe_index)
        return probe_node.infer_type(env)

    def analyse_types(self, env):
        self.type = self.infer_type(env, self.iterator.type)
        self.is_temp = 1
        return self

    def generate_result_code(self, code):
        # The iterator node knows how to emit the actual fetch.
        self.iterator.generate_iter_next_result_code(self.result(), code)
class AsyncIteratorNode(ExprNode):
    """'async for' helper: implements ``result = sequence.__aiter__()``.

    sequence   ExprNode
    """

    subexprs = ['sequence']

    is_async = True
    type = py_object_type
    is_temp = 1

    def infer_type(self, env):
        return py_object_type

    def analyse_types(self, env):
        self.sequence = self.sequence.analyse_types(env)
        if not self.sequence.type.is_pyobject:
            # Async iteration only makes sense over Python objects.
            error(self.pos, "async for loops not allowed on C/C++ types")
            self.sequence = self.sequence.coerce_to_pyobject(env)
        return self

    def generate_result_code(self, code):
        helper = UtilityCode.load_cached("AsyncIter", "Coroutine.c")
        code.globalstate.use_utility_code(helper)
        stmt = "%s = __Pyx_Coroutine_GetAsyncIter(%s); %s" % (
            self.result(),
            self.sequence.py_result(),
            code.error_goto_if_null(self.result(), self.pos))
        code.putln(stmt)
        code.put_gotref(self.result())
class AsyncNextNode(AtomicExprNode):
    """'async for' helper: implements ``result = iterator.__anext__()``.

    Created during the analyse_types phase; the iterator node is shared,
    not owned by this node.

    iterator   IteratorNode
    """

    type = py_object_type
    is_temp = 1

    def __init__(self, iterator):
        AtomicExprNode.__init__(self, iterator.pos)
        self.iterator = iterator

    def infer_type(self, env):
        return py_object_type

    def analyse_types(self, env):
        return self

    def generate_result_code(self, code):
        helper = UtilityCode.load_cached("AsyncIter", "Coroutine.c")
        code.globalstate.use_utility_code(helper)
        stmt = "%s = __Pyx_Coroutine_AsyncIterNext(%s); %s" % (
            self.result(),
            self.iterator.py_result(),
            code.error_goto_if_null(self.result(), self.pos))
        code.putln(stmt)
        code.put_gotref(self.result())
class WithExitCallNode(ExprNode):
    # The __exit__() call of a 'with' statement.  Used in both the
    # except and finally clauses.
    #
    # with_stat  WithStatNode                the surrounding 'with' statement
    # args       TupleNode or ResultStatNode the exception info tuple
    # await      AwaitExprNode               the await expression of an 'async with' statement
    #
    # Evaluates to the truth value of __exit__()'s return value (C bint),
    # i.e. whether the exception (if any) should be suppressed.
    # NOTE: 'await' is a plain identifier here; this source predates
    # Python 3.7, where 'await' became a reserved keyword.

    subexprs = ['args', 'await']
    test_if_run = True
    await = None

    def analyse_types(self, env):
        self.args = self.args.analyse_types(env)
        if self.await:
            # 'async with': the __exit__() result must itself be awaited.
            self.await = self.await.analyse_types(env)
        self.type = PyrexTypes.c_bint_type
        self.is_temp = True
        return self

    def generate_evaluation_code(self, code):
        if self.test_if_run:
            # call only if it was not already called (and decref-cleared)
            code.putln("if (%s) {" % self.with_stat.exit_var)

        self.args.generate_evaluation_code(code)
        # Unmanaged temp: refcounting is done explicitly below.
        result_var = code.funcstate.allocate_temp(py_object_type, manage_ref=False)

        code.mark_pos(self.pos)
        code.globalstate.use_utility_code(UtilityCode.load_cached(
            "PyObjectCall", "ObjectHandling.c"))
        code.putln("%s = __Pyx_PyObject_Call(%s, %s, NULL);" % (
            result_var,
            self.with_stat.exit_var,
            self.args.result()))
        # Clear exit_var first so a second clause won't call __exit__ again.
        code.put_decref_clear(self.with_stat.exit_var, type=py_object_type)
        self.args.generate_disposal_code(code)
        self.args.free_temps(code)

        code.putln(code.error_goto_if_null(result_var, self.pos))
        code.put_gotref(result_var)

        if self.await:
            # FIXME: result_var temp currently leaks into the closure
            self.await.generate_evaluation_code(code, source_cname=result_var, decref_source=True)
            code.putln("%s = %s;" % (result_var, self.await.py_result()))
            self.await.generate_post_assignment_code(code)
            self.await.free_temps(code)

        if self.result_is_used:
            self.allocate_temp_result(code)
            code.putln("%s = __Pyx_PyObject_IsTrue(%s);" % (self.result(), result_var))
        code.put_decref_clear(result_var, type=py_object_type)
        if self.result_is_used:
            # __Pyx_PyObject_IsTrue() returns -1 on error.
            code.put_error_if_neg(self.pos, self.result())
        code.funcstate.release_temp(result_var)
        if self.test_if_run:
            code.putln("}")
class ExcValueNode(AtomicExprNode):
    """Fetches the current exception value inside an except clause.

    Created during the analyse_types phase of an ExceptClauseNode; the
    result is simply the C variable the clause stored the value into.
    """

    type = py_object_type

    def __init__(self, pos):
        # Initialises via ExprNode directly, as the original code does.
        ExprNode.__init__(self, pos)

    def analyse_types(self, env):
        return self

    def set_var(self, var):
        # Record the C name that holds the exception value.
        self.var = var

    def calculate_result_code(self):
        return self.var

    def generate_result_code(self, code):
        # Nothing to emit: the value already lives in self.var.
        pass
class TempNode(ExprNode):
    # Node created during analyse_types phase
    # of some nodes to hold a temporary value.
    #
    # Note: One must call "allocate" and "release" on
    # the node during code generation to get/release the temp.
    # This is because the temp result is often used outside of
    # the regular cycle.

    subexprs = []

    def __init__(self, pos, type, env=None):
        ExprNode.__init__(self, pos)
        self.type = type
        if type.is_pyobject:
            self.result_ctype = py_object_type
        self.is_temp = 1

    def analyse_types(self, env):
        return self

    def analyse_target_declaration(self, env):
        pass

    def generate_result_code(self, code):
        pass

    def allocate(self, code):
        # Must be paired with release(); see class comment.
        self.temp_cname = code.funcstate.allocate_temp(self.type, manage_ref=True)

    def release(self, code):
        code.funcstate.release_temp(self.temp_cname)
        self.temp_cname = None

    def result(self):
        # Returns the allocated temp's C name.  Guard against forgetting
        # allocate(): only AttributeError (temp never allocated) is
        # caught -- a bare "except:" here used to swallow every exception
        # type raised while evaluating the attribute access.
        # NOTE(review): after release(), temp_cname is None, so a
        # use-after-release returns None rather than tripping this guard.
        try:
            return self.temp_cname
        except AttributeError:
            assert False, "Remember to call allocate/release on TempNode"
            raise  # unreachable unless asserts are disabled (python -O)

    # Do not participate in normal temp alloc/dealloc:
    def allocate_temp_result(self, code):
        pass

    def release_temp_result(self, code):
        pass
class PyTempNode(TempNode):
    #  TempNode holding a Python value.

    def __init__(self, pos, env):
        # Convenience constructor: fixes the temp type to py_object_type.
        TempNode.__init__(self, pos, PyrexTypes.py_object_type, env)
class RawCNameExprNode(ExprNode):
    """Expression node whose result is a raw, caller-supplied C name.

    The C name may be given at construction time or injected later via
    set_cname(); no code is generated for it.
    """

    subexprs = []

    def __init__(self, pos, type=None, cname=None):
        ExprNode.__init__(self, pos, type=type)
        if cname is not None:
            self.cname = cname

    def analyse_types(self, env):
        return self

    def set_cname(self, cname):
        # Late-bind the C name this node evaluates to.
        self.cname = cname

    def result(self):
        return self.cname

    def generate_result_code(self, code):
        # Nothing to emit: the result is just the raw C name.
        pass
#-------------------------------------------------------------------
#
# Parallel nodes (cython.parallel.thread(savailable|id))
#
#-------------------------------------------------------------------
class ParallelThreadsAvailableNode(AtomicExprNode):
    """
    Note: this is disabled and not a valid directive at this moment

    Implements cython.parallel.threadsavailable(). If we are called from the
    sequential part of the application, we need to call omp_get_max_threads(),
    and in the parallel part we can just call omp_get_num_threads()
    """

    type = PyrexTypes.c_int_type

    def analyse_types(self, env):
        self.is_temp = True
        # env.add_include_file("omp.h")
        return self

    def generate_result_code(self, code):
        # Emit code guarded by _OPENMP so non-OpenMP builds still compile;
        # without OpenMP there is exactly one thread available.
        code.putln("#ifdef _OPENMP")
        code.putln("if (omp_in_parallel()) %s = omp_get_max_threads();" %
                                                            self.temp_code)
        code.putln("else %s = omp_get_num_threads();" % self.temp_code)
        code.putln("#else")
        code.putln("%s = 1;" % self.temp_code)
        code.putln("#endif")

    def result(self):
        return self.temp_code
class ParallelThreadIdNode(AtomicExprNode): #, Nodes.ParallelNode):
    """
    Implements cython.parallel.threadid()
    """

    type = PyrexTypes.c_int_type

    def analyse_types(self, env):
        self.is_temp = True
        # env.add_include_file("omp.h")
        return self

    def generate_result_code(self, code):
        # Guarded by _OPENMP so non-OpenMP builds still compile: without
        # OpenMP there is a single thread whose id is 0.
        for c_line in ("#ifdef _OPENMP",
                       "%s = omp_get_thread_num();" % self.temp_code,
                       "#else",
                       "%s = 0;" % self.temp_code,
                       "#endif"):
            code.putln(c_line)

    def result(self):
        return self.temp_code
#-------------------------------------------------------------------
#
# Trailer nodes
#
#-------------------------------------------------------------------
class _IndexingBaseNode(ExprNode):
    # Base class for indexing nodes.
    #
    # base   ExprNode   the value being indexed

    def is_ephemeral(self):
        # in most cases, indexing will return a safe reference to an object in a container,
        # so we consider the result safe if the base object is
        # (string/bytes/unicode bases are listed because indexing them does
        # not hand out a reference into the container)
        return self.base.is_ephemeral() or self.base.type in (
            basestring_type, str_type, bytes_type, unicode_type)

    def check_const_addr(self):
        # The address is constant only if both base and index are.
        return self.base.check_const_addr() and self.index.check_const()

    def is_lvalue(self):
        # NOTE: references currently have both is_reference and is_ptr
        # set.  Since pointers and references have different lvalue
        # rules, we must be careful to separate the two.
        # (The elif below pairs with the is_reference test: the pointer
        # rule is only applied to non-reference pointers.)
        if self.type.is_reference:
            if self.type.ref_base_type.is_array:
                # fixed-sized arrays aren't l-values
                return False
        elif self.type.is_ptr:
            # non-const pointers can always be reassigned
            return True
        # Just about everything else returned by the index operator
        # can be an lvalue.
        return True
class IndexNode(_IndexingBaseNode):
# Sequence indexing.
#
# base ExprNode
# index ExprNode
# type_indices [PyrexType]
#
# is_fused_index boolean Whether the index is used to specialize a
# c(p)def function
subexprs = ['base', 'index']
type_indices = None
is_subscript = True
is_fused_index = False
def __init__(self, pos, index, **kw):
ExprNode.__init__(self, pos, index=index, **kw)
self._index = index
def calculate_constant_result(self):
self.constant_result = self.base.constant_result[self.index.constant_result]
def compile_time_value(self, denv):
base = self.base.compile_time_value(denv)
index = self.index.compile_time_value(denv)
try:
return base[index]
except Exception as e:
self.compile_time_value_error(e)
def is_simple(self):
base = self.base
return (base.is_simple() and self.index.is_simple()
and base.type and (base.type.is_ptr or base.type.is_array))
def may_be_none(self):
base_type = self.base.type
if base_type:
if base_type.is_string:
return False
if isinstance(self.index, SliceNode):
# slicing!
if base_type in (bytes_type, str_type, unicode_type,
basestring_type, list_type, tuple_type):
return False
return ExprNode.may_be_none(self)
def analyse_target_declaration(self, env):
pass
def analyse_as_type(self, env):
base_type = self.base.analyse_as_type(env)
if base_type and not base_type.is_pyobject:
if base_type.is_cpp_class:
if isinstance(self.index, TupleNode):
template_values = self.index.args
else:
template_values = [self.index]
type_node = Nodes.TemplatedTypeNode(
pos=self.pos,
positional_args=template_values,
keyword_args=None)
return type_node.analyse(env, base_type=base_type)
else:
index = self.index.compile_time_value(env)
if index is not None:
return PyrexTypes.CArrayType(base_type, int(index))
error(self.pos, "Array size must be a compile time constant")
return None
def type_dependencies(self, env):
return self.base.type_dependencies(env) + self.index.type_dependencies(env)
def infer_type(self, env):
base_type = self.base.infer_type(env)
if self.index.is_slice:
# slicing!
if base_type.is_string:
# sliced C strings must coerce to Python
return bytes_type
elif base_type.is_pyunicode_ptr:
# sliced Py_UNICODE* strings must coerce to Python
return unicode_type
elif base_type in (unicode_type, bytes_type, str_type,
bytearray_type, list_type, tuple_type):
# slicing these returns the same type
return base_type
else:
# TODO: Handle buffers (hopefully without too much redundancy).
return py_object_type
index_type = self.index.infer_type(env)
if index_type and index_type.is_int or isinstance(self.index, IntNode):
# indexing!
if base_type is unicode_type:
# Py_UCS4 will automatically coerce to a unicode string
# if required, so this is safe. We only infer Py_UCS4
# when the index is a C integer type. Otherwise, we may
# need to use normal Python item access, in which case
# it's faster to return the one-char unicode string than
# to receive it, throw it away, and potentially rebuild it
# on a subsequent PyObject coercion.
return PyrexTypes.c_py_ucs4_type
elif base_type is str_type:
# always returns str - Py2: bytes, Py3: unicode
return base_type
elif base_type is bytearray_type:
return PyrexTypes.c_uchar_type
elif isinstance(self.base, BytesNode):
#if env.global_scope().context.language_level >= 3:
# # inferring 'char' can be made to work in Python 3 mode
# return PyrexTypes.c_char_type
# Py2/3 return different types on indexing bytes objects
return py_object_type
elif base_type in (tuple_type, list_type):
# if base is a literal, take a look at its values
item_type = infer_sequence_item_type(
env, self.base, self.index, seq_type=base_type)
if item_type is not None:
return item_type
elif base_type.is_ptr or base_type.is_array:
return base_type.base_type
elif base_type.is_ctuple and isinstance(self.index, IntNode):
if self.index.has_constant_result():
index = self.index.constant_result
if index < 0:
index += base_type.size
if 0 <= index < base_type.size:
return base_type.components[index]
if base_type.is_cpp_class:
class FakeOperand:
def __init__(self, **kwds):
self.__dict__.update(kwds)
operands = [
FakeOperand(pos=self.pos, type=base_type),
FakeOperand(pos=self.pos, type=index_type),
]
index_func = env.lookup_operator('[]', operands)
if index_func is not None:
return index_func.type.return_type
# may be slicing or indexing, we don't know
if base_type in (unicode_type, str_type):
# these types always returns their own type on Python indexing/slicing
return base_type
else:
# TODO: Handle buffers (hopefully without too much redundancy).
return py_object_type
def analyse_types(self, env):
return self.analyse_base_and_index_types(env, getting=True)
def analyse_target_types(self, env):
node = self.analyse_base_and_index_types(env, setting=True)
if node.type.is_const:
error(self.pos, "Assignment to const dereference")
if node is self and not node.is_lvalue():
error(self.pos, "Assignment to non-lvalue of type '%s'" % node.type)
return node
def analyse_base_and_index_types(self, env, getting=False, setting=False,
analyse_base=True):
# Note: This might be cleaned up by having IndexNode
# parsed in a saner way and only construct the tuple if
# needed.
if analyse_base:
self.base = self.base.analyse_types(env)
if self.base.type.is_error:
# Do not visit child tree if base is undeclared to avoid confusing
# error messages
self.type = PyrexTypes.error_type
return self
is_slice = self.index.is_slice
if not env.directives['wraparound']:
if is_slice:
check_negative_indices(self.index.start, self.index.stop)
else:
check_negative_indices(self.index)
# Potentially overflowing index value.
if not is_slice and isinstance(self.index, IntNode) and Utils.long_literal(self.index.value):
self.index = self.index.coerce_to_pyobject(env)
is_memslice = self.base.type.is_memoryviewslice
# Handle the case where base is a literal char* (and we expect a string, not an int)
if not is_memslice and (isinstance(self.base, BytesNode) or is_slice):
if self.base.type.is_string or not (self.base.type.is_ptr or self.base.type.is_array):
self.base = self.base.coerce_to_pyobject(env)
replacement_node = self.analyse_as_buffer_operation(env, getting)
if replacement_node is not None:
return replacement_node
self.nogil = env.nogil
base_type = self.base.type
if not base_type.is_cfunction:
self.index = self.index.analyse_types(env)
self.original_index_type = self.index.type
if base_type.is_unicode_char:
# we infer Py_UNICODE/Py_UCS4 for unicode strings in some
# cases, but indexing must still work for them
if setting:
warning(self.pos, "cannot assign to Unicode string index", level=1)
elif self.index.constant_result in (0, -1):
# uchar[0] => uchar
return self.base
self.base = self.base.coerce_to_pyobject(env)
base_type = self.base.type
if base_type.is_pyobject:
return self.analyse_as_pyobject(env, is_slice, getting, setting)
elif base_type.is_ptr or base_type.is_array:
return self.analyse_as_c_array(env, is_slice)
elif base_type.is_cpp_class:
return self.analyse_as_cpp(env, setting)
elif base_type.is_cfunction:
return self.analyse_as_c_function(env)
elif base_type.is_ctuple:
return self.analyse_as_c_tuple(env, getting, setting)
else:
error(self.pos,
"Attempting to index non-array type '%s'" %
base_type)
self.type = PyrexTypes.error_type
return self
def analyse_as_pyobject(self, env, is_slice, getting, setting):
base_type = self.base.type
if self.index.type.is_int and base_type is not dict_type:
if (getting
and (base_type in (list_type, tuple_type, bytearray_type))
and (not self.index.type.signed
or not env.directives['wraparound']
or (isinstance(self.index, IntNode) and
self.index.has_constant_result() and self.index.constant_result >= 0))
and not env.directives['boundscheck']):
self.is_temp = 0
else:
self.is_temp = 1
self.index = self.index.coerce_to(PyrexTypes.c_py_ssize_t_type, env).coerce_to_simple(env)
self.original_index_type.create_to_py_utility_code(env)
else:
self.index = self.index.coerce_to_pyobject(env)
self.is_temp = 1
if self.index.type.is_int and base_type is unicode_type:
# Py_UNICODE/Py_UCS4 will automatically coerce to a unicode string
# if required, so this is fast and safe
self.type = PyrexTypes.c_py_ucs4_type
elif self.index.type.is_int and base_type is bytearray_type:
if setting:
self.type = PyrexTypes.c_uchar_type
else:
# not using 'uchar' to enable fast and safe error reporting as '-1'
self.type = PyrexTypes.c_int_type
elif is_slice and base_type in (bytes_type, str_type, unicode_type, list_type, tuple_type):
self.type = base_type
else:
item_type = None
if base_type in (list_type, tuple_type) and self.index.type.is_int:
item_type = infer_sequence_item_type(
env, self.base, self.index, seq_type=base_type)
if item_type is None:
item_type = py_object_type
self.type = item_type
if base_type in (list_type, tuple_type, dict_type):
# do the None check explicitly (not in a helper) to allow optimising it away
self.base = self.base.as_none_safe_node("'NoneType' object is not subscriptable")
self.wrap_in_nonecheck_node(env, getting)
return self
def analyse_as_c_array(self, env, is_slice):
base_type = self.base.type
self.type = base_type.base_type
if is_slice:
self.type = base_type
elif self.index.type.is_pyobject:
self.index = self.index.coerce_to(PyrexTypes.c_py_ssize_t_type, env)
elif not self.index.type.is_int:
error(self.pos, "Invalid index type '%s'" % self.index.type)
return self
def analyse_as_cpp(self, env, setting):
base_type = self.base.type
function = env.lookup_operator("[]", [self.base, self.index])
if function is None:
error(self.pos, "Indexing '%s' not supported for index type '%s'" % (base_type, self.index.type))
self.type = PyrexTypes.error_type
self.result_code = "<error>"
return self
func_type = function.type
if func_type.is_ptr:
func_type = func_type.base_type
self.exception_check = func_type.exception_check
self.exception_value = func_type.exception_value
if self.exception_check:
if not setting:
self.is_temp = True
if self.exception_value is None:
env.use_utility_code(UtilityCode.load_cached("CppExceptionConversion", "CppSupport.cpp"))
self.index = self.index.coerce_to(func_type.args[0].type, env)
self.type = func_type.return_type
if setting and not func_type.return_type.is_reference:
error(self.pos, "Can't set non-reference result '%s'" % self.type)
return self
def analyse_as_c_function(self, env):
base_type = self.base.type
if base_type.is_fused:
self.parse_indexed_fused_cdef(env)
else:
self.type_indices = self.parse_index_as_types(env)
self.index = None # FIXME: use a dedicated Node class instead of generic IndexNode
if base_type.templates is None:
error(self.pos, "Can only parameterize template functions.")
self.type = error_type
elif len(base_type.templates) != len(self.type_indices):
error(self.pos, "Wrong number of template arguments: expected %s, got %s" % (
(len(base_type.templates), len(self.type_indices))))
self.type = error_type
else:
self.type = base_type.specialize(dict(zip(base_type.templates, self.type_indices)))
# FIXME: use a dedicated Node class instead of generic IndexNode
return self
def analyse_as_c_tuple(self, env, getting, setting):
base_type = self.base.type
if isinstance(self.index, IntNode) and self.index.has_constant_result():
index = self.index.constant_result
if -base_type.size <= index < base_type.size:
if index < 0:
index += base_type.size
self.type = base_type.components[index]
else:
error(self.pos,
"Index %s out of bounds for '%s'" %
(index, base_type))
self.type = PyrexTypes.error_type
return self
else:
self.base = self.base.coerce_to_pyobject(env)
return self.analyse_base_and_index_types(env, getting=getting, setting=setting, analyse_base=False)
def analyse_as_buffer_operation(self, env, getting):
"""
Analyse buffer indexing and memoryview indexing/slicing
"""
if isinstance(self.index, TupleNode):
indices = self.index.args
else:
indices = [self.index]
base_type = self.base.type
replacement_node = None
if base_type.is_memoryviewslice:
# memoryviewslice indexing or slicing
from . import MemoryView
have_slices, indices, newaxes = MemoryView.unellipsify(indices, base_type.ndim)
if have_slices:
replacement_node = MemoryViewSliceNode(self.pos, indices=indices, base=self.base)
else:
replacement_node = MemoryViewIndexNode(self.pos, indices=indices, base=self.base)
elif base_type.is_buffer and len(indices) == base_type.ndim:
# Buffer indexing
is_buffer_access = True
for index in indices:
index = index.analyse_types(env)
if not index.type.is_int:
is_buffer_access = False
if is_buffer_access:
replacement_node = BufferIndexNode(self.pos, indices=indices, base=self.base)
# On cloning, indices is cloned. Otherwise, unpack index into indices.
assert not isinstance(self.index, CloneNode)
if replacement_node is not None:
replacement_node = replacement_node.analyse_types(env, getting)
return replacement_node
def wrap_in_nonecheck_node(self, env, getting):
if not env.directives['nonecheck'] or not self.base.may_be_none():
return
self.base = self.base.as_none_safe_node("'NoneType' object is not subscriptable")
def parse_index_as_types(self, env, required=True):
if isinstance(self.index, TupleNode):
indices = self.index.args
else:
indices = [self.index]
type_indices = []
for index in indices:
type_indices.append(index.analyse_as_type(env))
if type_indices[-1] is None:
if required:
error(index.pos, "not parsable as a type")
return None
return type_indices
def parse_indexed_fused_cdef(self, env):
"""
Interpret fused_cdef_func[specific_type1, ...]
Note that if this method is called, we are an indexed cdef function
with fused argument types, and this IndexNode will be replaced by the
NameNode with specific entry just after analysis of expressions by
AnalyseExpressionsTransform.
"""
self.type = PyrexTypes.error_type
self.is_fused_index = True
base_type = self.base.type
positions = []
if self.index.is_name or self.index.is_attribute:
positions.append(self.index.pos)
elif isinstance(self.index, TupleNode):
for arg in self.index.args:
positions.append(arg.pos)
specific_types = self.parse_index_as_types(env, required=False)
if specific_types is None:
self.index = self.index.analyse_types(env)
if not self.base.entry.as_variable:
error(self.pos, "Can only index fused functions with types")
else:
# A cpdef function indexed with Python objects
self.base.entry = self.entry = self.base.entry.as_variable
self.base.type = self.type = self.entry.type
self.base.is_temp = True
self.is_temp = True
self.entry.used = True
self.is_fused_index = False
return
for i, type in enumerate(specific_types):
specific_types[i] = type.specialize_fused(env)
fused_types = base_type.get_fused_types()
if len(specific_types) > len(fused_types):
return error(self.pos, "Too many types specified")
elif len(specific_types) < len(fused_types):
t = fused_types[len(specific_types)]
return error(self.pos, "Not enough types specified to specialize "
"the function, %s is still fused" % t)
# See if our index types form valid specializations
for pos, specific_type, fused_type in zip(positions,
specific_types,
fused_types):
if not any([specific_type.same_as(t) for t in fused_type.types]):
return error(pos, "Type not in fused type")
if specific_type is None or specific_type.is_error:
return
fused_to_specific = dict(zip(fused_types, specific_types))
type = base_type.specialize(fused_to_specific)
if type.is_fused:
# Only partially specific, this is invalid
error(self.pos,
"Index operation makes function only partially specific")
else:
# Fully specific, find the signature with the specialized entry
for signature in self.base.type.get_all_specialized_function_types():
if type.same_as(signature):
self.type = signature
if self.base.is_attribute:
# Pretend to be a normal attribute, for cdef extension
# methods
self.entry = signature.entry
self.is_attribute = True
self.obj = self.base.obj
self.type.entry.used = True
self.base.type = signature
self.base.entry = signature.entry
break
else:
# This is a bug
raise InternalError("Couldn't find the right signature")
gil_message = "Indexing Python object"
    def calculate_result_code(self):
        """Return the C code for a non-temp indexing result.

        Handles direct-access forms: list/tuple/bytearray macros, C++
        template instantiation of cfunctions, ctuple member access, and
        plain pointer/array subscripting.
        """
        if self.base.type in (list_type, tuple_type, bytearray_type):
            if self.base.type is list_type:
                index_code = "PyList_GET_ITEM(%s, %s)"
            elif self.base.type is tuple_type:
                index_code = "PyTuple_GET_ITEM(%s, %s)"
            elif self.base.type is bytearray_type:
                index_code = "((unsigned char)(PyByteArray_AS_STRING(%s)[%s]))"
            else:
                assert False, "unexpected base type in indexing: %s" % self.base.type
        elif self.base.type.is_cfunction:
            # Indexing a cfunction with types => C++ template instantiation.
            return "%s<%s>" % (
                self.base.result(),
                ",".join([param.empty_declaration_code() for param in self.type_indices]))
        elif self.base.type.is_ctuple:
            index = self.index.constant_result
            if index < 0:
                # negative ctuple indices were validated earlier; wrap here
                index += self.base.type.size
            return "%s.f%s" % (self.base.result(), index)
        else:
            if (self.type.is_ptr or self.type.is_array) and self.type == self.base.type:
                error(self.pos, "Invalid use of pointer slice")
                return
            index_code = "(%s[%s])"
        return index_code % (self.base.result(), self.index.result())
def extra_index_params(self, code):
if self.index.type.is_int:
is_list = self.base.type is list_type
wraparound = (
bool(code.globalstate.directives['wraparound']) and
self.original_index_type.signed and
not (isinstance(self.index.constant_result, _py_int_types)
and self.index.constant_result >= 0))
boundscheck = bool(code.globalstate.directives['boundscheck'])
return ", %s, %d, %s, %d, %d, %d" % (
self.original_index_type.empty_declaration_code(),
self.original_index_type.signed and 1 or 0,
self.original_index_type.to_py_function,
is_list, wraparound, boundscheck)
else:
return ""
    def generate_result_code(self, code):
        """Emit C code computing the indexing result into a temp.

        Selects a specialized getter (list/tuple/dict/unicode/bytearray/
        C++ operator[]) based on the base and index types, registers the
        matching utility code, and emits the call plus error check.
        """
        if not self.is_temp:
            # all handled in self.calculate_result_code()
            return

        if self.type.is_pyobject:
            error_value = 'NULL'
            if self.index.type.is_int:
                if self.base.type is list_type:
                    function = "__Pyx_GetItemInt_List"
                elif self.base.type is tuple_type:
                    function = "__Pyx_GetItemInt_Tuple"
                else:
                    function = "__Pyx_GetItemInt"
                code.globalstate.use_utility_code(
                    TempitaUtilityCode.load_cached("GetItemInt", "ObjectHandling.c"))
            else:
                if self.base.type is dict_type:
                    function = "__Pyx_PyDict_GetItem"
                    code.globalstate.use_utility_code(
                        UtilityCode.load_cached("DictGetItem", "ObjectHandling.c"))
                else:
                    function = "PyObject_GetItem"
        elif self.type.is_unicode_char and self.base.type is unicode_type:
            assert self.index.type.is_int
            function = "__Pyx_GetItemInt_Unicode"
            error_value = '(Py_UCS4)-1'
            code.globalstate.use_utility_code(
                UtilityCode.load_cached("GetItemIntUnicode", "StringTools.c"))
        elif self.base.type is bytearray_type:
            assert self.index.type.is_int
            assert self.type.is_int
            function = "__Pyx_GetItemInt_ByteArray"
            error_value = '-1'
            code.globalstate.use_utility_code(
                UtilityCode.load_cached("GetItemIntByteArray", "StringTools.c"))
        elif not (self.base.type.is_cpp_class and self.exception_check):
            assert False, "unexpected type %s and base type %s for indexing" % (
                self.type, self.base.type)

        if self.index.type.is_int:
            index_code = self.index.result()
        else:
            index_code = self.index.py_result()

        if self.base.type.is_cpp_class and self.exception_check:
            # C++ operator[] that may throw: wrap in exception translation.
            translate_cpp_exception(code, self.pos,
                "%s = %s[%s];" % (self.result(), self.base.result(),
                                  self.index.result()),
                self.exception_value, self.in_nogil_context)
        else:
            # '!x' test for NULL returns, 'x == <value>' for sentinel values.
            error_check = '!%s' if error_value == 'NULL' else '%%s == %s' % error_value
            code.putln(
                "%s = %s(%s, %s%s); %s" % (
                    self.result(),
                    function,
                    self.base.py_result(),
                    index_code,
                    self.extra_index_params(code),
                    code.error_goto_if(error_check % self.result(), self.pos)))
        if self.type.is_pyobject:
            code.put_gotref(self.py_result())
    def generate_setitem_code(self, value_code, code):
        """Emit C code assigning *value_code* to base[index].

        Picks a specialized setter for integer indices (bytearray vs
        generic), PyDict_SetItem for dicts, and PyObject_SetItem otherwise.
        """
        if self.index.type.is_int:
            if self.base.type is bytearray_type:
                code.globalstate.use_utility_code(
                    UtilityCode.load_cached("SetItemIntByteArray", "StringTools.c"))
                function = "__Pyx_SetItemInt_ByteArray"
            else:
                code.globalstate.use_utility_code(
                    UtilityCode.load_cached("SetItemInt", "ObjectHandling.c"))
                function = "__Pyx_SetItemInt"
            index_code = self.index.result()
        else:
            index_code = self.index.py_result()
            if self.base.type is dict_type:
                function = "PyDict_SetItem"
            # It would seem that we could specialized lists/tuples, but that
            # shouldn't happen here.
            # Both PyList_SetItem() and PyTuple_SetItem() take a Py_ssize_t as
            # index instead of an object, and bad conversion here would give
            # the wrong exception. Also, tuples are supposed to be immutable,
            # and raise a TypeError when trying to set their entries
            # (PyTuple_SetItem() is for creating new tuples from scratch).
            else:
                function = "PyObject_SetItem"
        code.putln(code.error_goto_if_neg(
            "%s(%s, %s, %s%s)" % (
                function,
                self.base.py_result(),
                index_code,
                value_code,
                self.extra_index_params(code)),
            self.pos))
    def generate_assignment_code(self, rhs, code, overloaded_assignment=False,
                                 exception_check=None, exception_value=None):
        """Emit C code assigning *rhs* to this index expression.

        Dispatches to Python setitem, bytearray byte assignment, C++
        operator[]= with exception translation, or plain C assignment,
        then disposes of subexpression and rhs temps.
        """
        self.generate_subexpr_evaluation_code(code)

        if self.type.is_pyobject:
            self.generate_setitem_code(rhs.py_result(), code)
        elif self.base.type is bytearray_type:
            # range-check/cast the value before storing it as a byte
            value_code = self._check_byte_value(code, rhs)
            self.generate_setitem_code(value_code, code)
        elif self.base.type.is_cpp_class and self.exception_check and self.exception_check == '+':
            if overloaded_assignment and exception_check and \
                    self.exception_value != exception_value:
                # Handle the case that both the index operator and the assignment
                # operator have a c++ exception handler and they are not the same.
                translate_double_cpp_exception(code, self.pos, self.type,
                    self.result(), rhs.result(), self.exception_value,
                    exception_value, self.in_nogil_context)
            else:
                # Handle the case that only the index operator has a
                # c++ exception handler, or that
                # both exception handlers are the same.
                translate_cpp_exception(code, self.pos,
                    "%s = %s;" % (self.result(), rhs.result()),
                    self.exception_value, self.in_nogil_context)
        else:
            code.putln(
                "%s = %s;" % (self.result(), rhs.result()))

        self.generate_subexpr_disposal_code(code)
        self.free_subexpr_temps(code)
        rhs.generate_disposal_code(code)
        rhs.free_temps(code)
    def _check_byte_value(self, code, rhs):
        """Return C code for *rhs* coerced to a byte value in range(0, 256).

        Constant values out of range warn at compile time; non-constant
        values get a runtime range check (unless in nogil context) and a
        cast to unsigned char where the C type requires it.
        """
        # TODO: should we do this generally on downcasts, or just here?
        assert rhs.type.is_int, repr(rhs.type)
        value_code = rhs.result()
        if rhs.has_constant_result():
            if 0 <= rhs.constant_result < 256:
                return value_code
            needs_cast = True  # make at least the C compiler happy
            warning(rhs.pos,
                    "value outside of range(0, 256)"
                    " when assigning to byte: %s" % rhs.constant_result,
                    level=1)
        else:
            needs_cast = rhs.type != PyrexTypes.c_uchar_type

        if not self.nogil:
            conditions = []
            if rhs.is_literal or rhs.type.signed:
                conditions.append('%s < 0' % value_code)
            # skip the upper-bound check when the C type already cannot
            # exceed 255 (char-sized temps)
            if (rhs.is_literal or not
                    (rhs.is_temp and rhs.type in (
                        PyrexTypes.c_uchar_type, PyrexTypes.c_char_type,
                        PyrexTypes.c_schar_type))):
                conditions.append('%s > 255' % value_code)
            if conditions:
                code.putln("if (unlikely(%s)) {" % ' || '.join(conditions))
                code.putln(
                    'PyErr_SetString(PyExc_ValueError,'
                    ' "byte must be in range(0, 256)"); %s' %
                    code.error_goto(self.pos))
                code.putln("}")

        if needs_cast:
            value_code = '((unsigned char)%s)' % value_code
        return value_code
    def generate_deletion_code(self, code, ignore_nonexisting=False):
        """Emit C code for ``del base[index]``.

        Uses __Pyx_DelItemInt for integer indices, PyDict_DelItem for
        dicts, PyObject_DelItem otherwise.
        """
        self.generate_subexpr_evaluation_code(code)
        #if self.type.is_pyobject:
        if self.index.type.is_int:
            function = "__Pyx_DelItemInt"
            index_code = self.index.result()
            code.globalstate.use_utility_code(
                UtilityCode.load_cached("DelItemInt", "ObjectHandling.c"))
        else:
            index_code = self.index.py_result()
            if self.base.type is dict_type:
                function = "PyDict_DelItem"
            else:
                function = "PyObject_DelItem"
        code.putln(code.error_goto_if_neg(
            "%s(%s, %s%s)" % (
                function,
                self.base.py_result(),
                index_code,
                self.extra_index_params(code)),
            self.pos))
        self.generate_subexpr_disposal_code(code)
        self.free_subexpr_temps(code)
class BufferIndexNode(_IndexingBaseNode):
    """
    Indexing of buffers and memoryviews. This node is created during type
    analysis from IndexNode and replaces it.

    Attributes:
        base - base node being indexed
        indices - list of indexing expressions
    """

    subexprs = ['base', 'indices']

    is_buffer_access = True

    # Whether we're assigning to a buffer (in that case it needs to be writable)
    writable_needed = False

    def analyse_target_types(self, env):
        # BUG FIX: return the analysed node.  The analyse_target_types()
        # protocol rebinds the assignment target to the returned node
        # (cf. SliceIndexNode.analyse_target_types below); the original
        # implicitly returned None here.
        return self.analyse_types(env, getting=False)

    def analyse_types(self, env, getting=True):
        """
        Analyse types for buffer indexing only. Overridden by memoryview
        indexing and slicing subclasses
        """
        # self.indices are already analyzed
        if not self.base.is_name:
            error(self.pos, "Can only index buffer variables")
            self.type = error_type
            return self

        if not getting:
            if not self.base.entry.type.writable:
                error(self.pos, "Writing to readonly buffer")
            else:
                # mark the buffer as needing write access for codegen
                self.writable_needed = True
                if self.base.type.is_buffer:
                    self.base.entry.buffer_aux.writable_needed = True

        self.none_error_message = "'NoneType' object is not subscriptable"
        self.analyse_buffer_index(env, getting)
        self.wrap_in_nonecheck_node(env)
        return self

    def analyse_buffer_index(self, env, getting):
        # The result type of buffer indexing is the buffer's dtype.
        self.base = self.base.coerce_to_simple(env)
        self.type = self.base.type.dtype
        self.buffer_type = self.base.type

        if getting and self.type.is_pyobject:
            self.is_temp = True

    def analyse_assignment(self, rhs):
        """
        Called by IndexNode when this node is assigned to,
        with the rhs of the assignment
        """

    def wrap_in_nonecheck_node(self, env):
        # Only insert a None check when the directive asks for it and the
        # base may actually be None.
        if not env.directives['nonecheck'] or not self.base.may_be_none():
            return
        self.base = self.base.as_none_safe_node(self.none_error_message)

    def nogil_check(self, env):
        if self.is_buffer_access or self.is_memview_index:
            if env.directives['boundscheck']:
                warning(self.pos, "Use boundscheck(False) for faster access",
                        level=1)

            # Object dtype access needs refcounting, hence the GIL.
            if self.type.is_pyobject:
                error(self.pos, "Cannot access buffer with object dtype without gil")
                self.type = error_type

    def calculate_result_code(self):
        return "(*%s)" % self.buffer_ptr_code

    def buffer_entry(self):
        # Unwrap a none-check node to reach the underlying base entry.
        base = self.base
        if self.base.is_nonecheck:
            base = base.arg
        return base.type.get_entry(base)

    def buffer_lookup_code(self, code):
        """
        ndarray[1, 2, 3] and memslice[1, 2, 3]

        Returns (buffer_entry, C expression for the element pointer).
        """
        # Assign indices to temps of at least (s)size_t to allow further index calculations.
        index_temps = [
            code.funcstate.allocate_temp(
                PyrexTypes.widest_numeric_type(
                    ivar.type, PyrexTypes.c_ssize_t_type if ivar.type.signed else PyrexTypes.c_size_t_type),
                manage_ref=False)
            for ivar in self.indices]

        for temp, index in zip(index_temps, self.indices):
            code.putln("%s = %s;" % (temp, index.result()))

        # Generate buffer access code using these temps
        from . import Buffer
        buffer_entry = self.buffer_entry()
        if buffer_entry.type.is_buffer:
            negative_indices = buffer_entry.type.negative_indices
        else:
            negative_indices = Buffer.buffer_defaults['negative_indices']

        return buffer_entry, Buffer.put_buffer_lookup_code(
            entry=buffer_entry,
            index_signeds=[ivar.type.signed for ivar in self.indices],
            index_cnames=index_temps,
            directives=code.globalstate.directives,
            pos=self.pos, code=code,
            negative_indices=negative_indices,
            in_nogil_context=self.in_nogil_context)

    def generate_assignment_code(self, rhs, code, overloaded_assignment=False):
        self.generate_subexpr_evaluation_code(code)
        self.generate_buffer_setitem_code(rhs, code)
        self.generate_subexpr_disposal_code(code)
        self.free_subexpr_temps(code)
        rhs.generate_disposal_code(code)
        rhs.free_temps(code)

    def generate_buffer_setitem_code(self, rhs, code, op=""):
        # Used from generate_assignment_code and InPlaceAssignmentNode
        buffer_entry, ptrexpr = self.buffer_lookup_code(code)

        if self.buffer_type.dtype.is_pyobject:
            # Must manage refcounts. Decref what is already there
            # and incref what we put in.
            ptr = code.funcstate.allocate_temp(buffer_entry.buf_ptr_type,
                                               manage_ref=False)
            rhs_code = rhs.result()
            code.putln("%s = %s;" % (ptr, ptrexpr))
            code.put_gotref("*%s" % ptr)
            code.putln("__Pyx_INCREF(%s); __Pyx_DECREF(*%s);" % (
                rhs_code, ptr))
            code.putln("*%s %s= %s;" % (ptr, op, rhs_code))
            code.put_giveref("*%s" % ptr)
            code.funcstate.release_temp(ptr)
        else:
            # Simple case
            code.putln("*%s %s= %s;" % (ptrexpr, op, rhs.result()))

    def generate_result_code(self, code):
        buffer_entry, self.buffer_ptr_code = self.buffer_lookup_code(code)
        if self.type.is_pyobject:
            # is_temp is True, so must pull out value and incref it.
            # NOTE: object temporary results for nodes are declared
            #       as PyObject *, so we need a cast
            code.putln("%s = (PyObject *) *%s;" % (self.result(), self.buffer_ptr_code))
            code.putln("__Pyx_INCREF((PyObject*)%s);" % self.result())
class MemoryViewIndexNode(BufferIndexNode):
    """Indexing of a memoryview slice, e.g. ``m[i, j]``.

    Created from IndexNode during type analysis; may itself detect that
    some indices are slices and behave as a slicing operation.
    """

    is_memview_index = True
    is_buffer_access = False
    # Class-level flag: the "untyped index" warning is emitted only once
    # per compilation (set on the class, not the instance).
    warned_untyped_idx = False

    def analyse_types(self, env, getting=True):
        """Analyse memoryview indexing/slicing: classify each index as a
        new axis (None), a slice, or an integer index, coerce index values
        to Py_ssize_t, and record the resulting axes layout.
        """
        # memoryviewslice indexing or slicing
        from . import MemoryView

        indices = self.indices
        have_slices, indices, newaxes = MemoryView.unellipsify(indices, self.base.type.ndim)

        self.memslice_index = (not newaxes and len(indices) == self.base.type.ndim)
        axes = []

        index_type = PyrexTypes.c_py_ssize_t_type
        new_indices = []

        if len(indices) - len(newaxes) > self.base.type.ndim:
            self.type = error_type
            error(indices[self.base.type.ndim].pos,
                  "Too many indices specified for type %s" % self.base.type)
            return self

        axis_idx = 0
        for i, index in enumerate(indices[:]):
            index = index.analyse_types(env)
            if index.is_none:
                # None introduces a new axis (numpy-style newaxis)
                self.is_memview_slice = True
                new_indices.append(index)
                axes.append(('direct', 'strided'))
                continue

            access, packing = self.base.type.axes[axis_idx]
            axis_idx += 1

            if index.is_slice:
                self.is_memview_slice = True
                if index.step.is_none:
                    axes.append((access, packing))
                else:
                    # stepped slices lose contiguity information
                    axes.append((access, 'strided'))

                # Coerce start, stop and step to temps of the right type
                for attr in ('start', 'stop', 'step'):
                    value = getattr(index, attr)
                    if not value.is_none:
                        value = value.coerce_to(index_type, env)
                        #value = value.coerce_to_temp(env)
                        setattr(index, attr, value)
                        new_indices.append(value)

            elif index.type.is_int or index.type.is_pyobject:
                if index.type.is_pyobject and not self.warned_untyped_idx:
                    warning(index.pos, "Index should be typed for more efficient access", level=2)
                    MemoryViewIndexNode.warned_untyped_idx = True

                self.is_memview_index = True
                index = index.coerce_to(index_type, env)
                indices[i] = index
                new_indices.append(index)

            else:
                self.type = error_type
                error(index.pos, "Invalid index for memoryview specified, type %s" % index.type)
                return self

        ### FIXME: replace by MemoryViewSliceNode if is_memview_slice ?
        self.is_memview_index = self.is_memview_index and not self.is_memview_slice
        self.indices = new_indices
        # All indices with all start/stop/step for slices.
        # We need to keep this around.
        self.original_indices = indices
        self.nogil = env.nogil

        self.analyse_operation(env, getting, axes)
        self.wrap_in_nonecheck_node(env)
        return self

    def analyse_operation(self, env, getting, axes):
        # Overridden by MemoryViewSliceNode for the slicing case.
        self.none_error_message = "Cannot index None memoryview slice"
        self.analyse_buffer_index(env, getting)

    def analyse_broadcast_operation(self, rhs):
        """
        Support broadcasting for slice assignment.
        E.g.
            m_2d[...] = m_1d  # or,
            m_1d[...] = m_2d  # if the leading dimension has extent 1
        """
        if self.type.is_memoryviewslice:
            lhs = self
            if lhs.is_memview_broadcast or rhs.is_memview_broadcast:
                lhs.is_memview_broadcast = True
                rhs.is_memview_broadcast = True

    def analyse_as_memview_scalar_assignment(self, rhs):
        # Returns either a MemoryCopy* wrapper for slice assignment or
        # self for plain element assignment.
        lhs = self.analyse_assignment(rhs)
        if lhs:
            rhs.is_memview_copy_assignment = lhs.is_memview_copy_assignment
            return lhs
        return self
class MemoryViewSliceNode(MemoryViewIndexNode):
    """Slicing of a memoryview slice, e.g. ``m[1:, ::2]``."""

    is_memview_slice = True

    # No-op slicing operation, this node will be replaced
    is_ellipsis_noop = False
    is_memview_scalar_assignment = False
    is_memview_index = False
    is_memview_broadcast = False

    def analyse_ellipsis_noop(self, env, getting):
        """Slicing operations needing no evaluation, i.e. m[...] or m[:, :]"""
        ### FIXME: replace directly
        self.is_ellipsis_noop = all(
            index.is_slice and index.start.is_none and index.stop.is_none and index.step.is_none
            for index in self.indices)

        if self.is_ellipsis_noop:
            # a full no-op slice has the same type as its base
            self.type = self.base.type

    def analyse_operation(self, env, getting, axes):
        """Validate the axes layout and compute the resulting slice type."""
        from . import MemoryView

        if not getting:
            self.is_memview_broadcast = True
            self.none_error_message = "Cannot assign to None memoryview slice"
        else:
            self.none_error_message = "Cannot slice None memoryview slice"

        self.analyse_ellipsis_noop(env, getting)
        if self.is_ellipsis_noop:
            return

        self.index = None
        self.is_temp = True
        self.use_managed_ref = True

        if not MemoryView.validate_axes(self.pos, axes):
            self.type = error_type
            return

        self.type = PyrexTypes.MemoryViewSliceType(self.base.type.dtype, axes)

        if not (self.base.is_simple() or self.base.result_in_temp()):
            self.base = self.base.coerce_to_temp(env)

    def analyse_assignment(self, rhs):
        # Choose between scalar broadcast assignment and slice copy.
        if not rhs.type.is_memoryviewslice and (
                self.type.dtype.assignable_from(rhs.type) or
                rhs.type.is_pyobject):
            # scalar assignment
            return MemoryCopyScalar(self.pos, self)
        else:
            return MemoryCopySlice(self.pos, self)

    def is_simple(self):
        if self.is_ellipsis_noop:
            # TODO: fix SimpleCallNode.is_simple()
            return self.base.is_simple() or self.base.result_in_temp()

        return self.result_in_temp()

    def calculate_result_code(self):
        """This is called in case this is a no-op slicing node"""
        return self.base.result()

    def generate_result_code(self, code):
        if self.is_ellipsis_noop:
            return ### FIXME: remove
        buffer_entry = self.buffer_entry()
        have_gil = not self.in_nogil_context

        # TODO Mark: this is insane, do it better
        # Re-associate the coerced start/stop/step temps (flattened into
        # self.indices) back onto their original slice nodes.
        have_slices = False
        it = iter(self.indices)
        for index in self.original_indices:
            if index.is_slice:
                have_slices = True
                if not index.start.is_none:
                    index.start = next(it)
                if not index.stop.is_none:
                    index.stop = next(it)
                if not index.step.is_none:
                    index.step = next(it)
            else:
                next(it)

        assert not list(it)

        buffer_entry.generate_buffer_slice_code(
            code, self.original_indices, self.result(),
            have_gil=have_gil, have_slices=have_slices,
            directives=code.globalstate.directives)

    def generate_assignment_code(self, rhs, code, overloaded_assignment=False):
        # For a no-op slice, only the subexpressions need evaluating.
        if self.is_ellipsis_noop:
            self.generate_subexpr_evaluation_code(code)
        else:
            self.generate_evaluation_code(code)

        if self.is_memview_scalar_assignment:
            self.generate_memoryviewslice_assign_scalar_code(rhs, code)
        else:
            self.generate_memoryviewslice_setslice_code(rhs, code)

        if self.is_ellipsis_noop:
            self.generate_subexpr_disposal_code(code)
        else:
            self.generate_disposal_code(code)

        rhs.generate_disposal_code(code)
        rhs.free_temps(code)
class MemoryCopyNode(ExprNode):
    """
    Wraps a memoryview slice for slice assignment.

        dst: destination memoryview slice
    """

    subexprs = ['dst']

    def __init__(self, pos, dst):
        super(MemoryCopyNode, self).__init__(pos)
        self.dst = dst
        self.type = dst.type

    def generate_assignment_code(self, rhs, code, overloaded_assignment=False):
        # Evaluate the destination, delegate the actual copy to the
        # subclass hook, then dispose of all temps.
        self.dst.generate_evaluation_code(code)
        self._generate_assignment_code(rhs, code)
        self.dst.generate_disposal_code(code)
        rhs.generate_disposal_code(code)
        rhs.free_temps(code)
class MemoryCopySlice(MemoryCopyNode):
    """
    Copy the contents of slice src to slice dst. Does not support indirect
    slices.

        memslice1[...] = memslice2
        memslice1[:] = memslice2
    """

    is_memview_copy_assignment = True
    # C helper performing the element-wise slice copy.
    copy_slice_cname = "__pyx_memoryview_copy_contents"

    def _generate_assignment_code(self, src, code):
        dst = self.dst

        # Indirect dimensions are unsupported; assert at compile time.
        src.type.assert_direct_dims(src.pos)
        dst.type.assert_direct_dims(dst.pos)

        code.putln(code.error_goto_if_neg(
            "%s(%s, %s, %d, %d, %d)" % (self.copy_slice_cname,
                                        src.result(), dst.result(),
                                        src.type.ndim, dst.type.ndim,
                                        dst.type.dtype.is_pyobject),
            dst.pos))
class MemoryCopyScalar(MemoryCopyNode):
    """
    Assign a scalar to a slice. dst must be simple, scalar will be assigned
    to a correct type and not just something assignable.

        memslice1[...] = 0.0
        memslice1[:] = 0.0
    """

    def __init__(self, pos, dst):
        super(MemoryCopyScalar, self).__init__(pos, dst)
        # The node's type is the element dtype, not the slice type.
        self.type = dst.type.dtype

    def _generate_assignment_code(self, scalar, code):
        from . import MemoryView

        self.dst.type.assert_direct_dims(self.dst.pos)

        dtype = self.dst.type.dtype
        type_decl = dtype.declaration_code("")
        slice_decl = self.dst.type.declaration_code("")

        code.begin_block()
        # Evaluate the scalar once into a local before the loop nest.
        code.putln("%s __pyx_temp_scalar = %s;" % (type_decl, scalar.result()))
        if self.dst.result_in_temp() or self.dst.is_simple():
            dst_temp = self.dst.result()
        else:
            code.putln("%s __pyx_temp_slice = %s;" % (slice_decl, self.dst.result()))
            dst_temp = "__pyx_temp_slice"

        # Iterate over every element of the destination slice.
        slice_iter_obj = MemoryView.slice_iter(self.dst.type, dst_temp,
                                               self.dst.type.ndim, code)
        p = slice_iter_obj.start_loops()

        if dtype.is_pyobject:
            # release the old reference before overwriting the element
            code.putln("Py_DECREF(*(PyObject **) %s);" % p)

        code.putln("*((%s *) %s) = __pyx_temp_scalar;" % (type_decl, p))

        if dtype.is_pyobject:
            code.putln("Py_INCREF(__pyx_temp_scalar);")

        slice_iter_obj.end_loops()
        code.end_block()
class SliceIndexNode(ExprNode):
    # 2-element slice indexing: base[start:stop]
    #
    #  base    ExprNode           object being sliced
    #  start   ExprNode or None   lower bound (None => open)
    #  stop    ExprNode or None   upper bound (None => open)
    #  slice   ExprNode or None   cached constant slice object

    subexprs = ['base', 'start', 'stop', 'slice']

    slice = None
def infer_type(self, env):
base_type = self.base.infer_type(env)
if base_type.is_string or base_type.is_cpp_class:
return bytes_type
elif base_type.is_pyunicode_ptr:
return unicode_type
elif base_type in (bytes_type, str_type, unicode_type,
basestring_type, list_type, tuple_type):
return base_type
elif base_type.is_ptr or base_type.is_array:
return PyrexTypes.c_array_type(base_type.base_type, None)
return py_object_type
    def inferable_item_node(self, index=0):
        """Return a node for inferring the type of item *index* of the slice.

        Delegates to the base, offsetting the item position by the slice's
        constant start when known; a non-constant start makes the position
        unknowable (``not_a_constant``).
        """
        # slicing shouldn't change the result type of the base, but the index might
        if index is not not_a_constant and self.start:
            if self.start.has_constant_result():
                index += self.start.constant_result
            else:
                index = not_a_constant
        return self.base.inferable_item_node(index)
def may_be_none(self):
base_type = self.base.type
if base_type:
if base_type.is_string:
return False
if base_type in (bytes_type, str_type, unicode_type,
basestring_type, list_type, tuple_type):
return False
return ExprNode.may_be_none(self)
def calculate_constant_result(self):
if self.start is None:
start = None
else:
start = self.start.constant_result
if self.stop is None:
stop = None
else:
stop = self.stop.constant_result
self.constant_result = self.base.constant_result[start:stop]
def compile_time_value(self, denv):
base = self.base.compile_time_value(denv)
if self.start is None:
start = 0
else:
start = self.start.compile_time_value(denv)
if self.stop is None:
stop = None
else:
stop = self.stop.compile_time_value(denv)
try:
return base[start:stop]
except Exception as e:
self.compile_time_value_error(e)
    def analyse_target_declaration(self, env):
        # A slice assignment target declares nothing new in the scope.
        pass
    def analyse_target_types(self, env):
        """Analyse this node as an assignment target and return the node."""
        node = self.analyse_types(env, getting=False)
        # when assigning, we must accept any Python type
        if node.type.is_pyobject:
            node.type = py_object_type
        return node
def analyse_types(self, env, getting=True):
self.base = self.base.analyse_types(env)
if self.base.type.is_memoryviewslice:
none_node = NoneNode(self.pos)
index = SliceNode(self.pos,
start=self.start or none_node,
stop=self.stop or none_node,
step=none_node)
index_node = IndexNode(self.pos, index, base=self.base)
return index_node.analyse_base_and_index_types(
env, getting=getting, setting=not getting,
analyse_base=False)
if self.start:
self.start = self.start.analyse_types(env)
if self.stop:
self.stop = self.stop.analyse_types(env)
if not env.directives['wraparound']:
check_negative_indices(self.start, self.stop)
base_type = self.base.type
if base_type.is_array and not getting:
# cannot assign directly to C array => try to assign by making a copy
if not self.start and not self.stop:
self.type = base_type
else:
self.type = PyrexTypes.CPtrType(base_type.base_type)
elif base_type.is_string or base_type.is_cpp_string:
self.type = default_str_type(env)
elif base_type.is_pyunicode_ptr:
self.type = unicode_type
elif base_type.is_ptr:
self.type = base_type
elif base_type.is_array:
# we need a ptr type here instead of an array type, as
# array types can result in invalid type casts in the C
# code
self.type = PyrexTypes.CPtrType(base_type.base_type)
else:
self.base = self.base.coerce_to_pyobject(env)
self.type = py_object_type
if base_type.is_builtin_type:
# slicing builtin types returns something of the same type
self.type = base_type
self.base = self.base.as_none_safe_node("'NoneType' object is not subscriptable")
if self.type is py_object_type:
if (not self.start or self.start.is_literal) and \
(not self.stop or self.stop.is_literal):
# cache the constant slice object, in case we need it
none_node = NoneNode(self.pos)
self.slice = SliceNode(
self.pos,
start=copy.deepcopy(self.start or none_node),
stop=copy.deepcopy(self.stop or none_node),
step=none_node
).analyse_types(env)
else:
c_int = PyrexTypes.c_py_ssize_t_type
if self.start:
self.start = self.start.coerce_to(c_int, env)
if self.stop:
self.stop = self.stop.coerce_to(c_int, env)
self.is_temp = 1
return self
nogil_check = Node.gil_error
gil_message = "Slicing Python object"
get_slice_utility_code = TempitaUtilityCode.load(
"SliceObject", "ObjectHandling.c", context={'access': 'Get'})
set_slice_utility_code = TempitaUtilityCode.load(
"SliceObject", "ObjectHandling.c", context={'access': 'Set'})
    def coerce_to(self, dst_type, env):
        """Coerce the slice result to *dst_type*.

        Slicing a C string directly to a Python string type is handled
        in-node (requiring a c_string_encoding for str/unicode targets);
        a full-range C-array-to-C-array slice copies the base directly.
        """
        if ((self.base.type.is_string or self.base.type.is_cpp_string)
                and dst_type in (bytes_type, bytearray_type, str_type, unicode_type)):
            if (dst_type not in (bytes_type, bytearray_type)
                    and not env.directives['c_string_encoding']):
                error(self.pos,
                      "default encoding required for conversion from '%s' to '%s'" %
                      (self.base.type, dst_type))
            self.type = dst_type
        if dst_type.is_array and self.base.type.is_array:
            if not self.start and not self.stop:
                # redundant slice building, copy C arrays directly
                return self.base.coerce_to(dst_type, env)
            # else: check array size if possible
        return super(SliceIndexNode, self).coerce_to(dst_type, env)
    def generate_result_code(self, code):
        """Emit C code computing the slice result into a temp.

        Chooses among: C string => Python string constructors, Py_UNICODE*
        => unicode constructors, unicode substring helper, the generic
        object slicing helper, or list/tuple/sequence GetSlice calls.
        """
        if not self.type.is_pyobject:
            error(self.pos,
                  "Slicing is not currently supported for '%s'." % self.type)
            return

        base_result = self.base.result()
        result = self.result()
        start_code = self.start_code()
        stop_code = self.stop_code()

        if self.base.type.is_string:
            base_result = self.base.result()
            if self.base.type not in (PyrexTypes.c_char_ptr_type, PyrexTypes.c_const_char_ptr_type):
                base_result = '((const char*)%s)' % base_result
            if self.type is bytearray_type:
                type_name = 'ByteArray'
            else:
                type_name = self.type.name.title()
            if self.stop is None:
                # open-ended slice: NUL-terminated constructor
                code.putln(
                    "%s = __Pyx_Py%s_FromString(%s + %s); %s" % (
                        result,
                        type_name,
                        base_result,
                        start_code,
                        code.error_goto_if_null(result, self.pos)))
            else:
                code.putln(
                    "%s = __Pyx_Py%s_FromStringAndSize(%s + %s, %s - %s); %s" % (
                        result,
                        type_name,
                        base_result,
                        start_code,
                        stop_code,
                        start_code,
                        code.error_goto_if_null(result, self.pos)))
        elif self.base.type.is_pyunicode_ptr:
            base_result = self.base.result()
            if self.base.type != PyrexTypes.c_py_unicode_ptr_type:
                base_result = '((const Py_UNICODE*)%s)' % base_result
            if self.stop is None:
                code.putln(
                    "%s = __Pyx_PyUnicode_FromUnicode(%s + %s); %s" % (
                        result,
                        base_result,
                        start_code,
                        code.error_goto_if_null(result, self.pos)))
            else:
                code.putln(
                    "%s = __Pyx_PyUnicode_FromUnicodeAndLength(%s + %s, %s - %s); %s" % (
                        result,
                        base_result,
                        start_code,
                        stop_code,
                        start_code,
                        code.error_goto_if_null(result, self.pos)))

        elif self.base.type is unicode_type:
            code.globalstate.use_utility_code(
                UtilityCode.load_cached("PyUnicode_Substring", "StringTools.c"))
            code.putln(
                "%s = __Pyx_PyUnicode_Substring(%s, %s, %s); %s" % (
                    result,
                    base_result,
                    start_code,
                    stop_code,
                    code.error_goto_if_null(result, self.pos)))
        elif self.type is py_object_type:
            # generic object slicing helper, with C and/or Python bounds
            code.globalstate.use_utility_code(self.get_slice_utility_code)
            (has_c_start, has_c_stop, c_start, c_stop,
             py_start, py_stop, py_slice) = self.get_slice_config()
            code.putln(
                "%s = __Pyx_PyObject_GetSlice(%s, %s, %s, %s, %s, %s, %d, %d, %d); %s" % (
                    result,
                    self.base.py_result(),
                    c_start, c_stop,
                    py_start, py_stop, py_slice,
                    has_c_start, has_c_stop,
                    bool(code.globalstate.directives['wraparound']),
                    code.error_goto_if_null(result, self.pos)))
        else:
            if self.base.type is list_type:
                code.globalstate.use_utility_code(
                    TempitaUtilityCode.load_cached("SliceTupleAndList", "ObjectHandling.c"))
                cfunc = '__Pyx_PyList_GetSlice'
            elif self.base.type is tuple_type:
                code.globalstate.use_utility_code(
                    TempitaUtilityCode.load_cached("SliceTupleAndList", "ObjectHandling.c"))
                cfunc = '__Pyx_PyTuple_GetSlice'
            else:
                cfunc = 'PySequence_GetSlice'
            code.putln(
                "%s = %s(%s, %s, %s); %s" % (
                    result,
                    cfunc,
                    self.base.py_result(),
                    start_code,
                    stop_code,
                    code.error_goto_if_null(result, self.pos)))

        code.put_gotref(self.py_result())
    def generate_assignment_code(self, rhs, code, overloaded_assignment=False,
                                 exception_check=None, exception_value=None):
        """Emit C code assigning *rhs* into this slice.

        Python objects use the SetSlice helper; C arrays are copied with
        memcpy (guarded against size mismatch for array rhs).
        """
        self.generate_subexpr_evaluation_code(code)
        if self.type.is_pyobject:
            code.globalstate.use_utility_code(self.set_slice_utility_code)
            (has_c_start, has_c_stop, c_start, c_stop,
             py_start, py_stop, py_slice) = self.get_slice_config()
            code.put_error_if_neg(self.pos,
                "__Pyx_PyObject_SetSlice(%s, %s, %s, %s, %s, %s, %s, %d, %d, %d)" % (
                    self.base.py_result(),
                    rhs.py_result(),
                    c_start, c_stop,
                    py_start, py_stop, py_slice,
                    has_c_start, has_c_stop,
                    bool(code.globalstate.directives['wraparound'])))
        else:
            start_offset = self.start_code() if self.start else '0'
            if rhs.type.is_array:
                array_length = rhs.type.size
                # compile-time/run-time check that the target span fits
                self.generate_slice_guard_code(code, array_length)
            else:
                array_length = '%s - %s' % (self.stop_code(), start_offset)

            code.globalstate.use_utility_code(UtilityCode.load_cached("IncludeStringH", "StringTools.c"))
            code.putln("memcpy(&(%s[%s]), %s, sizeof(%s[0]) * (%s));" % (
                self.base.result(), start_offset,
                rhs.result(),
                self.base.result(), array_length
            ))

        self.generate_subexpr_disposal_code(code)
        self.free_subexpr_temps(code)
        rhs.generate_disposal_code(code)
        rhs.free_temps(code)
def generate_deletion_code(self, code, ignore_nonexisting=False):
    """Generate C code for ``del base[start:stop]``.

    Only Python objects support slice deletion; for any other base type a
    compile error is reported and no code is emitted.
    """
    if not self.base.type.is_pyobject:
        error(self.pos,
              "Deleting slices is only supported for Python types, not '%s'." % self.type)
        return
    self.generate_subexpr_evaluation_code(code)
    code.globalstate.use_utility_code(self.set_slice_utility_code)
    (has_c_start, has_c_stop, c_start, c_stop,
     py_start, py_stop, py_slice) = self.get_slice_config()
    code.put_error_if_neg(self.pos,
        "__Pyx_PyObject_DelSlice(%s, %s, %s, %s, %s, %s, %d, %d, %d)" % (
            self.base.py_result(),
            c_start, c_stop,
            py_start, py_stop, py_slice,
            has_c_start, has_c_stop,
            bool(code.globalstate.directives['wraparound'])))
    self.generate_subexpr_disposal_code(code)
    self.free_subexpr_temps(code)
def get_slice_config(self):
    """Describe the slice bounds for the slice utility functions.

    Returns a 7-tuple ``(has_c_start, has_c_stop, c_start, c_stop,
    py_start, py_stop, py_slice)`` where each bound is passed either as a
    C integer expression or as a pointer to a Python object ('NULL' and
    '0' stand in for absent values).
    """
    def describe_bound(bound):
        # -> (has_c_value, c_code, py_code) for one slice bound
        if not bound:
            return False, '0', 'NULL'
        if bound.type.is_pyobject:
            return False, '0', '&%s' % bound.py_result()
        return True, bound.result(), 'NULL'

    has_c_start, c_start, py_start = describe_bound(self.start)
    has_c_stop, c_stop, py_stop = describe_bound(self.stop)
    py_slice = '&%s' % self.slice.py_result() if self.slice else 'NULL'
    return (has_c_start, has_c_stop, c_start, c_stop,
            py_start, py_stop, py_slice)
def generate_slice_guard_code(self, code, target_size):
    """Check that a C array slice being assigned has ``target_size`` elements.

    Bounds that are compile-time integer literals are folded here so that
    obvious mismatches become compile errors; anything left symbolic turns
    into a C runtime length comparison that raises ValueError.
    """
    if not self.base.type.is_array:
        return
    slice_size = self.base.type.size
    try:
        total_length = slice_size = int(slice_size)
    except ValueError:
        # Array length is a C expression, not an integer literal.
        total_length = None
    start = stop = None
    if self.stop:
        stop = self.stop.result()
        try:
            stop = int(stop)
            if stop < 0:
                # Negative stop counts back from the array end.
                if total_length is None:
                    slice_size = '%s + %d' % (slice_size, stop)
                else:
                    slice_size += stop
            else:
                slice_size = stop
            stop = None
        except ValueError:
            # Non-literal stop: keep it for the runtime check below.
            pass
    if self.start:
        start = self.start.result()
        try:
            start = int(start)
            if start < 0:
                # Negative start counts back from the array end.
                if total_length is None:
                    start = '%s + %d' % (self.base.type.size, start)
                else:
                    start += total_length
            if isinstance(slice_size, _py_int_types):
                slice_size -= start
            else:
                slice_size = '%s - (%s)' % (slice_size, start)
            start = None
        except ValueError:
            # Non-literal start: keep it for the runtime check below.
            pass
    runtime_check = None
    compile_time_check = False
    try:
        int_target_size = int(target_size)
    except ValueError:
        int_target_size = None
    else:
        compile_time_check = isinstance(slice_size, _py_int_types)
    if compile_time_check and slice_size < 0:
        if int_target_size > 0:
            error(self.pos, "Assignment to empty slice.")
    elif compile_time_check and start is None and stop is None:
        # we know the exact slice length
        if int_target_size != slice_size:
            error(self.pos, "Assignment to slice of wrong length, expected %s, got %s" % (
                slice_size, target_size))
    elif start is not None:
        if stop is None:
            stop = slice_size
        runtime_check = "(%s)-(%s)" % (stop, start)
    elif stop is not None:
        runtime_check = stop
    else:
        runtime_check = slice_size
    if runtime_check:
        # Emit a C-level length comparison raising ValueError on mismatch.
        code.putln("if (unlikely((%s) != (%s))) {" % (runtime_check, target_size))
        code.putln(
            'PyErr_Format(PyExc_ValueError, "Assignment to slice of wrong length,'
            ' expected %%" CYTHON_FORMAT_SSIZE_T "d, got %%" CYTHON_FORMAT_SSIZE_T "d",'
            ' (Py_ssize_t)(%s), (Py_ssize_t)(%s));' % (
                target_size, runtime_check))
        code.putln(code.error_goto(self.pos))
        code.putln("}")
def start_code(self):
    """Return the C expression for the slice start, defaulting to '0'."""
    return self.start.result() if self.start else "0"
def stop_code(self):
    """Return the C expression for the slice stop.

    Falls back to the array length for C arrays and to PY_SSIZE_T_MAX
    (open-ended) otherwise.
    """
    if self.stop:
        return self.stop.result()
    base_type = self.base.type
    return base_type.size if base_type.is_array else "PY_SSIZE_T_MAX"
def calculate_result_code(self):
    """Placeholder: this node yields no usable C expression of its own.

    The method only exists to satisfy the ExprNode interface; callers do
    not consume self.result() for slice-index nodes.
    """
    return "<unused>"
class SliceNode(ExprNode):
    """A ``start:stop:step`` slice expression inside a subscript list.

    start  ExprNode
    stop   ExprNode
    step   ExprNode

    When all three parts are literals, the slice object is built once and
    cached as a module-level constant; otherwise PySlice_New() is called
    at runtime.
    """

    subexprs = ['start', 'stop', 'step']

    is_slice = True
    type = slice_type
    is_temp = 1

    def calculate_constant_result(self):
        # Fold a slice of constants into a constant slice() object.
        self.constant_result = slice(
            self.start.constant_result,
            self.stop.constant_result,
            self.step.constant_result)

    def compile_time_value(self, denv):
        start = self.start.compile_time_value(denv)
        stop = self.stop.compile_time_value(denv)
        step = self.step.compile_time_value(denv)
        try:
            return slice(start, stop, step)
        except Exception as e:
            self.compile_time_value_error(e)

    def may_be_none(self):
        # PySlice_New() never returns None on success.
        return False

    def analyse_types(self, env):
        start = self.start.analyse_types(env)
        stop = self.stop.analyse_types(env)
        step = self.step.analyse_types(env)
        self.start = start.coerce_to_pyobject(env)
        self.stop = stop.coerce_to_pyobject(env)
        self.step = step.coerce_to_pyobject(env)
        if self.start.is_literal and self.stop.is_literal and self.step.is_literal:
            # All-constant slice => emit as a cached module-level constant.
            self.is_literal = True
            self.is_temp = False
        return self

    gil_message = "Constructing Python slice object"

    def calculate_result_code(self):
        return self.result_code

    def generate_result_code(self, code):
        if self.is_literal:
            # Allocate a module-level constant and redirect the
            # construction code into the cached-constants section.
            self.result_code = code.get_py_const(py_object_type, 'slice', cleanup_level=2)
            code = code.get_cached_constants_writer()
            code.mark_pos(self.pos)
        code.putln(
            "%s = PySlice_New(%s, %s, %s); %s" % (
                self.result(),
                self.start.py_result(),
                self.stop.py_result(),
                self.step.py_result(),
                code.error_goto_if_null(self.result(), self.pos)))
        code.put_gotref(self.py_result())
        if self.is_literal:
            # Hand the reference over to the module constant table.
            code.put_giveref(self.py_result())
class CallNode(ExprNode):
    """Abstract base class for call expressions (function(...) syntax)."""

    # allow overriding the default 'may_be_none' behaviour
    may_return_none = None

    def infer_type(self, env):
        """Best-effort result type inference without full type analysis."""
        function = self.function
        func_type = function.infer_type(env)
        if isinstance(function, NewExprNode):
            # note: needs call to infer_type() above
            return PyrexTypes.CPtrType(function.class_type)
        if func_type is py_object_type:
            # function might have lied for safety => try to find better type
            entry = getattr(function, 'entry', None)
            if entry is not None:
                func_type = entry.type or func_type
        if func_type.is_ptr:
            func_type = func_type.base_type
        if func_type.is_cfunction:
            return func_type.return_type
        elif func_type is type_type:
            # Calling a type object: constructors of extension types and
            # instance-constructing builtins have known result types.
            if function.is_name and function.entry and function.entry.type:
                result_type = function.entry.type
                if result_type.is_extension_type:
                    return result_type
                elif result_type.is_builtin_type:
                    if function.entry.name == 'float':
                        return PyrexTypes.c_double_type
                    elif function.entry.name in Builtin.types_that_construct_their_instance:
                        return result_type
        return py_object_type

    def type_dependencies(self, env):
        # TODO: Update when Danilo's C++ code merged in to handle the
        # the case of function overloading.
        return self.function.type_dependencies(env)

    def is_simple(self):
        # C function calls could be considered simple, but they may
        # have side-effects that may hit when multiple operations must
        # be effected in order, e.g. when constructing the argument
        # sequence for a function call or comparing values.
        return False

    def may_be_none(self):
        if self.may_return_none is not None:
            return self.may_return_none
        func_type = self.function.type
        if func_type is type_type and self.function.is_name:
            # Constructors never return None.
            entry = self.function.entry
            if entry.type.is_extension_type:
                return False
            if (entry.type.is_builtin_type and
                    entry.name in Builtin.types_that_construct_their_instance):
                return False
        return ExprNode.may_be_none(self)

    def analyse_as_type_constructor(self, env):
        """If calling a struct/union or C++ class type, rewrite this node in
        place (DictNode for structs, C constructor call for C++) and return
        True; otherwise return None."""
        type = self.function.analyse_as_type(env)
        if type and type.is_struct_or_union:
            args, kwds = self.explicit_args_kwds()
            items = []
            for arg, member in zip(args, type.scope.var_entries):
                items.append(DictItemNode(pos=arg.pos, key=StringNode(pos=arg.pos, value=member.name), value=arg))
            if kwds:
                items += kwds.key_value_pairs
            self.key_value_pairs = items
            self.__class__ = DictNode
            self.analyse_types(env)    # FIXME
            self.coerce_to(type, env)
            return True
        elif type and type.is_cpp_class:
            # Rewrite into a direct call of the C++ constructor.
            self.args = [ arg.analyse_types(env) for arg in self.args ]
            constructor = type.scope.lookup("<init>")
            self.function = RawCNameExprNode(self.function.pos, constructor.type)
            self.function.entry = constructor
            self.function.set_cname(type.empty_declaration_code())
            self.analyse_c_function_call(env)
            self.type = type
            return True

    def is_lvalue(self):
        # Only calls returning a C++ reference can be assigned to.
        return self.type.is_reference

    def nogil_check(self, env):
        func_type = self.function_type()
        if func_type.is_pyobject:
            self.gil_error()
        elif not getattr(func_type, 'nogil', False):
            self.gil_error()

    gil_message = "Calling gil-requiring function"
class SimpleCallNode(CallNode):
    """Function call without keyword, * or ** args.

    function            ExprNode
    args                [ExprNode]
    arg_tuple           ExprNode or None    used internally
    self                ExprNode or None    used internally
    coerced_self        ExprNode or None    used internally
    wrapper_call        bool                used internally
    has_optional_args   bool                used internally
    nogil               bool                used internally
    """

    subexprs = ['self', 'coerced_self', 'function', 'args', 'arg_tuple']

    self = None
    coerced_self = None
    arg_tuple = None
    wrapper_call = False
    has_optional_args = False
    nogil = False
    analysed = False

    def compile_time_value(self, denv):
        """Evaluate the call at compile time (DEF/IF contexts)."""
        function = self.function.compile_time_value(denv)
        args = [arg.compile_time_value(denv) for arg in self.args]
        try:
            return function(*args)
        except Exception as e:
            self.compile_time_value_error(e)

    def analyse_as_type(self, env):
        """Support ``cython.pointer(T)`` used in a type position."""
        attr = self.function.as_cython_attribute()
        if attr == 'pointer':
            if len(self.args) != 1:
                # BUG FIX: self.args is a plain list and has no .pos
                # attribute; use the call node's own position so the
                # compile error is reported instead of crashing.
                error(self.pos, "only one type allowed.")
            else:
                type = self.args[0].analyse_as_type(env)
                if not type:
                    error(self.args[0].pos, "Unknown type")
                else:
                    return PyrexTypes.CPtrType(type)

    def explicit_args_kwds(self):
        # Simple calls never carry keyword arguments.
        return self.args, None

    def analyse_types(self, env):
        """Analyse the callee and arguments, dispatching between the
        Python calling convention (args packed into a tuple) and direct
        C function calls."""
        if self.analyse_as_type_constructor(env):
            return self
        if self.analysed:
            return self
        self.analysed = True
        self.function.is_called = 1
        self.function = self.function.analyse_types(env)
        function = self.function

        if function.is_attribute and function.entry and function.entry.is_cmethod:
            # Take ownership of the object from which the attribute
            # was obtained, because we need to pass it as 'self'.
            self.self = function.obj
            function.obj = CloneNode(self.self)

        func_type = self.function_type()
        if func_type.is_pyobject:
            # Python call: pack the arguments into a tuple.
            self.arg_tuple = TupleNode(self.pos, args = self.args)
            self.arg_tuple = self.arg_tuple.analyse_types(env).coerce_to_pyobject(env)
            self.args = None
            if func_type is Builtin.type_type and function.is_name and \
                   function.entry and \
                   function.entry.is_builtin and \
                   function.entry.name in Builtin.types_that_construct_their_instance:
                # calling a builtin type that returns a specific object type
                if function.entry.name == 'float':
                    # the following will come true later on in a transform
                    self.type = PyrexTypes.c_double_type
                    self.result_ctype = PyrexTypes.c_double_type
                else:
                    self.type = Builtin.builtin_types[function.entry.name]
                    self.result_ctype = py_object_type
                self.may_return_none = False
            elif function.is_name and function.type_entry:
                # We are calling an extension type constructor.  As
                # long as we do not support __new__(), the result type
                # is clear
                self.type = function.type_entry.type
                self.result_ctype = py_object_type
                self.may_return_none = False
            else:
                self.type = py_object_type
            self.is_temp = 1
        else:
            # Direct C function call.
            self.args = [ arg.analyse_types(env) for arg in self.args ]
            self.analyse_c_function_call(env)
            if func_type.exception_check == '+':
                self.is_temp = True
        return self

    def function_type(self):
        # Return the type of the function being called, coercing a function
        # pointer to a function if necessary. If the function has fused
        # arguments, return the specific type.
        func_type = self.function.type
        if func_type.is_ptr:
            func_type = func_type.base_type
        return func_type

    def analyse_c_function_call(self, env):
        """Resolve overloads, check/coerce arguments and compute the result
        type for a direct C/C++ function call."""
        func_type = self.function.type
        if func_type is error_type:
            self.type = error_type
            return

        if func_type.is_cfunction and func_type.is_static_method:
            if self.self and self.self.type.is_extension_type:
                # To support this we'd need to pass self to determine whether
                # it was overloaded in Python space (possibly via a Cython
                # superclass turning a cdef method into a cpdef one).
                error(self.pos, "Cannot call a static method on an instance variable.")
            args = self.args
        elif self.self:
            args = [self.self] + self.args
        else:
            args = self.args

        # Locate the entry (or overload set) that is actually called.
        if func_type.is_cpp_class:
            overloaded_entry = self.function.type.scope.lookup("operator()")
            if overloaded_entry is None:
                self.type = PyrexTypes.error_type
                self.result_code = "<error>"
                return
        elif hasattr(self.function, 'entry'):
            overloaded_entry = self.function.entry
        elif self.function.is_subscript and self.function.is_fused_index:
            overloaded_entry = self.function.type.entry
        else:
            overloaded_entry = None

        if overloaded_entry:
            if self.function.type.is_fused:
                functypes = self.function.type.get_all_specialized_function_types()
                alternatives = [f.entry for f in functypes]
            else:
                alternatives = overloaded_entry.all_alternatives()

            entry = PyrexTypes.best_match(args, alternatives, self.pos, env)

            if not entry:
                self.type = PyrexTypes.error_type
                self.result_code = "<error>"
                return

            entry.used = True
            if not func_type.is_cpp_class:
                self.function.entry = entry
            self.function.type = entry.type
            func_type = self.function_type()
        else:
            entry = None
            func_type = self.function_type()
            if not func_type.is_cfunction:
                error(self.pos, "Calling non-function type '%s'" % func_type)
                self.type = PyrexTypes.error_type
                self.result_code = "<error>"
                return

        # Check no. of args
        max_nargs = len(func_type.args)
        expected_nargs = max_nargs - func_type.optional_arg_count
        actual_nargs = len(args)
        if func_type.optional_arg_count and expected_nargs != actual_nargs:
            self.has_optional_args = 1
            self.is_temp = 1

        # check 'self' argument
        if entry and entry.is_cmethod and func_type.args and not func_type.is_static_method:
            formal_arg = func_type.args[0]
            arg = args[0]
            if formal_arg.not_none:
                if self.self:
                    self.self = self.self.as_none_safe_node(
                        "'NoneType' object has no attribute '%s'",
                        error='PyExc_AttributeError',
                        format_args=[entry.name])
                else:
                    # unbound method
                    arg = arg.as_none_safe_node(
                        "descriptor '%s' requires a '%s' object but received a 'NoneType'",
                        format_args=[entry.name, formal_arg.type.name])
            if self.self:
                if formal_arg.accept_builtin_subtypes:
                    arg = CMethodSelfCloneNode(self.self)
                else:
                    arg = CloneNode(self.self)
                arg = self.coerced_self = arg.coerce_to(formal_arg.type, env)
            elif formal_arg.type.is_builtin_type:
                # special case: unbound methods of builtins accept subtypes
                arg = arg.coerce_to(formal_arg.type, env)
                if arg.type.is_builtin_type and isinstance(arg, PyTypeTestNode):
                    arg.exact_builtin_type = False
            args[0] = arg

        # Coerce arguments
        some_args_in_temps = False
        for i in range(min(max_nargs, actual_nargs)):
            formal_arg = func_type.args[i]
            formal_type = formal_arg.type
            arg = args[i].coerce_to(formal_type, env)
            if formal_arg.not_none:
                # C methods must do the None checks at *call* time
                arg = arg.as_none_safe_node(
                    "cannot pass None into a C function argument that is declared 'not None'")
            if arg.is_temp:
                if i > 0:
                    # first argument in temp doesn't impact subsequent arguments
                    some_args_in_temps = True
            elif arg.type.is_pyobject and not env.nogil:
                if i == 0 and self.self is not None:
                    # a method's cloned "self" argument is ok
                    pass
                elif arg.nonlocally_immutable():
                    # plain local variables are ok
                    pass
                else:
                    # we do not safely own the argument's reference,
                    # but we must make sure it cannot be collected
                    # before we return from the function, so we create
                    # an owned temp reference to it
                    if i > 0:  # first argument doesn't matter
                        some_args_in_temps = True
                    arg = arg.coerce_to_temp(env)
            args[i] = arg

        # handle additional varargs parameters
        for i in range(max_nargs, actual_nargs):
            arg = args[i]
            if arg.type.is_pyobject:
                if arg.type is str_type:
                    arg_ctype = PyrexTypes.c_char_ptr_type
                else:
                    arg_ctype = arg.type.default_coerced_ctype()
                if arg_ctype is None:
                    error(self.args[i].pos,
                          "Python object cannot be passed as a varargs parameter")
                else:
                    args[i] = arg = arg.coerce_to(arg_ctype, env)
            if arg.is_temp and i > 0:
                some_args_in_temps = True

        if some_args_in_temps:
            # if some args are temps and others are not, they may get
            # constructed in the wrong order (temps first) => make
            # sure they are either all temps or all not temps (except
            # for the last argument, which is evaluated last in any
            # case)
            for i in range(actual_nargs-1):
                if i == 0 and self.self is not None:
                    continue  # self is ok
                arg = args[i]
                if arg.nonlocally_immutable():
                    # locals, C functions, unassignable types are safe.
                    pass
                elif arg.type.is_cpp_class:
                    # Assignment has side effects, avoid.
                    pass
                elif env.nogil and arg.type.is_pyobject:
                    # can't copy a Python reference into a temp in nogil
                    # env (this is safe: a construction would fail in
                    # nogil anyway)
                    pass
                else:
                    #self.args[i] = arg.coerce_to_temp(env)
                    # instead: issue a warning
                    if i > 0 or i == 1 and self.self is not None:  # skip first arg
                        warning(arg.pos, "Argument evaluation order in C function call is undefined and may not be as expected", 0)
                        break

        self.args[:] = args

        # Calc result type and code fragment
        if isinstance(self.function, NewExprNode):
            self.type = PyrexTypes.CPtrType(self.function.class_type)
        else:
            self.type = func_type.return_type

        if self.function.is_name or self.function.is_attribute:
            if self.function.entry and self.function.entry.utility_code:
                self.is_temp = 1  # currently doesn't work for self.calculate_result_code()

        if self.type.is_pyobject:
            self.result_ctype = py_object_type
            self.is_temp = 1
        elif func_type.exception_value is not None or func_type.exception_check:
            self.is_temp = 1
        elif self.type.is_memoryviewslice:
            self.is_temp = 1
            # func_type.exception_check = True

        if self.is_temp and self.type.is_reference:
            self.type = PyrexTypes.CFakeReferenceType(self.type.ref_base_type)

        # Called in 'nogil' context?
        self.nogil = env.nogil
        if (self.nogil and
                func_type.exception_check and
                func_type.exception_check != '+'):
            env.use_utility_code(pyerr_occurred_withgil_utility_code)

        # C++ exception handler
        if func_type.exception_check == '+':
            if func_type.exception_value is None:
                env.use_utility_code(UtilityCode.load_cached("CppExceptionConversion", "CppSupport.cpp"))

    def calculate_result_code(self):
        return self.c_call_code()

    def c_call_code(self):
        """Build the C expression for the call itself (callee + arg list)."""
        func_type = self.function_type()
        if self.type is PyrexTypes.error_type or not func_type.is_cfunction:
            return "<error>"
        formal_args = func_type.args
        arg_list_code = []
        args = list(zip(formal_args, self.args))
        max_nargs = len(func_type.args)
        expected_nargs = max_nargs - func_type.optional_arg_count
        actual_nargs = len(self.args)
        for formal_arg, actual_arg in args[:expected_nargs]:
            arg_code = actual_arg.result_as(formal_arg.type)
            arg_list_code.append(arg_code)

        if func_type.is_overridable:
            # cpdef functions take an extra flag to skip the Python dispatch.
            arg_list_code.append(str(int(self.wrapper_call or self.function.entry.is_unbound_cmethod)))

        if func_type.optional_arg_count:
            if expected_nargs == actual_nargs:
                optional_args = 'NULL'
            else:
                optional_args = "&%s" % self.opt_arg_struct
            arg_list_code.append(optional_args)

        for actual_arg in self.args[len(formal_args):]:
            arg_list_code.append(actual_arg.result())

        result = "%s(%s)" % (self.function.result(), ', '.join(arg_list_code))
        return result

    def is_c_result_required(self):
        func_type = self.function_type()
        if not func_type.exception_value or func_type.exception_check == '+':
            return False  # skip allocation of unused result temp
        return True

    def generate_result_code(self, code):
        """Emit the C statement(s) for the call, including error checks."""
        func_type = self.function_type()
        if self.function.is_name or self.function.is_attribute:
            if self.function.entry and self.function.entry.utility_code:
                code.globalstate.use_utility_code(self.function.entry.utility_code)
        if func_type.is_pyobject:
            if func_type is not type_type and not self.arg_tuple.args and self.arg_tuple.is_literal:
                # Empty constant arg tuple => skip the tuple entirely.
                code.globalstate.use_utility_code(UtilityCode.load_cached(
                    "PyObjectCallNoArg", "ObjectHandling.c"))
                code.putln(
                    "%s = __Pyx_PyObject_CallNoArg(%s); %s" % (
                        self.result(),
                        self.function.py_result(),
                        code.error_goto_if_null(self.result(), self.pos)))
            else:
                arg_code = self.arg_tuple.py_result()
                code.globalstate.use_utility_code(UtilityCode.load_cached(
                    "PyObjectCall", "ObjectHandling.c"))
                code.putln(
                    "%s = __Pyx_PyObject_Call(%s, %s, NULL); %s" % (
                        self.result(),
                        self.function.py_result(),
                        arg_code,
                        code.error_goto_if_null(self.result(), self.pos)))
            code.put_gotref(self.py_result())
        elif func_type.is_cfunction:
            if self.has_optional_args:
                # Fill the struct that carries the optional argument values.
                actual_nargs = len(self.args)
                expected_nargs = len(func_type.args) - func_type.optional_arg_count
                self.opt_arg_struct = code.funcstate.allocate_temp(
                    func_type.op_arg_struct.base_type, manage_ref=True)
                code.putln("%s.%s = %s;" % (
                    self.opt_arg_struct,
                    Naming.pyrex_prefix + "n",
                    len(self.args) - expected_nargs))
                args = list(zip(func_type.args, self.args))
                for formal_arg, actual_arg in args[expected_nargs:actual_nargs]:
                    code.putln("%s.%s = %s;" % (
                        self.opt_arg_struct,
                        func_type.opt_arg_cname(formal_arg.name),
                        actual_arg.result_as(formal_arg.type)))
            exc_checks = []
            if self.type.is_pyobject and self.is_temp:
                exc_checks.append("!%s" % self.result())
            elif self.type.is_memoryviewslice:
                assert self.is_temp
                exc_checks.append(self.type.error_condition(self.result()))
            else:
                exc_val = func_type.exception_value
                exc_check = func_type.exception_check
                if exc_val is not None:
                    exc_checks.append("%s == %s" % (self.result(), exc_val))
                if exc_check:
                    if self.nogil:
                        exc_checks.append("__Pyx_ErrOccurredWithGIL()")
                    else:
                        exc_checks.append("PyErr_Occurred()")
            if self.is_temp or exc_checks:
                rhs = self.c_call_code()
                if self.result():
                    lhs = "%s = " % self.result()
                    if self.is_temp and self.type.is_pyobject:
                        #return_type = self.type # func_type.return_type
                        #print "SimpleCallNode.generate_result_code: casting", rhs, \
                        #    "from", return_type, "to pyobject" ###
                        rhs = typecast(py_object_type, self.type, rhs)
                else:
                    lhs = ""
                if func_type.exception_check == '+':
                    translate_cpp_exception(code, self.pos, '%s%s;' % (lhs, rhs),
                        func_type.exception_value, self.nogil)
                else:
                    if exc_checks:
                        goto_error = code.error_goto_if(" && ".join(exc_checks), self.pos)
                    else:
                        goto_error = ""
                    code.putln("%s%s; %s" % (lhs, rhs, goto_error))
                if self.type.is_pyobject and self.result():
                    code.put_gotref(self.py_result())
            if self.has_optional_args:
                code.funcstate.release_temp(self.opt_arg_struct)
class PyMethodCallNode(SimpleCallNode):
    """Specialised call to a (potential) PyMethodObject with non-constant
    argument tuple.

    Allows the self argument to be injected directly instead of repacking
    a tuple for it.

    function    ExprNode     the function/method object to call
    arg_tuple   TupleNode    the arguments for the args tuple
    """

    subexprs = ['function', 'arg_tuple']
    is_temp = True

    def generate_evaluation_code(self, code):
        code.mark_pos(self.pos)
        self.allocate_temp_result(code)

        self.function.generate_evaluation_code(code)
        assert self.arg_tuple.mult_factor is None
        args = self.arg_tuple.args
        for arg in args:
            arg.generate_evaluation_code(code)

        # make sure function is in temp so that we can replace the reference below if it's a method
        reuse_function_temp = self.function.is_temp
        if reuse_function_temp:
            function = self.function.result()
        else:
            function = code.funcstate.allocate_temp(py_object_type, manage_ref=True)
            self.function.make_owned_reference(code)
            code.put("%s = %s; " % (function, self.function.py_result()))
            self.function.generate_disposal_code(code)
            self.function.free_temps(code)

        self_arg = code.funcstate.allocate_temp(py_object_type, manage_ref=True)
        code.putln("%s = NULL;" % self_arg)
        arg_offset_cname = None
        if len(args) > 1:
            # Runtime offset of the explicit args in the argument tuple:
            # 1 when a bound method contributes its 'self', else 0.
            arg_offset_cname = code.funcstate.allocate_temp(PyrexTypes.c_py_ssize_t_type, manage_ref=False)
            code.putln("%s = 0;" % arg_offset_cname)

        def attribute_is_likely_method(attr):
            # Heuristic for the C branch-prediction hint below.
            obj = attr.obj
            if obj.is_name and obj.entry.is_pyglobal:
                return False  # more likely to be a function
            return True

        if self.function.is_attribute:
            likely_method = 'likely' if attribute_is_likely_method(self.function) else 'unlikely'
        elif self.function.is_name and self.function.cf_state:
            # not an attribute itself, but might have been assigned from one (e.g. bound method)
            for assignment in self.function.cf_state:
                value = assignment.rhs
                if value and value.is_attribute and value.obj.type.is_pyobject:
                    if attribute_is_likely_method(value):
                        likely_method = 'likely'
                        break
            else:
                likely_method = 'unlikely'
        else:
            likely_method = 'unlikely'

        # Unpack a bound method: call the underlying function with 'self'
        # prepended to the argument tuple.
        code.putln("if (CYTHON_COMPILING_IN_CPYTHON && %s(PyMethod_Check(%s))) {" % (likely_method, function))
        code.putln("%s = PyMethod_GET_SELF(%s);" % (self_arg, function))
        # the following is always true in Py3 (kept only for safety),
        # but is false for unbound methods in Py2
        code.putln("if (likely(%s)) {" % self_arg)
        code.putln("PyObject* function = PyMethod_GET_FUNCTION(%s);" % function)
        code.put_incref(self_arg, py_object_type)
        code.put_incref("function", py_object_type)
        # free method object as early to possible to enable reuse from CPython's freelist
        code.put_decref_set(function, "function")
        if len(args) > 1:
            code.putln("%s = 1;" % arg_offset_cname)
        code.putln("}")
        code.putln("}")

        if not args:
            # fastest special case: try to avoid tuple creation
            code.putln("if (%s) {" % self_arg)
            code.globalstate.use_utility_code(
                UtilityCode.load_cached("PyObjectCallOneArg", "ObjectHandling.c"))
            code.putln(
                "%s = __Pyx_PyObject_CallOneArg(%s, %s); %s" % (
                    self.result(),
                    function, self_arg,
                    code.error_goto_if_null(self.result(), self.pos)))
            code.put_decref_clear(self_arg, py_object_type)
            code.funcstate.release_temp(self_arg)
            code.putln("} else {")
            code.globalstate.use_utility_code(
                UtilityCode.load_cached("PyObjectCallNoArg", "ObjectHandling.c"))
            code.putln(
                "%s = __Pyx_PyObject_CallNoArg(%s); %s" % (
                    self.result(),
                    function,
                    code.error_goto_if_null(self.result(), self.pos)))
            code.putln("}")
            code.put_gotref(self.py_result())
        else:
            if len(args) == 1:
                # Single explicit arg: if not a method, call with one arg
                # directly and skip the tuple.
                code.putln("if (!%s) {" % self_arg)
                code.globalstate.use_utility_code(
                    UtilityCode.load_cached("PyObjectCallOneArg", "ObjectHandling.c"))
                arg = args[0]
                code.putln(
                    "%s = __Pyx_PyObject_CallOneArg(%s, %s); %s" % (
                        self.result(),
                        function, arg.py_result(),
                        code.error_goto_if_null(self.result(), self.pos)))
                arg.generate_disposal_code(code)
                code.put_gotref(self.py_result())
                code.putln("} else {")
                arg_offset = 1
            else:
                arg_offset = arg_offset_cname

            args_tuple = code.funcstate.allocate_temp(py_object_type, manage_ref=True)
            code.putln("%s = PyTuple_New(%d+%s); %s" % (
                args_tuple, len(args), arg_offset,
                code.error_goto_if_null(args_tuple, self.pos)))
            code.put_gotref(args_tuple)

            if len(args) > 1:
                code.putln("if (%s) {" % self_arg)
            code.putln("__Pyx_GIVEREF(%s); PyTuple_SET_ITEM(%s, 0, %s); %s = NULL;" % (
                self_arg, args_tuple, self_arg, self_arg))  # stealing owned ref in this case
            code.funcstate.release_temp(self_arg)
            if len(args) > 1:
                code.putln("}")

            for i, arg in enumerate(args):
                arg.make_owned_reference(code)
                code.put_giveref(arg.py_result())
                code.putln("PyTuple_SET_ITEM(%s, %d+%s, %s);" % (
                    args_tuple, i, arg_offset, arg.py_result()))
            if len(args) > 1:
                code.funcstate.release_temp(arg_offset_cname)

            for arg in args:
                arg.generate_post_assignment_code(code)
                arg.free_temps(code)

            code.globalstate.use_utility_code(
                UtilityCode.load_cached("PyObjectCall", "ObjectHandling.c"))
            code.putln(
                "%s = __Pyx_PyObject_Call(%s, %s, NULL); %s" % (
                    self.result(),
                    function, args_tuple,
                    code.error_goto_if_null(self.result(), self.pos)))
            code.put_gotref(self.py_result())

            code.put_decref_clear(args_tuple, py_object_type)
            code.funcstate.release_temp(args_tuple)

            if len(args) == 1:
                code.putln("}")

        if reuse_function_temp:
            self.function.generate_disposal_code(code)
            self.function.free_temps(code)
        else:
            code.put_decref_clear(function, py_object_type)
            code.funcstate.release_temp(function)
class InlinedDefNodeCallNode(CallNode):
    """Inlined call to a Python 'def' function: bypasses the Python call
    protocol by calling the generated C implementation directly.

    function        PyCFunctionNode
    function_name   NameNode
    args            [ExprNode]
    """

    subexprs = ['args', 'function_name']
    is_temp = 1
    type = py_object_type
    function = None
    function_name = None

    def can_be_inlined(self):
        # Only simple signatures (no */** args, no keyword-only args, and
        # an exact positional match) can be called directly.
        func_type = self.function.def_node
        if func_type.star_arg or func_type.starstar_arg:
            return False
        if len(func_type.args) != len(self.args):
            return False
        if func_type.num_kwonly_args:
            return False  # actually wrong number of arguments
        return True

    def analyse_types(self, env):
        self.function_name = self.function_name.analyse_types(env)

        self.args = [ arg.analyse_types(env) for arg in self.args ]
        func_type = self.function.def_node
        actual_nargs = len(self.args)

        # Coerce arguments
        some_args_in_temps = False
        for i in range(actual_nargs):
            formal_type = func_type.args[i].type
            arg = self.args[i].coerce_to(formal_type, env)
            if arg.is_temp:
                if i > 0:
                    # first argument in temp doesn't impact subsequent arguments
                    some_args_in_temps = True
            elif arg.type.is_pyobject and not env.nogil:
                if arg.nonlocally_immutable():
                    # plain local variables are ok
                    pass
                else:
                    # we do not safely own the argument's reference,
                    # but we must make sure it cannot be collected
                    # before we return from the function, so we create
                    # an owned temp reference to it
                    if i > 0:  # first argument doesn't matter
                        some_args_in_temps = True
                    arg = arg.coerce_to_temp(env)
            self.args[i] = arg

        if some_args_in_temps:
            # if some args are temps and others are not, they may get
            # constructed in the wrong order (temps first) => make
            # sure they are either all temps or all not temps (except
            # for the last argument, which is evaluated last in any
            # case)
            for i in range(actual_nargs-1):
                arg = self.args[i]
                if arg.nonlocally_immutable():
                    # locals, C functions, unassignable types are safe.
                    pass
                elif arg.type.is_cpp_class:
                    # Assignment has side effects, avoid.
                    pass
                elif env.nogil and arg.type.is_pyobject:
                    # can't copy a Python reference into a temp in nogil
                    # env (this is safe: a construction would fail in
                    # nogil anyway)
                    pass
                else:
                    #self.args[i] = arg.coerce_to_temp(env)
                    # instead: issue a warning
                    if i > 0:
                        warning(arg.pos, "Argument evaluation order in C function call is undefined and may not be as expected", 0)
                        break
        return self

    def generate_result_code(self, code):
        # Call the def function's C implementation (pyfunc_cname) directly,
        # passing the function object first, then the coerced arguments.
        arg_code = [self.function_name.py_result()]
        func_type = self.function.def_node
        for arg, proto_arg in zip(self.args, func_type.args):
            if arg.type.is_pyobject:
                arg_code.append(arg.result_as(proto_arg.type))
            else:
                arg_code.append(arg.result())
        arg_code = ', '.join(arg_code)
        code.putln(
            "%s = %s(%s); %s" % (
                self.result(),
                self.function.def_node.entry.pyfunc_cname,
                arg_code,
                code.error_goto_if_null(self.result(), self.pos)))
        code.put_gotref(self.py_result())
class PythonCapiFunctionNode(ExprNode):
    """Reference to a C-API (or utility-code) function, known by cname.

    Evaluates to the plain C name; generating it only registers the
    backing utility code, if any.
    """
    subexprs = []

    def __init__(self, pos, py_name, cname, func_type, utility_code = None):
        ExprNode.__init__(self, pos, name=py_name, cname=cname,
                          type=func_type, utility_code=utility_code)

    def analyse_types(self, env):
        # Nothing to analyse: name and type are fixed at construction.
        return self

    def generate_result_code(self, code):
        if self.utility_code:
            code.globalstate.use_utility_code(self.utility_code)

    def calculate_result_code(self):
        return self.cname
class PythonCapiCallNode(SimpleCallNode):
    """Python C-API function call (only created in transforms).

    By default, we assume that the call never returns None, as this
    is true for most C-API functions in CPython.  If this does not
    apply to a call, set may_return_none to True (or None to inherit
    the default behaviour).
    """
    may_return_none = False

    def __init__(self, pos, function_name, func_type,
                 utility_code = None, py_name=None, **kwargs):
        self.type = func_type.return_type
        self.result_ctype = self.type
        self.function = PythonCapiFunctionNode(
            pos, py_name, function_name, func_type,
            utility_code = utility_code)
        # call this last so that we can override the constructed
        # attributes above with explicit keyword arguments if required
        SimpleCallNode.__init__(self, pos, **kwargs)
class GeneralCallNode(CallNode):
    """General Python function call, including keyword, * and ** arguments.

    function         ExprNode
    positional_args  ExprNode          Tuple of positional arguments
    keyword_args     ExprNode or None  Dict of keyword arguments
    """

    type = py_object_type

    subexprs = ['function', 'positional_args', 'keyword_args']

    # General calls always require the GIL.
    nogil_check = Node.gil_error
def compile_time_value(self, denv):
    """Evaluate the call at compile time (DEF/IF contexts), including
    keyword arguments."""
    function = self.function.compile_time_value(denv)
    positional_args = self.positional_args.compile_time_value(denv)
    keyword_args = self.keyword_args.compile_time_value(denv)
    try:
        return function(*positional_args, **keyword_args)
    except Exception as e:
        self.compile_time_value_error(e)
def explicit_args_kwds(self):
    """Return (positional arg nodes, keyword args node) for struct
    construction; requires literal positional args and a literal kwarg
    dict, otherwise the mapping cannot be done at compile time."""
    if (self.keyword_args and not self.keyword_args.is_dict_literal or
            not self.positional_args.is_sequence_constructor):
        raise CompileError(self.pos,
            'Compile-time keyword arguments must be explicit.')
    return self.positional_args.args, self.keyword_args
def analyse_types(self, env):
if self.analyse_as_type_constructor(env):
return self
self.function = self.function.analyse_types(env)
if not self.function.type.is_pyobject:
if self.function.type.is_error:
self.type = error_type
return self
if hasattr(self.function, 'entry'):
node = self.map_to_simple_call_node()
if node is not None and node is not self:
return node.analyse_types(env)
elif self.function.entry.as_variable:
self.function = self.function.coerce_to_pyobject(env)
elif node is self:
error(self.pos,
"Non-trivial keyword arguments and starred "
"arguments not allowed in cdef functions.")
else:
# error was already reported
pass
else:
self.function = self.function.coerce_to_pyobject(env)
if self.keyword_args:
self.keyword_args = self.keyword_args.analyse_types(env)
self.positional_args = self.positional_args.analyse_types(env)
self.positional_args = \
self.positional_args.coerce_to_pyobject(env)
function = self.function
if function.is_name and function.type_entry:
# We are calling an extension type constructor. As long
# as we do not support __new__(), the result type is clear
self.type = function.type_entry.type
self.result_ctype = py_object_type
self.may_return_none = False
else:
self.type = py_object_type
self.is_temp = 1
return self
def map_to_simple_call_node(self):
"""
Tries to map keyword arguments to declared positional arguments.
Returns self to try a Python call, None to report an error
or a SimpleCallNode if the mapping succeeds.
"""
if not isinstance(self.positional_args, TupleNode):
# has starred argument
return self
if not self.keyword_args.is_dict_literal:
# keywords come from arbitrary expression => nothing to do here
return self
function = self.function
entry = getattr(function, 'entry', None)
if not entry:
return self
function_type = entry.type
if function_type.is_ptr:
function_type = function_type.base_type
if not function_type.is_cfunction:
return self
pos_args = self.positional_args.args
kwargs = self.keyword_args
declared_args = function_type.args
if entry.is_cmethod:
declared_args = declared_args[1:] # skip 'self'
if len(pos_args) > len(declared_args):
error(self.pos, "function call got too many positional arguments, "
"expected %d, got %s" % (len(declared_args),
len(pos_args)))
return None
matched_args = set([ arg.name for arg in declared_args[:len(pos_args)]
if arg.name ])
unmatched_args = declared_args[len(pos_args):]
matched_kwargs_count = 0
args = list(pos_args)
# check for duplicate keywords
seen = set(matched_args)
has_errors = False
for arg in kwargs.key_value_pairs:
name = arg.key.value
if name in seen:
error(arg.pos, "argument '%s' passed twice" % name)
has_errors = True
# continue to report more errors if there are any
seen.add(name)
# match keywords that are passed in order
for decl_arg, arg in zip(unmatched_args, kwargs.key_value_pairs):
name = arg.key.value
if decl_arg.name == name:
matched_args.add(name)
matched_kwargs_count += 1
args.append(arg.value)
else:
break
# match keyword arguments that are passed out-of-order, but keep
# the evaluation of non-simple arguments in order by moving them
# into temps
from .UtilNodes import EvalWithTempExprNode, LetRefNode
temps = []
if len(kwargs.key_value_pairs) > matched_kwargs_count:
unmatched_args = declared_args[len(args):]
keywords = dict([ (arg.key.value, (i+len(pos_args), arg))
for i, arg in enumerate(kwargs.key_value_pairs) ])
first_missing_keyword = None
for decl_arg in unmatched_args:
name = decl_arg.name
if name not in keywords:
# missing keyword argument => either done or error
if not first_missing_keyword:
first_missing_keyword = name
continue
elif first_missing_keyword:
if entry.as_variable:
# we might be able to convert the function to a Python
# object, which then allows full calling semantics
# with default values in gaps - currently, we only
# support optional arguments at the end
return self
# wasn't the last keyword => gaps are not supported
error(self.pos, "C function call is missing "
"argument '%s'" % first_missing_keyword)
return None
pos, arg = keywords[name]
matched_args.add(name)
matched_kwargs_count += 1
if arg.value.is_simple():
args.append(arg.value)
else:
temp = LetRefNode(arg.value)
assert temp.is_simple()
args.append(temp)
temps.append((pos, temp))
if temps:
# may have to move preceding non-simple args into temps
final_args = []
new_temps = []
first_temp_arg = temps[0][-1]
for arg_value in args:
if arg_value is first_temp_arg:
break # done
if arg_value.is_simple():
final_args.append(arg_value)
else:
temp = LetRefNode(arg_value)
new_temps.append(temp)
final_args.append(temp)
if new_temps:
args = final_args
temps = new_temps + [ arg for i,arg in sorted(temps) ]
# check for unexpected keywords
for arg in kwargs.key_value_pairs:
name = arg.key.value
if name not in matched_args:
has_errors = True
error(arg.pos,
"C function got unexpected keyword argument '%s'" %
name)
if has_errors:
# error was reported already
return None
# all keywords mapped to positional arguments
# if we are missing arguments, SimpleCallNode will figure it out
node = SimpleCallNode(self.pos, function=function, args=args)
for temp in temps[::-1]:
node = EvalWithTempExprNode(temp, node)
return node
def generate_result_code(self, code):
if self.type.is_error: return
if self.keyword_args:
kwargs = self.keyword_args.py_result()
else:
kwargs = 'NULL'
code.globalstate.use_utility_code(UtilityCode.load_cached(
"PyObjectCall", "ObjectHandling.c"))
code.putln(
"%s = __Pyx_PyObject_Call(%s, %s, %s); %s" % (
self.result(),
self.function.py_result(),
self.positional_args.py_result(),
kwargs,
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
class AsTupleNode(ExprNode):
    #  Convert argument to tuple.  Used for normalising
    #  the * argument of a function call.
    #
    #  arg    ExprNode

    subexprs = ['arg']

    def calculate_constant_result(self):
        # Constant folding: tuple() the already-folded argument.
        self.constant_result = tuple(self.arg.constant_result)

    def compile_time_value(self, denv):
        value = self.arg.compile_time_value(denv)
        try:
            return tuple(value)
        except Exception as e:
            self.compile_time_value_error(e)

    def analyse_types(self, env):
        arg = self.arg.analyse_types(env).coerce_to_pyobject(env)
        self.arg = arg
        if arg.type is tuple_type:
            # already a tuple => only a None check is needed
            return arg.as_none_safe_node("'NoneType' object is not iterable")
        self.type = tuple_type
        self.is_temp = 1
        return self

    def may_be_none(self):
        # PySequence_Tuple() never returns None on success.
        return False

    nogil_check = Node.gil_error
    gil_message = "Constructing Python tuple"

    def generate_result_code(self, code):
        result = self.result()
        code.putln(
            "%s = PySequence_Tuple(%s); %s" % (
                result,
                self.arg.py_result(),
                code.error_goto_if_null(result, self.pos)))
        code.put_gotref(self.py_result())
class MergedDictNode(ExprNode):
    #  Helper class for keyword arguments and other merged dicts.
    #
    #  keyword_args      [DictNode or other ExprNode]

    subexprs = ['keyword_args']
    is_temp = 1
    type = dict_type
    # keyword argument dicts must reject duplicate keys
    reject_duplicates = True

    def calculate_constant_result(self):
        # Fold the merge at compile time when all parts are constant.
        result = {}
        reject_duplicates = self.reject_duplicates
        for item in self.keyword_args:
            if item.is_dict_literal:
                # process items in order
                items = ((key.constant_result, value.constant_result)
                         for key, value in item.key_value_pairs)
            else:
                # the folded value is a real dict; use items() rather than
                # the Py2-only iteritems() so this also works when the
                # compiler itself runs on Python 3
                items = item.constant_result.items()
            for key, value in items:
                if reject_duplicates and key in result:
                    raise ValueError("duplicate keyword argument found: %s" % key)
                result[key] = value
        self.constant_result = result

    def compile_time_value(self, denv):
        # Evaluate the merge at compile time (used for DEF values).
        result = {}
        reject_duplicates = self.reject_duplicates
        for item in self.keyword_args:
            if item.is_dict_literal:
                # process items in order
                items = [(key.compile_time_value(denv), value.compile_time_value(denv))
                         for key, value in item.key_value_pairs]
            else:
                # items() instead of Py2-only iteritems() (see above)
                items = item.compile_time_value(denv).items()
            try:
                for key, value in items:
                    if reject_duplicates and key in result:
                        raise ValueError("duplicate keyword argument found: %s" % key)
                    result[key] = value
            except Exception as e:
                self.compile_time_value_error(e)
        return result

    def type_dependencies(self, env):
        return ()

    def infer_type(self, env):
        return dict_type

    def analyse_types(self, env):
        # Each part must coerce to a Python object and must not be None.
        args = [
            arg.analyse_types(env).coerce_to_pyobject(env).as_none_safe_node(
                # FIXME: CPython's error message starts with the runtime function name
                'argument after ** must be a mapping, not NoneType')
            for arg in self.keyword_args
        ]

        if len(args) == 1 and args[0].type is dict_type:
            # strip this intermediate node and use the bare dict
            arg = args[0]
            if arg.is_name and arg.entry.is_arg and len(arg.entry.cf_assignments) == 1:
                # passing **kwargs through to function call => allow NULL
                arg.allow_null = True
            return arg

        self.keyword_args = args
        return self

    def may_be_none(self):
        return False

    gil_message = "Constructing Python dict"

    def generate_evaluation_code(self, code):
        # Start from a copy of the first item, then merge the rest into it.
        code.mark_pos(self.pos)
        self.allocate_temp_result(code)

        args = iter(self.keyword_args)
        item = next(args)
        item.generate_evaluation_code(code)
        if item.type is not dict_type:
            # CPython supports calling functions with non-dicts, so do we
            code.putln('if (likely(PyDict_CheckExact(%s))) {' %
                       item.py_result())

        if item.is_dict_literal:
            # a fresh literal dict can be taken over directly
            item.make_owned_reference(code)
            code.putln("%s = %s;" % (self.result(), item.py_result()))
            item.generate_post_assignment_code(code)
        else:
            code.putln("%s = PyDict_Copy(%s); %s" % (
                self.result(),
                item.py_result(),
                code.error_goto_if_null(self.result(), item.pos)))
            code.put_gotref(self.result())
            item.generate_disposal_code(code)

        if item.type is not dict_type:
            # non-dict mapping => convert it through dict(x)
            code.putln('} else {')
            code.putln("%s = PyObject_CallFunctionObjArgs((PyObject*)&PyDict_Type, %s, NULL); %s" % (
                self.result(),
                item.py_result(),
                code.error_goto_if_null(self.result(), self.pos)))
            code.put_gotref(self.py_result())
            item.generate_disposal_code(code)
            code.putln('}')
        item.free_temps(code)

        helpers = set()
        for item in args:
            if item.is_dict_literal:
                # inline update instead of creating an intermediate dict
                for arg in item.key_value_pairs:
                    arg.generate_evaluation_code(code)
                    if self.reject_duplicates:
                        code.putln("if (unlikely(PyDict_Contains(%s, %s))) {" % (
                            self.result(),
                            arg.key.py_result()))
                        helpers.add("RaiseDoubleKeywords")
                        # FIXME: find out function name at runtime!
                        code.putln('__Pyx_RaiseDoubleKeywordsError("function", %s); %s' % (
                            arg.key.py_result(),
                            code.error_goto(self.pos)))
                        code.putln("}")
                    code.put_error_if_neg(arg.key.pos, "PyDict_SetItem(%s, %s, %s)" % (
                        self.result(),
                        arg.key.py_result(),
                        arg.value.py_result()))
                    arg.generate_disposal_code(code)
                    arg.free_temps(code)
            else:
                item.generate_evaluation_code(code)
                if self.reject_duplicates:
                    # merge mapping into kwdict one by one as we need to check for duplicates
                    helpers.add("MergeKeywords")
                    code.put_error_if_neg(item.pos, "__Pyx_MergeKeywords(%s, %s)" % (
                        self.result(), item.py_result()))
                else:
                    # simple case, just add all entries
                    helpers.add("RaiseMappingExpected")
                    code.putln("if (unlikely(PyDict_Update(%s, %s) < 0)) {" % (
                        self.result(), item.py_result()))
                    code.putln("if (PyErr_ExceptionMatches(PyExc_AttributeError)) "
                               "__Pyx_RaiseMappingExpectedError(%s);" % item.py_result())
                    code.putln(code.error_goto(item.pos))
                    code.putln("}")
                item.generate_disposal_code(code)
                item.free_temps(code)

        for helper in sorted(helpers):
            code.globalstate.use_utility_code(UtilityCode.load_cached(helper, "FunctionArguments.c"))

    def annotate(self, code):
        for item in self.keyword_args:
            item.annotate(code)
class AttributeNode(ExprNode):
    #  obj.attribute
    #
    #  obj          ExprNode
    #  attribute    string
    #  needs_none_check boolean        Used if obj is an extension type.
    #                                  If set to True, it is known that the type is not None.
    #
    #  Used internally:
    #
    #  is_py_attr           boolean   Is a Python getattr operation
    #  member               string    C name of struct member
    #  is_called            boolean   Function call is being done on result
    #  entry                Entry     Symbol table entry of attribute

    is_attribute = 1
    subexprs = ['obj']

    type = PyrexTypes.error_type
    entry = None
    is_called = 0
    needs_none_check = True
    is_memslice_transpose = False
    is_special_lookup = False

    def as_cython_attribute(self):
        # Return the dotted "cython.*" attribute path if this node refers
        # into the cython compile-time module, else None.
        if (isinstance(self.obj, NameNode) and
                self.obj.is_cython_module and not
                self.attribute == u"parallel"):
            return self.attribute
        cy = self.obj.as_cython_attribute()
        if cy:
            return "%s.%s" % (cy, self.attribute)
        return None

    def coerce_to(self, dst_type, env):
        #  If coercing to a generic pyobject and this is a cpdef function
        #  we can create the corresponding attribute
        if dst_type is py_object_type:
            entry = self.entry
            if entry and entry.is_cfunction and entry.as_variable:
                # must be a cpdef function
                self.is_temp = 1
                self.entry = entry.as_variable
                self.analyse_as_python_attribute(env)
                return self
        return ExprNode.coerce_to(self, dst_type, env)

    def calculate_constant_result(self):
        # never fold dunder attributes at compile time
        attr = self.attribute
        if attr.startswith("__") and attr.endswith("__"):
            return
        self.constant_result = getattr(self.obj.constant_result, attr)

    def compile_time_value(self, denv):
        attr = self.attribute
        if attr.startswith("__") and attr.endswith("__"):
            error(self.pos,
                  "Invalid attribute name '%s' in compile-time expression" % attr)
            return None
        obj = self.obj.compile_time_value(denv)
        try:
            return getattr(obj, attr)
        except Exception as e:
            self.compile_time_value_error(e)

    def type_dependencies(self, env):
        return self.obj.type_dependencies(env)

    def infer_type(self, env):
        # FIXME: this is way too redundant with analyse_types()
        node = self.analyse_as_cimported_attribute_node(env, target=False)
        if node is not None:
            return node.entry.type
        node = self.analyse_as_type_attribute(env)
        if node is not None:
            return node.entry.type
        obj_type = self.obj.infer_type(env)
        self.analyse_attribute(env, obj_type=obj_type)
        if obj_type.is_builtin_type and self.type.is_cfunction:
            # special case: C-API replacements for C methods of
            # builtin types cannot be inferred as C functions as
            # that would prevent their use as bound methods
            return py_object_type
        return self.type

    def analyse_target_declaration(self, env):
        pass

    def analyse_target_types(self, env):
        # Analyse this attribute as an assignment target.
        node = self.analyse_types(env, target = 1)
        if node.type.is_const:
            error(self.pos, "Assignment to const attribute '%s'" % self.attribute)
        if not node.is_lvalue():
            error(self.pos, "Assignment to non-lvalue of type '%s'" % self.type)
        return node

    def analyse_types(self, env, target = 0):
        # Dispatch to the first applicable interpretation: cimported name,
        # unbound type attribute, or ordinary (C or Python) attribute.
        self.initialized_check = env.directives['initializedcheck']
        node = self.analyse_as_cimported_attribute_node(env, target)
        if node is None and not target:
            node = self.analyse_as_type_attribute(env)
        if node is None:
            node = self.analyse_as_ordinary_attribute_node(env, target)
            assert node is not None
        if node.entry:
            node.entry.used = True
        if node.is_attribute:
            node.wrap_obj_in_nonecheck(env)
        return node

    def analyse_as_cimported_attribute_node(self, env, target):
        # Try to interpret this as a reference to an imported
        # C const, type, var or function. If successful, mutates
        # this node into a NameNode and returns 1, otherwise
        # returns 0.
        # NOTE(review): despite the comment above, this actually returns a
        # new NameNode or None (it does not mutate self).
        module_scope = self.obj.analyse_as_module(env)
        if module_scope:
            entry = module_scope.lookup_here(self.attribute)
            if entry and (
                    entry.is_cglobal or entry.is_cfunction
                    or entry.is_type or entry.is_const):
                return self.as_name_node(env, entry, target)
        return None

    def analyse_as_type_attribute(self, env):
        # Try to interpret this as a reference to an unbound
        # C method of an extension type or builtin type. If successful,
        # creates a corresponding NameNode and returns it, otherwise
        # returns None.
        if self.obj.is_string_literal:
            return
        type = self.obj.analyse_as_type(env)
        if type:
            if type.is_extension_type or type.is_builtin_type or type.is_cpp_class:
                entry = type.scope.lookup_here(self.attribute)
                if entry and (entry.is_cmethod or type.is_cpp_class and entry.type.is_cfunction):
                    if type.is_builtin_type:
                        if not self.is_called:
                            # must handle this as Python object
                            return None
                        ubcm_entry = entry
                    else:
                        # Create a temporary entry describing the C method
                        # as an ordinary function.
                        if entry.func_cname and not hasattr(entry.type, 'op_arg_struct'):
                            cname = entry.func_cname
                            if entry.type.is_static_method:
                                ctype = entry.type
                            elif type.is_cpp_class:
                                error(self.pos, "%s not a static member of %s" % (entry.name, type))
                                ctype = PyrexTypes.error_type
                            else:
                                # Fix self type.
                                ctype = copy.copy(entry.type)
                                ctype.args = ctype.args[:]
                                ctype.args[0] = PyrexTypes.CFuncTypeArg('self', type, 'self', None)
                        else:
                            # C method only reachable through the vtable
                            cname = "%s->%s" % (type.vtabptr_cname, entry.cname)
                            ctype = entry.type
                        ubcm_entry = Symtab.Entry(entry.name, cname, ctype)
                        ubcm_entry.is_cfunction = 1
                        ubcm_entry.func_cname = entry.func_cname
                        ubcm_entry.is_unbound_cmethod = 1
                    return self.as_name_node(env, ubcm_entry, target=False)
            elif type.is_enum:
                if self.attribute in type.values:
                    return self.as_name_node(env, env.lookup(self.attribute), target=False)
                else:
                    error(self.pos, "%s not a known value of %s" % (self.attribute, type))
        return None

    def analyse_as_type(self, env):
        # Try to interpret this attribute as naming a type; returns the
        # looked-up type or None.
        module_scope = self.obj.analyse_as_module(env)
        if module_scope:
            return module_scope.lookup_type(self.attribute)
        if not self.obj.is_string_literal:
            base_type = self.obj.analyse_as_type(env)
            if base_type and hasattr(base_type, 'scope') and base_type.scope is not None:
                return base_type.scope.lookup_type(self.attribute)
        return None

    def analyse_as_extension_type(self, env):
        # Try to interpret this as a reference to an extension type
        # in a cimported module. Returns the extension type, or None.
        module_scope = self.obj.analyse_as_module(env)
        if module_scope:
            entry = module_scope.lookup_here(self.attribute)
            if entry and entry.is_type:
                if entry.type.is_extension_type or entry.type.is_builtin_type:
                    return entry.type
        return None

    def analyse_as_module(self, env):
        # Try to interpret this as a reference to a cimported module
        # in another cimported module. Returns the module scope, or None.
        module_scope = self.obj.analyse_as_module(env)
        if module_scope:
            entry = module_scope.lookup_here(self.attribute)
            if entry and entry.as_module:
                return entry.as_module
        return None

    def as_name_node(self, env, entry, target):
        # Create a corresponding NameNode from this node and complete the
        # analyse_types phase.
        node = NameNode.from_node(self, name=self.attribute, entry=entry)
        if target:
            node = node.analyse_target_types(env)
        else:
            node = node.analyse_rvalue_entry(env)
        node.entry.used = 1
        return node

    def analyse_as_ordinary_attribute_node(self, env, target):
        # Fallback: analyse as a plain C struct member or Python attribute.
        self.obj = self.obj.analyse_types(env)
        self.analyse_attribute(env)
        if self.entry and self.entry.is_cmethod and not self.is_called:
#            error(self.pos, "C method can only be called")
            pass
        ## Reference to C array turns into pointer to first element.
        #while self.type.is_array:
        #    self.type = self.type.element_ptr_type()
        if self.is_py_attr:
            if not target:
                self.is_temp = 1
                self.result_ctype = py_object_type
        elif target and self.obj.type.is_builtin_type:
            error(self.pos, "Assignment to an immutable object field")
        #elif self.type.is_memoryviewslice and not target:
        #    self.is_temp = True
        return self

    def analyse_attribute(self, env, obj_type = None):
        # Look up attribute and set self.type and self.member.
        immutable_obj = obj_type is not None # used during type inference
        self.is_py_attr = 0
        self.member = self.attribute
        if obj_type is None:
            if self.obj.type.is_string or self.obj.type.is_pyunicode_ptr:
                self.obj = self.obj.coerce_to_pyobject(env)
            obj_type = self.obj.type
        else:
            if obj_type.is_string or obj_type.is_pyunicode_ptr:
                obj_type = py_object_type
        # choose the C member access operator for the base object
        if obj_type.is_ptr or obj_type.is_array:
            obj_type = obj_type.base_type
            self.op = "->"
        elif obj_type.is_extension_type or obj_type.is_builtin_type:
            self.op = "->"
        elif obj_type.is_reference and obj_type.is_fake_reference:
            self.op = "->"
        else:
            self.op = "."
        if obj_type.has_attributes:
            if obj_type.attributes_known():
                if (obj_type.is_memoryviewslice and not
                        obj_type.scope.lookup_here(self.attribute)):
                    if self.attribute == 'T':
                        # memoryview transpose: result is a new slice temp
                        self.is_memslice_transpose = True
                        self.is_temp = True
                        self.use_managed_ref = True
                        self.type = self.obj.type.transpose(self.pos)
                        return
                    else:
                        obj_type.declare_attribute(self.attribute, env, self.pos)
                entry = obj_type.scope.lookup_here(self.attribute)
                if entry and entry.is_member:
                    entry = None
            else:
                error(self.pos,
                      "Cannot select attribute of incomplete type '%s'"
                      % obj_type)
                self.type = PyrexTypes.error_type
                return
            self.entry = entry
            if entry:
                if obj_type.is_extension_type and entry.name == "__weakref__":
                    error(self.pos, "Illegal use of special attribute __weakref__")

                # def methods need the normal attribute lookup
                # because they do not have struct entries
                # fused function go through assignment synthesis
                # (foo = pycfunction(foo_func_obj)) and need to go through
                # regular Python lookup as well
                if (entry.is_variable and not entry.fused_cfunction) or entry.is_cmethod:
                    self.type = entry.type
                    self.member = entry.cname
                    return
                else:
                    # If it's not a variable or C method, it must be a Python
                    # method of an extension type, so we treat it like a Python
                    # attribute.
                    pass
        # If we get here, the base object is not a struct/union/extension
        # type, or it is an extension type and the attribute is either not
        # declared or is declared as a Python method. Treat it as a Python
        # attribute reference.
        self.analyse_as_python_attribute(env, obj_type, immutable_obj)

    def analyse_as_python_attribute(self, env, obj_type=None, immutable_obj=False):
        # Analyse as a runtime getattr() on a Python object.
        if obj_type is None:
            obj_type = self.obj.type
        # mangle private '__*' Python attributes used inside of a class
        self.attribute = env.mangle_class_private_name(self.attribute)
        self.member = self.attribute
        self.type = py_object_type
        self.is_py_attr = 1

        if not obj_type.is_pyobject and not obj_type.is_error:
            if obj_type.can_coerce_to_pyobject(env):
                if not immutable_obj:
                    self.obj = self.obj.coerce_to_pyobject(env)
            elif (obj_type.is_cfunction and (self.obj.is_name or self.obj.is_attribute)
                    and self.obj.entry.as_variable
                    and self.obj.entry.as_variable.type.is_pyobject):
                # might be an optimised builtin function => unpack it
                if not immutable_obj:
                    self.obj = self.obj.coerce_to_pyobject(env)
            else:
                error(self.pos,
                      "Object of type '%s' has no attribute '%s'" %
                      (obj_type, self.attribute))

    def wrap_obj_in_nonecheck(self, env):
        # With the 'nonecheck' directive, guard the base object against None.
        if not env.directives['nonecheck']:
            return

        msg = None
        format_args = ()
        if (self.obj.type.is_extension_type and self.needs_none_check and not
                self.is_py_attr):
            msg = "'NoneType' object has no attribute '%s'"
            format_args = (self.attribute,)
        elif self.obj.type.is_memoryviewslice:
            if self.is_memslice_transpose:
                msg = "Cannot transpose None memoryview slice"
            else:
                entry = self.obj.type.scope.lookup_here(self.attribute)
                if entry:
                    # copy/is_c_contig/shape/strides etc
                    msg = "Cannot access '%s' attribute of None memoryview slice"
                    format_args = (entry.name,)

        if msg:
            self.obj = self.obj.as_none_safe_node(msg, 'PyExc_AttributeError',
                                                  format_args=format_args)

    def nogil_check(self, env):
        if self.is_py_attr:
            self.gil_error()

    gil_message = "Accessing Python attribute"

    def is_simple(self):
        if self.obj:
            return self.result_in_temp() or self.obj.is_simple()
        else:
            return NameNode.is_simple(self)

    def is_lvalue(self):
        if self.obj:
            return True
        else:
            return NameNode.is_lvalue(self)

    def is_ephemeral(self):
        if self.obj:
            return self.obj.is_ephemeral()
        else:
            return NameNode.is_ephemeral(self)

    def calculate_result_code(self):
        #print "AttributeNode.calculate_result_code:", self.member ###
        #print "...obj node =", self.obj, "code", self.obj.result() ###
        #print "...obj type", self.obj.type, "ctype", self.obj.ctype() ###
        obj = self.obj
        obj_code = obj.result_as(obj.type)
        #print "...obj_code =", obj_code ###
        if self.entry and self.entry.is_cmethod:
            if obj.type.is_extension_type and not self.entry.is_builtin_cmethod:
                if self.entry.final_func_cname:
                    return self.entry.final_func_cname
                if self.type.from_fused:
                    # If the attribute was specialized through indexing, make
                    # sure to get the right fused name, as our entry was
                    # replaced by our parent index node
                    # (AnalyseExpressionsTransform)
                    self.member = self.entry.cname
                return "((struct %s *)%s%s%s)->%s" % (
                    obj.type.vtabstruct_cname, obj_code, self.op,
                    obj.type.vtabslot_cname, self.member)
            elif self.result_is_used:
                return self.member
            # Generating no code at all for unused access to optimised builtin
            # methods fixes the problem that some optimisations only exist as
            # macros, i.e. there is no function pointer to them, so we would
            # generate invalid C code here.
            return
        elif obj.type.is_complex:
            return "__Pyx_C%s(%s)" % (self.member.upper(), obj_code)
        else:
            if obj.type.is_builtin_type and self.entry and self.entry.is_variable:
                # accessing a field of a builtin type, need to cast better than result_as() does
                obj_code = obj.type.cast_code(obj.result(), to_object_struct = True)
            return "%s%s%s" % (obj_code, self.op, self.member)

    def generate_result_code(self, code):
        # Emit runtime getattr for Python attributes; for C attributes the
        # result code is an expression, so only utility code may be needed.
        if self.is_py_attr:
            if self.is_special_lookup:
                code.globalstate.use_utility_code(
                    UtilityCode.load_cached("PyObjectLookupSpecial", "ObjectHandling.c"))
                lookup_func_name = '__Pyx_PyObject_LookupSpecial'
            else:
                code.globalstate.use_utility_code(
                    UtilityCode.load_cached("PyObjectGetAttrStr", "ObjectHandling.c"))
                lookup_func_name = '__Pyx_PyObject_GetAttrStr'
            code.putln(
                '%s = %s(%s, %s); %s' % (
                    self.result(),
                    lookup_func_name,
                    self.obj.py_result(),
                    code.intern_identifier(self.attribute),
                    code.error_goto_if_null(self.result(), self.pos)))
            code.put_gotref(self.py_result())
        elif self.type.is_memoryviewslice:
            if self.is_memslice_transpose:
                # transpose the slice
                for access, packing in self.type.axes:
                    if access == 'ptr':
                        error(self.pos, "Transposing not supported for slices "
                                        "with indirect dimensions")
                        return

                code.putln("%s = %s;" % (self.result(), self.obj.result()))
                if self.obj.is_name or (self.obj.is_attribute and
                                        self.obj.is_memslice_transpose):
                    code.put_incref_memoryviewslice(self.result(), have_gil=True)

                T = "__pyx_memslice_transpose(&%s) == 0"
                code.putln(code.error_goto_if(T % self.result(), self.pos))
            elif self.initialized_check:
                code.putln(
                    'if (unlikely(!%s.memview)) {'
                    'PyErr_SetString(PyExc_AttributeError,'
                    '"Memoryview is not initialized");'
                    '%s'
                    '}' % (self.result(), code.error_goto(self.pos)))
        else:
            # result_code contains what is needed, but we may need to insert
            # a check and raise an exception
            if self.obj.type.is_extension_type:
                pass
            elif self.entry and self.entry.is_cmethod and self.entry.utility_code:
                # C method implemented as function call with utility code
                code.globalstate.use_utility_code(self.entry.utility_code)

    def generate_disposal_code(self, code):
        if self.is_temp and self.type.is_memoryviewslice and self.is_memslice_transpose:
            # mirror condition for putting the memview incref here:
            if self.obj.is_name or (self.obj.is_attribute and
                                    self.obj.is_memslice_transpose):
                code.put_xdecref_memoryviewslice(
                    self.result(), have_gil=True)
        else:
            ExprNode.generate_disposal_code(self, code)

    def generate_assignment_code(self, rhs, code, overloaded_assignment=False,
                                 exception_check=None, exception_value=None):
        # Emit 'obj.attribute = rhs' for Python, complex, memoryview and
        # plain C member targets; rhs is disposed of here.
        self.obj.generate_evaluation_code(code)
        if self.is_py_attr:
            code.globalstate.use_utility_code(
                UtilityCode.load_cached("PyObjectSetAttrStr", "ObjectHandling.c"))
            code.put_error_if_neg(self.pos,
                '__Pyx_PyObject_SetAttrStr(%s, %s, %s)' % (
                    self.obj.py_result(),
                    code.intern_identifier(self.attribute),
                    rhs.py_result()))
            rhs.generate_disposal_code(code)
            rhs.free_temps(code)
        elif self.obj.type.is_complex:
            code.putln("__Pyx_SET_C%s(%s, %s);" % (
                self.member.upper(),
                self.obj.result_as(self.obj.type),
                rhs.result_as(self.ctype())))
        else:
            select_code = self.result()
            if self.type.is_pyobject and self.use_managed_ref:
                rhs.make_owned_reference(code)
                code.put_giveref(rhs.py_result())
                code.put_gotref(select_code)
                code.put_decref(select_code, self.ctype())
            elif self.type.is_memoryviewslice:
                from . import MemoryView
                MemoryView.put_assign_to_memviewslice(
                    select_code, rhs, rhs.result(), self.type, code)

            if not self.type.is_memoryviewslice:
                code.putln(
                    "%s = %s;" % (
                        select_code,
                        rhs.result_as(self.ctype())))
                        #rhs.result()))
            rhs.generate_post_assignment_code(code)
            rhs.free_temps(code)
        self.obj.generate_disposal_code(code)
        self.obj.free_temps(code)

    def generate_deletion_code(self, code, ignore_nonexisting=False):
        # Emit 'del obj.attribute'; only Python attributes (and properties
        # with __del__) can be deleted.
        self.obj.generate_evaluation_code(code)
        if self.is_py_attr or (self.entry.scope.is_property_scope
                               and u'__del__' in self.entry.scope.entries):
            code.globalstate.use_utility_code(
                UtilityCode.load_cached("PyObjectSetAttrStr", "ObjectHandling.c"))
            code.put_error_if_neg(self.pos,
                '__Pyx_PyObject_DelAttrStr(%s, %s)' % (
                    self.obj.py_result(),
                    code.intern_identifier(self.attribute)))
        else:
            error(self.pos, "Cannot delete C attribute of extension type")
        self.obj.generate_disposal_code(code)
        self.obj.free_temps(code)

    def annotate(self, code):
        if self.is_py_attr:
            style, text = 'py_attr', 'python attribute (%s)'
        else:
            style, text = 'c_attr', 'c attribute (%s)'
        code.annotate(self.pos, AnnotationItem(style, text % self.type, size=len(self.attribute)))
#-------------------------------------------------------------------
#
# Constructor nodes
#
#-------------------------------------------------------------------
class StarredUnpackingNode(ExprNode):
    #  A starred expression like "*a"
    #
    #  This is only allowed in sequence assignment or construction such as
    #
    #      a, *b = (1,2,3,4)    =>     a = 1 ; b = [2,3,4]
    #
    #  and will be special cased during type analysis (or generate an error
    #  if it's found at unexpected places).
    #
    #  target          ExprNode

    subexprs = ['target']
    is_starred = 1
    type = py_object_type
    is_temp = 1
    starred_expr_allowed_here = False

    def __init__(self, pos, target):
        ExprNode.__init__(self, pos, target=target)

    def analyse_declarations(self, env):
        if not self.starred_expr_allowed_here:
            error(self.pos, "starred expression is not allowed here")
        self.target.analyse_declarations(env)

    def infer_type(self, env):
        # The type is entirely determined by the wrapped target.
        return self.target.infer_type(env)

    def analyse_types(self, env):
        if not self.starred_expr_allowed_here:
            error(self.pos, "starred expression is not allowed here")
        target = self.target.analyse_types(env)
        self.target = target
        self.type = target.type
        return self

    def analyse_target_declaration(self, env):
        self.target.analyse_target_declaration(env)

    def analyse_target_types(self, env):
        target = self.target.analyse_target_types(env)
        self.target = target
        self.type = target.type
        return self

    def calculate_result_code(self):
        # This node never produces code of its own.
        return ""

    def generate_result_code(self, code):
        pass
class SequenceNode(ExprNode):
# Base class for list and tuple constructor nodes.
# Contains common code for performing sequence unpacking.
#
# args [ExprNode]
# unpacked_items [ExprNode] or None
# coerced_unpacked_items [ExprNode] or None
# mult_factor ExprNode the integer number of content repetitions ([1,2]*3)
subexprs = ['args', 'mult_factor']
is_sequence_constructor = 1
unpacked_items = None
mult_factor = None
slow = False # trade speed for code size (e.g. use PyTuple_Pack())
def compile_time_value_list(self, denv):
    # Helper for subclasses: fold every element expression at compile time.
    return [arg.compile_time_value(denv) for arg in self.args]
def replace_starred_target_node(self):
    # replace a starred node in the targets by the contained expression
    self.starred_assignment = False
    args = []
    for arg in self.args:
        if arg.is_starred:
            if self.starred_assignment:
                # at most one '*target' is allowed per assignment
                error(arg.pos, "more than 1 starred expression in assignment")
            self.starred_assignment = True
            # unwrap the StarredUnpackingNode, but remember it was starred
            arg = arg.target
            arg.is_starred = True
        args.append(arg)
    self.args = args
def analyse_target_declaration(self, env):
    # Normalise starred targets first, then declare each element target.
    self.replace_starred_target_node()
    for arg in self.args:
        arg.analyse_target_declaration(env)
def analyse_types(self, env, skip_children=False):
    # Analyse and coerce all element expressions to Python objects;
    # with skip_children=True the elements are assumed already analysed.
    for i, arg in enumerate(self.args):
        if not skip_children:
            arg = arg.analyse_types(env)
        self.args[i] = arg.coerce_to_pyobject(env)
    if self.mult_factor:
        self.mult_factor = self.mult_factor.analyse_types(env)
        if not self.mult_factor.type.is_int:
            self.mult_factor = self.mult_factor.coerce_to_pyobject(env)
    self.is_temp = 1
    # not setting self.type here, subtypes do this
    return self
def coerce_to_ctuple(self, dst_type, env):
    # Coerce this sequence literal to a C tuple type of matching length,
    # coercing each element to the corresponding component type.
    if self.type == dst_type:
        return self
    assert not self.mult_factor
    if len(self.args) != dst_type.size:
        error(self.pos, "trying to coerce sequence to ctuple of wrong length, expected %d, got %d" % (
            dst_type.size, len(self.args)))
    coerced_args = [arg.coerce_to(type, env) for arg, type in zip(self.args, dst_type.components)]
    return TupleNode(self.pos, args=coerced_args, type=dst_type, is_temp=True)
def _create_merge_node_if_necessary(self, env):
self._flatten_starred_args()
if not any(arg.is_starred for arg in self.args):
return self
# convert into MergedSequenceNode by building partial sequences
args = []
values = []
for arg in self.args:
if arg.is_starred:
if values:
args.append(TupleNode(values[0].pos, args=values).analyse_types(env, skip_children=True))
values = []
args.append(arg.target)
else:
values.append(arg)
if values:
args.append(TupleNode(values[0].pos, args=values).analyse_types(env, skip_children=True))
node = MergedSequenceNode(self.pos, args, self.type)
if self.mult_factor:
node = binop_node(
self.pos, '*', node, self.mult_factor.coerce_to_pyobject(env),
inplace=True, type=self.type, is_temp=True)
return node
def _flatten_starred_args(self):
args = []
for arg in self.args:
if arg.is_starred and arg.target.is_sequence_constructor and not arg.target.mult_factor:
args.extend(arg.target.args)
else:
args.append(arg)
self.args[:] = args
def may_be_none(self):
return False
def analyse_target_types(self, env):
if self.mult_factor:
error(self.pos, "can't assign to multiplied sequence")
self.unpacked_items = []
self.coerced_unpacked_items = []
self.any_coerced_items = False
for i, arg in enumerate(self.args):
arg = self.args[i] = arg.analyse_target_types(env)
if arg.is_starred:
if not arg.type.assignable_from(list_type):
error(arg.pos,
"starred target must have Python object (list) type")
if arg.type is py_object_type:
arg.type = list_type
unpacked_item = PyTempNode(self.pos, env)
coerced_unpacked_item = unpacked_item.coerce_to(arg.type, env)
if unpacked_item is not coerced_unpacked_item:
self.any_coerced_items = True
self.unpacked_items.append(unpacked_item)
self.coerced_unpacked_items.append(coerced_unpacked_item)
self.type = py_object_type
return self
def generate_result_code(self, code):
self.generate_operation_code(code)
def generate_sequence_packing_code(self, code, target=None, plain=False):
if target is None:
target = self.result()
size_factor = c_mult = ''
mult_factor = None
if self.mult_factor and not plain:
mult_factor = self.mult_factor
if mult_factor.type.is_int:
c_mult = mult_factor.result()
if (isinstance(mult_factor.constant_result, _py_int_types) and
mult_factor.constant_result > 0):
size_factor = ' * %s' % mult_factor.constant_result
elif mult_factor.type.signed:
size_factor = ' * ((%s<0) ? 0:%s)' % (c_mult, c_mult)
else:
size_factor = ' * (%s)' % (c_mult,)
if self.type is tuple_type and (self.is_literal or self.slow) and not c_mult:
# use PyTuple_Pack() to avoid generating huge amounts of one-time code
code.putln('%s = PyTuple_Pack(%d, %s); %s' % (
target,
len(self.args),
', '.join(arg.py_result() for arg in self.args),
code.error_goto_if_null(target, self.pos)))
code.put_gotref(target)
elif self.type.is_ctuple:
for i, arg in enumerate(self.args):
code.putln("%s.f%s = %s;" % (
target, i, arg.result()))
else:
# build the tuple/list step by step, potentially multiplying it as we go
if self.type is list_type:
create_func, set_item_func = 'PyList_New', 'PyList_SET_ITEM'
elif self.type is tuple_type:
create_func, set_item_func = 'PyTuple_New', 'PyTuple_SET_ITEM'
else:
raise InternalError("sequence packing for unexpected type %s" % self.type)
arg_count = len(self.args)
code.putln("%s = %s(%s%s); %s" % (
target, create_func, arg_count, size_factor,
code.error_goto_if_null(target, self.pos)))
code.put_gotref(target)
if c_mult:
# FIXME: can't use a temp variable here as the code may
# end up in the constant building function. Temps
# currently don't work there.
#counter = code.funcstate.allocate_temp(mult_factor.type, manage_ref=False)
counter = Naming.quick_temp_cname
code.putln('{ Py_ssize_t %s;' % counter)
if arg_count == 1:
offset = counter
else:
offset = '%s * %s' % (counter, arg_count)
code.putln('for (%s=0; %s < %s; %s++) {' % (
counter, counter, c_mult, counter
))
else:
offset = ''
for i in range(arg_count):
arg = self.args[i]
if c_mult or not arg.result_in_temp():
code.put_incref(arg.result(), arg.ctype())
code.put_giveref(arg.py_result())
code.putln("%s(%s, %s, %s);" % (
set_item_func,
target,
(offset and i) and ('%s + %s' % (offset, i)) or (offset or i),
arg.py_result()))
if c_mult:
code.putln('}')
#code.funcstate.release_temp(counter)
code.putln('}')
if mult_factor is not None and mult_factor.type.is_pyobject:
code.putln('{ PyObject* %s = PyNumber_InPlaceMultiply(%s, %s); %s' % (
Naming.quick_temp_cname, target, mult_factor.py_result(),
code.error_goto_if_null(Naming.quick_temp_cname, self.pos)
))
code.put_gotref(Naming.quick_temp_cname)
code.put_decref(target, py_object_type)
code.putln('%s = %s;' % (target, Naming.quick_temp_cname))
code.putln('}')
def generate_subexpr_disposal_code(self, code):
if self.mult_factor and self.mult_factor.type.is_int:
super(SequenceNode, self).generate_subexpr_disposal_code(code)
elif self.type is tuple_type and (self.is_literal or self.slow):
super(SequenceNode, self).generate_subexpr_disposal_code(code)
else:
# We call generate_post_assignment_code here instead
# of generate_disposal_code, because values were stored
# in the tuple using a reference-stealing operation.
for arg in self.args:
arg.generate_post_assignment_code(code)
# Should NOT call free_temps -- this is invoked by the default
# generate_evaluation_code which will do that.
if self.mult_factor:
self.mult_factor.generate_disposal_code(code)
def generate_assignment_code(self, rhs, code, overloaded_assignment=False,
exception_check=None, exception_value=None):
if self.starred_assignment:
self.generate_starred_assignment_code(rhs, code)
else:
self.generate_parallel_assignment_code(rhs, code)
for item in self.unpacked_items:
item.release(code)
rhs.free_temps(code)
_func_iternext_type = PyrexTypes.CPtrType(PyrexTypes.CFuncType(
PyrexTypes.py_object_type, [
PyrexTypes.CFuncTypeArg("it", PyrexTypes.py_object_type, None),
]))
def generate_parallel_assignment_code(self, rhs, code):
# Need to work around the fact that generate_evaluation_code
# allocates the temps in a rather hacky way -- the assignment
# is evaluated twice, within each if-block.
for item in self.unpacked_items:
item.allocate(code)
special_unpack = (rhs.type is py_object_type
or rhs.type in (tuple_type, list_type)
or not rhs.type.is_builtin_type)
long_enough_for_a_loop = len(self.unpacked_items) > 3
if special_unpack:
self.generate_special_parallel_unpacking_code(
code, rhs, use_loop=long_enough_for_a_loop)
else:
code.putln("{")
self.generate_generic_parallel_unpacking_code(
code, rhs, self.unpacked_items, use_loop=long_enough_for_a_loop)
code.putln("}")
for value_node in self.coerced_unpacked_items:
value_node.generate_evaluation_code(code)
for i in range(len(self.args)):
self.args[i].generate_assignment_code(
self.coerced_unpacked_items[i], code)
def generate_special_parallel_unpacking_code(self, code, rhs, use_loop):
sequence_type_test = '1'
none_check = "likely(%s != Py_None)" % rhs.py_result()
if rhs.type is list_type:
sequence_types = ['List']
if rhs.may_be_none():
sequence_type_test = none_check
elif rhs.type is tuple_type:
sequence_types = ['Tuple']
if rhs.may_be_none():
sequence_type_test = none_check
else:
sequence_types = ['Tuple', 'List']
tuple_check = 'likely(PyTuple_CheckExact(%s))' % rhs.py_result()
list_check = 'PyList_CheckExact(%s)' % rhs.py_result()
sequence_type_test = "(%s) || (%s)" % (tuple_check, list_check)
code.putln("if (%s) {" % sequence_type_test)
code.putln("PyObject* sequence = %s;" % rhs.py_result())
# list/tuple => check size
code.putln("#if CYTHON_COMPILING_IN_CPYTHON")
code.putln("Py_ssize_t size = Py_SIZE(sequence);")
code.putln("#else")
code.putln("Py_ssize_t size = PySequence_Size(sequence);") # < 0 => exception
code.putln("#endif")
code.putln("if (unlikely(size != %d)) {" % len(self.args))
code.globalstate.use_utility_code(raise_too_many_values_to_unpack)
code.putln("if (size > %d) __Pyx_RaiseTooManyValuesError(%d);" % (
len(self.args), len(self.args)))
code.globalstate.use_utility_code(raise_need_more_values_to_unpack)
code.putln("else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size);")
code.putln(code.error_goto(self.pos))
code.putln("}")
code.putln("#if CYTHON_COMPILING_IN_CPYTHON")
# unpack items from list/tuple in unrolled loop (can't fail)
if len(sequence_types) == 2:
code.putln("if (likely(Py%s_CheckExact(sequence))) {" % sequence_types[0])
for i, item in enumerate(self.unpacked_items):
code.putln("%s = Py%s_GET_ITEM(sequence, %d); " % (
item.result(), sequence_types[0], i))
if len(sequence_types) == 2:
code.putln("} else {")
for i, item in enumerate(self.unpacked_items):
code.putln("%s = Py%s_GET_ITEM(sequence, %d); " % (
item.result(), sequence_types[1], i))
code.putln("}")
for item in self.unpacked_items:
code.put_incref(item.result(), item.ctype())
code.putln("#else")
# in non-CPython, use the PySequence protocol (which can fail)
if not use_loop:
for i, item in enumerate(self.unpacked_items):
code.putln("%s = PySequence_ITEM(sequence, %d); %s" % (
item.result(), i,
code.error_goto_if_null(item.result(), self.pos)))
code.put_gotref(item.result())
else:
code.putln("{")
code.putln("Py_ssize_t i;")
code.putln("PyObject** temps[%s] = {%s};" % (
len(self.unpacked_items),
','.join(['&%s' % item.result() for item in self.unpacked_items])))
code.putln("for (i=0; i < %s; i++) {" % len(self.unpacked_items))
code.putln("PyObject* item = PySequence_ITEM(sequence, i); %s" % (
code.error_goto_if_null('item', self.pos)))
code.put_gotref('item')
code.putln("*(temps[i]) = item;")
code.putln("}")
code.putln("}")
code.putln("#endif")
rhs.generate_disposal_code(code)
if sequence_type_test == '1':
code.putln("}") # all done
elif sequence_type_test == none_check:
# either tuple/list or None => save some code by generating the error directly
code.putln("} else {")
code.globalstate.use_utility_code(
UtilityCode.load_cached("RaiseNoneIterError", "ObjectHandling.c"))
code.putln("__Pyx_RaiseNoneNotIterableError(); %s" % code.error_goto(self.pos))
code.putln("}") # all done
else:
code.putln("} else {") # needs iteration fallback code
self.generate_generic_parallel_unpacking_code(
code, rhs, self.unpacked_items, use_loop=use_loop)
code.putln("}")
def generate_generic_parallel_unpacking_code(self, code, rhs, unpacked_items, use_loop, terminate=True):
code.globalstate.use_utility_code(raise_need_more_values_to_unpack)
code.globalstate.use_utility_code(UtilityCode.load_cached("IterFinish", "ObjectHandling.c"))
code.putln("Py_ssize_t index = -1;") # must be at the start of a C block!
if use_loop:
code.putln("PyObject** temps[%s] = {%s};" % (
len(self.unpacked_items),
','.join(['&%s' % item.result() for item in unpacked_items])))
iterator_temp = code.funcstate.allocate_temp(py_object_type, manage_ref=True)
code.putln(
"%s = PyObject_GetIter(%s); %s" % (
iterator_temp,
rhs.py_result(),
code.error_goto_if_null(iterator_temp, self.pos)))
code.put_gotref(iterator_temp)
rhs.generate_disposal_code(code)
iternext_func = code.funcstate.allocate_temp(self._func_iternext_type, manage_ref=False)
code.putln("%s = Py_TYPE(%s)->tp_iternext;" % (
iternext_func, iterator_temp))
unpacking_error_label = code.new_label('unpacking_failed')
unpack_code = "%s(%s)" % (iternext_func, iterator_temp)
if use_loop:
code.putln("for (index=0; index < %s; index++) {" % len(unpacked_items))
code.put("PyObject* item = %s; if (unlikely(!item)) " % unpack_code)
code.put_goto(unpacking_error_label)
code.put_gotref("item")
code.putln("*(temps[index]) = item;")
code.putln("}")
else:
for i, item in enumerate(unpacked_items):
code.put(
"index = %d; %s = %s; if (unlikely(!%s)) " % (
i,
item.result(),
unpack_code,
item.result()))
code.put_goto(unpacking_error_label)
code.put_gotref(item.py_result())
if terminate:
code.globalstate.use_utility_code(
UtilityCode.load_cached("UnpackItemEndCheck", "ObjectHandling.c"))
code.put_error_if_neg(self.pos, "__Pyx_IternextUnpackEndCheck(%s, %d)" % (
unpack_code,
len(unpacked_items)))
code.putln("%s = NULL;" % iternext_func)
code.put_decref_clear(iterator_temp, py_object_type)
unpacking_done_label = code.new_label('unpacking_done')
code.put_goto(unpacking_done_label)
code.put_label(unpacking_error_label)
code.put_decref_clear(iterator_temp, py_object_type)
code.putln("%s = NULL;" % iternext_func)
code.putln("if (__Pyx_IterFinish() == 0) __Pyx_RaiseNeedMoreValuesError(index);")
code.putln(code.error_goto(self.pos))
code.put_label(unpacking_done_label)
code.funcstate.release_temp(iternext_func)
if terminate:
code.funcstate.release_temp(iterator_temp)
iterator_temp = None
return iterator_temp
def generate_starred_assignment_code(self, rhs, code):
for i, arg in enumerate(self.args):
if arg.is_starred:
starred_target = self.unpacked_items[i]
unpacked_fixed_items_left = self.unpacked_items[:i]
unpacked_fixed_items_right = self.unpacked_items[i+1:]
break
else:
assert False
iterator_temp = None
if unpacked_fixed_items_left:
for item in unpacked_fixed_items_left:
item.allocate(code)
code.putln('{')
iterator_temp = self.generate_generic_parallel_unpacking_code(
code, rhs, unpacked_fixed_items_left,
use_loop=True, terminate=False)
for i, item in enumerate(unpacked_fixed_items_left):
value_node = self.coerced_unpacked_items[i]
value_node.generate_evaluation_code(code)
code.putln('}')
starred_target.allocate(code)
target_list = starred_target.result()
code.putln("%s = PySequence_List(%s); %s" % (
target_list,
iterator_temp or rhs.py_result(),
code.error_goto_if_null(target_list, self.pos)))
code.put_gotref(target_list)
if iterator_temp:
code.put_decref_clear(iterator_temp, py_object_type)
code.funcstate.release_temp(iterator_temp)
else:
rhs.generate_disposal_code(code)
if unpacked_fixed_items_right:
code.globalstate.use_utility_code(raise_need_more_values_to_unpack)
length_temp = code.funcstate.allocate_temp(PyrexTypes.c_py_ssize_t_type, manage_ref=False)
code.putln('%s = PyList_GET_SIZE(%s);' % (length_temp, target_list))
code.putln("if (unlikely(%s < %d)) {" % (length_temp, len(unpacked_fixed_items_right)))
code.putln("__Pyx_RaiseNeedMoreValuesError(%d+%s); %s" % (
len(unpacked_fixed_items_left), length_temp,
code.error_goto(self.pos)))
code.putln('}')
for item in unpacked_fixed_items_right[::-1]:
item.allocate(code)
for i, (item, coerced_arg) in enumerate(zip(unpacked_fixed_items_right[::-1],
self.coerced_unpacked_items[::-1])):
code.putln('#if CYTHON_COMPILING_IN_CPYTHON')
code.putln("%s = PyList_GET_ITEM(%s, %s-%d); " % (
item.py_result(), target_list, length_temp, i+1))
# resize the list the hard way
code.putln("((PyVarObject*)%s)->ob_size--;" % target_list)
code.putln('#else')
code.putln("%s = PySequence_ITEM(%s, %s-%d); " % (
item.py_result(), target_list, length_temp, i+1))
code.putln('#endif')
code.put_gotref(item.py_result())
coerced_arg.generate_evaluation_code(code)
code.putln('#if !CYTHON_COMPILING_IN_CPYTHON')
sublist_temp = code.funcstate.allocate_temp(py_object_type, manage_ref=True)
code.putln('%s = PySequence_GetSlice(%s, 0, %s-%d); %s' % (
sublist_temp, target_list, length_temp, len(unpacked_fixed_items_right),
code.error_goto_if_null(sublist_temp, self.pos)))
code.put_gotref(sublist_temp)
code.funcstate.release_temp(length_temp)
code.put_decref(target_list, py_object_type)
code.putln('%s = %s; %s = NULL;' % (target_list, sublist_temp, sublist_temp))
code.putln('#else')
code.putln('%s = %s;' % (sublist_temp, sublist_temp)) # avoid warning about unused variable
code.funcstate.release_temp(sublist_temp)
code.putln('#endif')
for i, arg in enumerate(self.args):
arg.generate_assignment_code(self.coerced_unpacked_items[i], code)
def annotate(self, code):
for arg in self.args:
arg.annotate(code)
if self.unpacked_items:
for arg in self.unpacked_items:
arg.annotate(code)
for arg in self.coerced_unpacked_items:
arg.annotate(code)
class TupleNode(SequenceNode):
    #  Tuple constructor.

    type = tuple_type
    is_partly_literal = False  # True if elements are const but mult_factor is not

    gil_message = "Constructing Python tuple"

    def infer_type(self, env):
        """Infer a ctuple type when all element types are plain C types."""
        if self.mult_factor or not self.args:
            return tuple_type
        arg_types = [arg.infer_type(env) for arg in self.args]
        if any(type.is_pyobject or type.is_unspecified or type.is_fused for type in arg_types):
            return tuple_type
        else:
            return env.declare_tuple_type(self.pos, arg_types).type

    def analyse_types(self, env, skip_children=False):
        if len(self.args) == 0:
            # the empty tuple is a shared global constant
            self.is_temp = False
            self.is_literal = True
            return self

        if not skip_children:
            for i, arg in enumerate(self.args):
                if arg.is_starred:
                    arg.starred_expr_allowed_here = True
                self.args[i] = arg.analyse_types(env)
        if (not self.mult_factor and
                not any((arg.is_starred or arg.type.is_pyobject or arg.type.is_fused) for arg in self.args)):
            # all-C elements => build a ctuple instead of a Python tuple
            self.type = env.declare_tuple_type(self.pos, (arg.type for arg in self.args)).type
            self.is_temp = 1
            return self

        node = SequenceNode.analyse_types(self, env, skip_children=True)
        node = node._create_merge_node_if_necessary(env)
        if not node.is_sequence_constructor:
            return node

        if not all(child.is_literal for child in node.args):
            return node
        if not node.mult_factor or (
                node.mult_factor.is_literal and
                isinstance(node.mult_factor.constant_result, _py_int_types)):
            # fully constant => cache as a module-level constant
            node.is_temp = False
            node.is_literal = True
        else:
            if not node.mult_factor.type.is_pyobject:
                node.mult_factor = node.mult_factor.coerce_to_pyobject(env)
            node.is_temp = True
            node.is_partly_literal = True
        return node

    def coerce_to(self, dst_type, env):
        if self.type.is_ctuple:
            if dst_type.is_ctuple and self.type.size == dst_type.size:
                return self.coerce_to_ctuple(dst_type, env)
            elif dst_type is tuple_type or dst_type is py_object_type:
                coerced_args = [arg.coerce_to_pyobject(env) for arg in self.args]
                return TupleNode(self.pos, args=coerced_args, type=tuple_type, is_temp=1).analyse_types(env, skip_children=True)
            else:
                return self.coerce_to_pyobject(env).coerce_to(dst_type, env)
        elif dst_type.is_ctuple and not self.mult_factor:
            return self.coerce_to_ctuple(dst_type, env)
        else:
            return SequenceNode.coerce_to(self, dst_type, env)

    def as_list(self):
        """Return an equivalent ListNode (with the same args/mult_factor)."""
        t = ListNode(self.pos, args=self.args, mult_factor=self.mult_factor)
        if isinstance(self.constant_result, tuple):
            t.constant_result = list(self.constant_result)
        return t

    def is_simple(self):
        # either temp or constant => always simple
        return True

    def nonlocally_immutable(self):
        # either temp or constant => always safe
        return True

    def calculate_result_code(self):
        if len(self.args) > 0:
            return self.result_code
        else:
            return Naming.empty_tuple

    def calculate_constant_result(self):
        self.constant_result = tuple([
                arg.constant_result for arg in self.args])

    def compile_time_value(self, denv):
        values = self.compile_time_value_list(denv)
        try:
            return tuple(values)
        except Exception as e:
            self.compile_time_value_error(e)

    def generate_operation_code(self, code):
        if len(self.args) == 0:
            # result_code is Naming.empty_tuple
            return

        if self.is_partly_literal:
            # underlying tuple is const, but factor is not
            tuple_target = code.get_py_const(py_object_type, 'tuple', cleanup_level=2)
            const_code = code.get_cached_constants_writer()
            const_code.mark_pos(self.pos)
            self.generate_sequence_packing_code(const_code, tuple_target, plain=True)
            const_code.put_giveref(tuple_target)
            code.putln('%s = PyNumber_Multiply(%s, %s); %s' % (
                self.result(), tuple_target, self.mult_factor.py_result(),
                code.error_goto_if_null(self.result(), self.pos)
                ))
            code.put_gotref(self.py_result())
        elif self.is_literal:
            # non-empty cached tuple => result is global constant,
            # creation code goes into separate code writer
            self.result_code = code.get_py_const(py_object_type, 'tuple', cleanup_level=2)
            code = code.get_cached_constants_writer()
            code.mark_pos(self.pos)
            self.generate_sequence_packing_code(code)
            code.put_giveref(self.py_result())
        else:
            self.type.entry.used = True
            self.generate_sequence_packing_code(code)
class ListNode(SequenceNode):
    #  List constructor.
    #
    #  obj_conversion_errors    [PyrexError]   used internally
    #  original_args            [ExprNode]     used internally

    obj_conversion_errors = []
    type = list_type
    in_module_scope = False

    gil_message = "Constructing Python list"

    def type_dependencies(self, env):
        return ()

    def infer_type(self, env):
        # TODO: Infer non-object list arrays.
        return list_type

    def analyse_expressions(self, env):
        for arg in self.args:
            if arg.is_starred:
                arg.starred_expr_allowed_here = True
        node = SequenceNode.analyse_expressions(self, env)
        return node.coerce_to_pyobject(env)

    def analyse_types(self, env):
        # Conversion errors are held back here: depending on the later
        # coercion target (e.g. a C array), they may turn out to be harmless.
        hold_errors()
        self.original_args = list(self.args)
        node = SequenceNode.analyse_types(self, env)
        node.obj_conversion_errors = held_errors()
        release_errors(ignore=True)
        if env.is_module_scope:
            self.in_module_scope = True
        node = node._create_merge_node_if_necessary(env)
        return node

    def coerce_to(self, dst_type, env):
        """Coerce the list literal to a Python list, C array, struct or ctuple."""
        if dst_type.is_pyobject:
            for err in self.obj_conversion_errors:
                report_error(err)
            self.obj_conversion_errors = []
            if not self.type.subtype_of(dst_type):
                error(self.pos, "Cannot coerce list to type '%s'" % dst_type)
        elif (dst_type.is_array or dst_type.is_ptr) and dst_type.base_type is not PyrexTypes.c_void_type:
            array_length = len(self.args)
            if self.mult_factor:
                if isinstance(self.mult_factor.constant_result, _py_int_types):
                    if self.mult_factor.constant_result <= 0:
                        error(self.pos, "Cannot coerce non-positively multiplied list to '%s'" % dst_type)
                    else:
                        array_length *= self.mult_factor.constant_result
                else:
                    error(self.pos, "Cannot coerce dynamically multiplied list to '%s'" % dst_type)
            base_type = dst_type.base_type
            self.type = PyrexTypes.CArrayType(base_type, array_length)
            for i in range(len(self.original_args)):
                arg = self.args[i]
                if isinstance(arg, CoerceToPyTypeNode):
                    # undo the object coercion done in analyse_types
                    arg = arg.arg
                self.args[i] = arg.coerce_to(base_type, env)
        elif dst_type.is_cpp_class:
            # TODO(robertwb): Avoid object conversion for vector/list/set.
            return TypecastNode(self.pos, operand=self, type=PyrexTypes.py_object_type).coerce_to(dst_type, env)
        elif self.mult_factor:
            error(self.pos, "Cannot coerce multiplied list to '%s'" % dst_type)
        elif dst_type.is_struct:
            if len(self.args) > len(dst_type.scope.var_entries):
                error(self.pos, "Too many members for '%s'" % dst_type)
            else:
                if len(self.args) < len(dst_type.scope.var_entries):
                    warning(self.pos, "Too few members for '%s'" % dst_type, 1)
                for i, (arg, member) in enumerate(zip(self.original_args, dst_type.scope.var_entries)):
                    if isinstance(arg, CoerceToPyTypeNode):
                        arg = arg.arg
                    self.args[i] = arg.coerce_to(member.type, env)
            self.type = dst_type
        elif dst_type.is_ctuple:
            return self.coerce_to_ctuple(dst_type, env)
        else:
            self.type = error_type
            error(self.pos, "Cannot coerce list to type '%s'" % dst_type)
        return self

    def as_list(self):  # dummy for compatibility with TupleNode
        return self

    def as_tuple(self):
        """Return an equivalent TupleNode (with the same args/mult_factor)."""
        t = TupleNode(self.pos, args=self.args, mult_factor=self.mult_factor)
        if isinstance(self.constant_result, list):
            t.constant_result = tuple(self.constant_result)
        return t

    def allocate_temp_result(self, code):
        if self.type.is_array and self.in_module_scope:
            # module-level C arrays get a static temp (see release note below)
            self.temp_code = code.funcstate.allocate_temp(
                self.type, manage_ref=False, static=True)
        else:
            SequenceNode.allocate_temp_result(self, code)

    def release_temp_result(self, env):
        if self.type.is_array:
            # To be valid C++, we must allocate the memory on the stack
            # manually and be sure not to reuse it for something else.
            # Yes, this means that we leak a temp array variable.
            pass
        else:
            SequenceNode.release_temp_result(self, env)

    def calculate_constant_result(self):
        if self.mult_factor:
            raise ValueError()  # may exceed the compile time memory
        self.constant_result = [
            arg.constant_result for arg in self.args]

    def compile_time_value(self, denv):
        l = self.compile_time_value_list(denv)
        if self.mult_factor:
            l *= self.mult_factor.compile_time_value(denv)
        return l

    def generate_operation_code(self, code):
        if self.type.is_pyobject:
            for err in self.obj_conversion_errors:
                report_error(err)
            self.generate_sequence_packing_code(code)
        elif self.type.is_array:
            if self.mult_factor:
                code.putln("{")
                code.putln("Py_ssize_t %s;" % Naming.quick_temp_cname)
                code.putln("for ({i} = 0; {i} < {count}; {i}++) {{".format(
                    i=Naming.quick_temp_cname, count=self.mult_factor.result()))
                offset = '+ (%d * %s)' % (len(self.args), Naming.quick_temp_cname)
            else:
                offset = ''
            for i, arg in enumerate(self.args):
                if arg.type.is_array:
                    # nested arrays must be copied element-wise via memcpy
                    code.globalstate.use_utility_code(UtilityCode.load_cached("IncludeStringH", "StringTools.c"))
                    code.putln("memcpy(&(%s[%s%s]), %s, sizeof(%s[0]));" % (
                        self.result(), i, offset,
                        arg.result(), self.result()
                    ))
                else:
                    code.putln("%s[%s%s] = %s;" % (
                        self.result(),
                        i,
                        offset,
                        arg.result()))
            if self.mult_factor:
                code.putln("}")
                code.putln("}")
        elif self.type.is_struct:
            for arg, member in zip(self.args, self.type.scope.var_entries):
                code.putln("%s.%s = %s;" % (
                    self.result(),
                    member.cname,
                    arg.result()))
        else:
            raise InternalError("List type never specified")
class ScopedExprNode(ExprNode):
    # Abstract base class for ExprNodes that have their own local
    # scope, such as generator expressions.
    #
    # expr_scope    Scope  the inner scope of the expression

    subexprs = []
    expr_scope = None

    # does this node really have a local scope, e.g. does it leak loop
    # variables or not?  non-leaking Py3 behaviour is default, except
    # for list comprehensions where the behaviour differs in Py2 and
    # Py3 (set in Parsing.py based on parser context)
    has_local_scope = True

    def init_scope(self, outer_scope, expr_scope=None):
        """Set up expr_scope: explicit scope > new generator scope > None."""
        if expr_scope is not None:
            self.expr_scope = expr_scope
        elif self.has_local_scope:
            self.expr_scope = Symtab.GeneratorExpressionScope(outer_scope)
        else:
            self.expr_scope = None

    def analyse_declarations(self, env):
        self.init_scope(env)

    def analyse_scoped_declarations(self, env):
        # this is called with the expr_scope as env
        pass

    def analyse_types(self, env):
        # no recursion here, the children will be analysed separately below
        return self

    def analyse_scoped_expressions(self, env):
        # this is called with the expr_scope as env
        return self

    def generate_evaluation_code(self, code):
        # set up local variables and free their references on exit
        generate_inner_evaluation_code = super(ScopedExprNode, self).generate_evaluation_code
        if not self.has_local_scope or not self.expr_scope.var_entries:
            # no local variables => delegate, done
            generate_inner_evaluation_code(code)
            return

        code.putln('{ /* enter inner scope */')
        py_entries = []
        for entry in self.expr_scope.var_entries:
            if not entry.in_closure:
                code.put_var_declaration(entry)
                if entry.type.is_pyobject and entry.used:
                    py_entries.append(entry)
        if not py_entries:
            # no local Python references => no cleanup required
            generate_inner_evaluation_code(code)
            code.putln('} /* exit inner scope */')
            return

        # must free all local Python references at each exit point
        # => temporarily redirect the loop/error labels into this scope
        old_loop_labels = tuple(code.new_loop_labels())
        old_error_label = code.new_error_label()

        generate_inner_evaluation_code(code)

        # normal (non-error) exit
        for entry in py_entries:
            code.put_var_decref(entry)

        # error/loop body exit points
        exit_scope = code.new_label('exit_scope')
        code.put_goto(exit_scope)
        for label, old_label in ([(code.error_label, old_error_label)] +
                                 list(zip(code.get_loop_labels(), old_loop_labels))):
            if code.label_used(label):
                code.put_label(label)
                for entry in py_entries:
                    code.put_var_decref(entry)
                code.put_goto(old_label)
        code.put_label(exit_scope)
        code.putln('} /* exit inner scope */')

        # restore the outer labels
        code.set_loop_labels(old_loop_labels)
        code.error_label = old_error_label
class ComprehensionNode(ScopedExprNode):
    # A list/set/dict comprehension

    child_attrs = ["loop"]

    is_temp = True

    def infer_type(self, env):
        return self.type

    def analyse_declarations(self, env):
        self.append.target = self  # this is used in the PyList_Append of the inner loop
        self.init_scope(env)

    def analyse_scoped_declarations(self, env):
        self.loop.analyse_declarations(env)

    def analyse_types(self, env):
        if not self.has_local_scope:
            self.loop = self.loop.analyse_expressions(env)
        return self

    def analyse_scoped_expressions(self, env):
        if self.has_local_scope:
            self.loop = self.loop.analyse_expressions(env)
        return self

    def may_be_none(self):
        # a freshly created container is never None
        return False

    def generate_result_code(self, code):
        self.generate_operation_code(code)

    def generate_operation_code(self, code):
        """Create the empty result container, then run the loop that fills it."""
        if self.type is Builtin.list_type:
            create_code = 'PyList_New(0)'
        elif self.type is Builtin.set_type:
            create_code = 'PySet_New(NULL)'
        elif self.type is Builtin.dict_type:
            create_code = 'PyDict_New()'
        else:
            raise InternalError("illegal type for comprehension: %s" % self.type)
        code.putln('%s = %s; %s' % (
            self.result(), create_code,
            code.error_goto_if_null(self.result(), self.pos)))
        code.put_gotref(self.result())
        self.loop.generate_execution_code(code)

    def annotate(self, code):
        self.loop.annotate(code)
class ComprehensionAppendNode(Node):
    # Need to be careful to avoid infinite recursion:
    # target must not be in child_attrs/subexprs

    child_attrs = ['expr']
    target = None  # the ComprehensionNode whose result container we append to

    type = PyrexTypes.c_int_type

    def analyse_expressions(self, env):
        self.expr = self.expr.analyse_expressions(env)
        if not self.expr.type.is_pyobject:
            self.expr = self.expr.coerce_to_pyobject(env)
        return self

    def generate_execution_code(self, code):
        """Append the evaluated expression to the list/set being built."""
        if self.target.type is list_type:
            code.globalstate.use_utility_code(
                UtilityCode.load_cached("ListCompAppend", "Optimize.c"))
            function = "__Pyx_ListComp_Append"
        elif self.target.type is set_type:
            function = "PySet_Add"
        else:
            raise InternalError(
                "Invalid type for comprehension node: %s" % self.target.type)

        self.expr.generate_evaluation_code(code)
        code.putln(code.error_goto_if("%s(%s, (PyObject*)%s)" % (
            function,
            self.target.result(),
            self.expr.result()
            ), self.pos))
        self.expr.generate_disposal_code(code)
        self.expr.free_temps(code)

    def generate_function_definitions(self, env, code):
        self.expr.generate_function_definitions(env, code)

    def annotate(self, code):
        self.expr.annotate(code)
class DictComprehensionAppendNode(ComprehensionAppendNode):
    # Append variant for dict comprehensions: evaluates a key and a value
    # expression and stores the pair with PyDict_SetItem().

    child_attrs = ['key_expr', 'value_expr']

    def analyse_expressions(self, env):
        self.key_expr = self.key_expr.analyse_expressions(env)
        if not self.key_expr.type.is_pyobject:
            self.key_expr = self.key_expr.coerce_to_pyobject(env)
        self.value_expr = self.value_expr.analyse_expressions(env)
        if not self.value_expr.type.is_pyobject:
            self.value_expr = self.value_expr.coerce_to_pyobject(env)
        return self

    def generate_execution_code(self, code):
        self.key_expr.generate_evaluation_code(code)
        self.value_expr.generate_evaluation_code(code)
        code.putln(code.error_goto_if("PyDict_SetItem(%s, (PyObject*)%s, (PyObject*)%s)" % (
            self.target.result(),
            self.key_expr.result(),
            self.value_expr.result()
            ), self.pos))
        self.key_expr.generate_disposal_code(code)
        self.key_expr.free_temps(code)
        self.value_expr.generate_disposal_code(code)
        self.value_expr.free_temps(code)

    def generate_function_definitions(self, env, code):
        self.key_expr.generate_function_definitions(env, code)
        self.value_expr.generate_function_definitions(env, code)

    def annotate(self, code):
        self.key_expr.annotate(code)
        self.value_expr.annotate(code)
class InlinedGeneratorExpressionNode(ExprNode):
    # An inlined generator expression for which the result is calculated
    # inside of the loop and returned as a single, first and only Generator
    # return value.
    # This will only be created by transforms when replacing safe builtin
    # calls on generator expressions.
    #
    # gen            GeneratorExpressionNode      the generator, not containing any YieldExprNodes
    # orig_func      String                       the name of the builtin function this node replaces
    # target         ExprNode or None             a 'target' for a ComprehensionAppend node

    subexprs = ["gen"]
    orig_func = None
    target = None
    is_temp = True
    type = py_object_type

    def __init__(self, pos, gen, comprehension_type=None, **kwargs):
        gbody = gen.def_node.gbody
        gbody.is_inlined = True
        if comprehension_type is not None:
            assert comprehension_type in (list_type, set_type, dict_type), comprehension_type
            gbody.inlined_comprehension_type = comprehension_type
            kwargs.update(
                # the generator's retval serves as the append target
                target=RawCNameExprNode(pos, comprehension_type, Naming.retval_cname),
                type=comprehension_type,
            )
        super(InlinedGeneratorExpressionNode, self).__init__(pos, gen=gen, **kwargs)

    def may_be_none(self):
        # these builtins never return None for a non-empty/any input
        return self.orig_func not in ('any', 'all', 'sorted')

    def infer_type(self, env):
        return self.type

    def analyse_types(self, env):
        self.gen = self.gen.analyse_expressions(env)
        return self

    def generate_result_code(self, code):
        # run the inlined generator to completion; its single "next" value
        # is the aggregated result
        code.putln("%s = __Pyx_Generator_Next(%s); %s" % (
            self.result(), self.gen.result(),
            code.error_goto_if_null(self.result(), self.pos)))
        code.put_gotref(self.result())
class MergedSequenceNode(ExprNode):
    """
    Merge a sequence of iterables into a set/list/tuple.

    The target collection is determined by self.type, which must be set externally.

    args    [ExprNode]
    """
    subexprs = ['args']
    is_temp = True
    gil_message = "Constructing Python collection"

    def __init__(self, pos, args, type):
        if type in (list_type, tuple_type) and args and args[0].is_sequence_constructor:
            # construct a list directly from the first argument that we can then extend
            if args[0].type is not list_type:
                args[0] = ListNode(args[0].pos, args=args[0].args, is_temp=True)
        ExprNode.__init__(self, pos, args=args, type=type)

    def calculate_constant_result(self):
        # Build a flat list first, then convert to the target type at the end.
        result = []
        for item in self.args:
            if item.is_sequence_constructor and item.mult_factor:
                if item.mult_factor.constant_result <= 0:
                    continue
                # otherwise, adding each item once should be enough
            if item.is_set_literal or item.is_sequence_constructor:
                # process items in order
                items = (arg.constant_result for arg in item.args)
            else:
                items = item.constant_result
            result.extend(items)
        if self.type is set_type:
            result = set(result)
        elif self.type is tuple_type:
            result = tuple(result)
        else:
            assert self.type is list_type
        self.constant_result = result

    def compile_time_value(self, denv):
        # Same flattening as calculate_constant_result(), but evaluated in
        # the compile-time environment.
        result = []
        for item in self.args:
            if item.is_sequence_constructor and item.mult_factor:
                if item.mult_factor.compile_time_value(denv) <= 0:
                    continue
            if item.is_set_literal or item.is_sequence_constructor:
                # process items in order
                items = (arg.compile_time_value(denv) for arg in item.args)
            else:
                items = item.compile_time_value(denv)
            result.extend(items)
        if self.type is set_type:
            try:
                result = set(result)
            except Exception as e:
                self.compile_time_value_error(e)
        elif self.type is tuple_type:
            result = tuple(result)
        else:
            assert self.type is list_type
        return result

    def type_dependencies(self, env):
        return ()

    def infer_type(self, env):
        return self.type

    def analyse_types(self, env):
        args = [
            arg.analyse_types(env).coerce_to_pyobject(env).as_none_safe_node(
                # FIXME: CPython's error message starts with the runtime function name
                'argument after * must be an iterable, not NoneType')
            for arg in self.args
        ]

        if len(args) == 1 and args[0].type is self.type:
            # strip this intermediate node and use the bare collection
            return args[0]

        assert self.type in (set_type, list_type, tuple_type)

        self.args = args
        return self

    def may_be_none(self):
        return False

    def generate_evaluation_code(self, code):
        code.mark_pos(self.pos)
        self.allocate_temp_result(code)

        is_set = self.type is set_type

        # The first argument seeds the result collection; tuples are built
        # as a list first and converted at the end.
        args = iter(self.args)
        item = next(args)
        item.generate_evaluation_code(code)
        if (is_set and item.is_set_literal or
                not is_set and item.is_sequence_constructor and item.type is list_type):
            # Steal the freshly built collection instead of copying it.
            code.putln("%s = %s;" % (self.result(), item.py_result()))
            item.generate_post_assignment_code(code)
        else:
            code.putln("%s = %s(%s); %s" % (
                self.result(),
                'PySet_New' if is_set else 'PySequence_List',
                item.py_result(),
                code.error_goto_if_null(self.result(), self.pos)))
            code.put_gotref(self.py_result())
            item.generate_disposal_code(code)
        item.free_temps(code)

        helpers = set()
        if is_set:
            add_func = "PySet_Add"
            extend_func = "__Pyx_PySet_Update"
        else:
            add_func = "__Pyx_ListComp_Append"
            extend_func = "__Pyx_PyList_Extend"

        for item in args:
            if (is_set and (item.is_set_literal or item.is_sequence_constructor) or
                    (item.is_sequence_constructor and not item.mult_factor)):
                # Literal argument: add its elements one by one instead of
                # building an intermediate collection.
                if not is_set and item.args:
                    helpers.add(("ListCompAppend", "Optimize.c"))
                for arg in item.args:
                    arg.generate_evaluation_code(code)
                    code.put_error_if_neg(arg.pos, "%s(%s, %s)" % (
                        add_func,
                        self.result(),
                        arg.py_result()))
                    arg.generate_disposal_code(code)
                    arg.free_temps(code)
                continue

            if is_set:
                helpers.add(("PySet_Update", "Builtins.c"))
            else:
                helpers.add(("ListExtend", "Optimize.c"))

            item.generate_evaluation_code(code)
            code.put_error_if_neg(item.pos, "%s(%s, %s)" % (
                extend_func,
                self.result(),
                item.py_result()))
            item.generate_disposal_code(code)
            item.free_temps(code)

        if self.type is tuple_type:
            # Convert the intermediate list into the final tuple.
            code.putln("{")
            code.putln("PyObject *%s = PyList_AsTuple(%s);" % (
                Naming.quick_temp_cname,
                self.result()))
            code.put_decref(self.result(), py_object_type)
            code.putln("%s = %s; %s" % (
                self.result(),
                Naming.quick_temp_cname,
                code.error_goto_if_null(self.result(), self.pos)))
            code.put_gotref(self.result())
            code.putln("}")

        # sorted() keeps the generated C code deterministic across runs
        for helper in sorted(helpers):
            code.globalstate.use_utility_code(UtilityCode.load_cached(*helper))

    def annotate(self, code):
        for item in self.args:
            item.annotate(code)
class SetNode(ExprNode):
    """Literal set constructor expression, e.g. ``{a, b, c}``."""
    subexprs = ['args']
    type = set_type
    is_set_literal = True
    gil_message = "Constructing Python set"

    def analyse_types(self, env):
        # Analyse every element and coerce it to a Python object in place.
        for idx, elem in enumerate(self.args):
            elem = elem.analyse_types(env)
            self.args[idx] = elem.coerce_to_pyobject(env)
        self.type = set_type
        self.is_temp = 1
        return self

    def may_be_none(self):
        # A freshly constructed set can never be None.
        return False

    def calculate_constant_result(self):
        elements = [elem.constant_result for elem in self.args]
        self.constant_result = set(elements)

    def compile_time_value(self, denv):
        # Evaluate all elements first; only the set() construction itself
        # is guarded (e.g. unhashable elements).
        elements = [elem.compile_time_value(denv) for elem in self.args]
        try:
            return set(elements)
        except Exception as e:
            self.compile_time_value_error(e)

    def generate_evaluation_code(self, code):
        # Evaluate all elements, create an empty set, then add each element
        # with an error-checked PySet_Add() call.
        for elem in self.args:
            elem.generate_evaluation_code(code)
        self.allocate_temp_result(code)
        code.putln(
            "%s = PySet_New(0); %s" % (
                self.result(),
                code.error_goto_if_null(self.result(), self.pos)))
        code.put_gotref(self.py_result())
        for elem in self.args:
            code.put_error_if_neg(
                self.pos,
                "PySet_Add(%s, %s)" % (self.result(), elem.py_result()))
            elem.generate_disposal_code(code)
            elem.free_temps(code)
class DictNode(ExprNode):
    #  Dictionary constructor.
    #
    #  key_value_pairs     [DictItemNode]
    #  exclude_null_values boolean          Do not add NULL values to dict
    #
    #  obj_conversion_errors    [PyrexError]   used internally

    subexprs = ['key_value_pairs']
    is_temp = 1
    exclude_null_values = False
    type = dict_type
    is_dict_literal = True
    reject_duplicates = False

    obj_conversion_errors = []

    @classmethod
    def from_pairs(cls, pos, pairs):
        # Convenience constructor from a list of (key, value) expression pairs.
        return cls(pos, key_value_pairs=[
            DictItemNode(pos, key=k, value=v) for k, v in pairs])

    def calculate_constant_result(self):
        self.constant_result = dict([
            item.constant_result for item in self.key_value_pairs])

    def compile_time_value(self, denv):
        pairs = [(item.key.compile_time_value(denv), item.value.compile_time_value(denv))
            for item in self.key_value_pairs]
        try:
            return dict(pairs)
        except Exception as e:
            self.compile_time_value_error(e)

    def type_dependencies(self, env):
        return ()

    def infer_type(self, env):
        # TOOD: Infer struct constructors.
        return dict_type

    def analyse_types(self, env):
        # Conversion errors are held back here because the literal may later
        # be coerced to a C struct/union in coerce_to(), in which case the
        # Python-object conversion errors do not apply.
        hold_errors()
        self.key_value_pairs = [ item.analyse_types(env)
                                 for item in self.key_value_pairs ]
        self.obj_conversion_errors = held_errors()
        release_errors(ignore=True)
        return self

    def may_be_none(self):
        return False

    def coerce_to(self, dst_type, env):
        if dst_type.is_pyobject:
            self.release_errors()
            if self.type.is_struct_or_union:
                # struct literal being converted back to a Python dict
                if not dict_type.subtype_of(dst_type):
                    error(self.pos, "Cannot interpret struct as non-dict type '%s'" % dst_type)
                return DictNode(self.pos, key_value_pairs=[
                    DictItemNode(item.pos, key=item.key.coerce_to_pyobject(env),
                                 value=item.value.coerce_to_pyobject(env))
                    for item in self.key_value_pairs])
            if not self.type.subtype_of(dst_type):
                error(self.pos, "Cannot interpret dict as type '%s'" % dst_type)
        elif dst_type.is_struct_or_union:
            # dict literal used as a C struct/union initialiser: keys must
            # be string literals naming fields of the target type
            self.type = dst_type
            if not dst_type.is_struct and len(self.key_value_pairs) != 1:
                error(self.pos, "Exactly one field must be specified to convert to union '%s'" % dst_type)
            elif dst_type.is_struct and len(self.key_value_pairs) < len(dst_type.scope.var_entries):
                warning(self.pos, "Not all members given for struct '%s'" % dst_type, 1)
            for item in self.key_value_pairs:
                if isinstance(item.key, CoerceToPyTypeNode):
                    item.key = item.key.arg
                if not item.key.is_string_literal:
                    error(item.key.pos, "Invalid struct field identifier")
                    item.key = StringNode(item.key.pos, value="<error>")
                else:
                    key = str(item.key.value) # converts string literals to unicode in Py3
                    member = dst_type.scope.lookup_here(key)
                    if not member:
                        error(item.key.pos, "struct '%s' has no field '%s'" % (dst_type, key))
                    else:
                        value = item.value
                        if isinstance(value, CoerceToPyTypeNode):
                            value = value.arg
                        item.value = value.coerce_to(member.type, env)
        else:
            self.type = error_type
            error(self.pos, "Cannot interpret dict as type '%s'" % dst_type)
        return self

    def release_errors(self):
        # Report the errors held back in analyse_types().
        for err in self.obj_conversion_errors:
            report_error(err)
        self.obj_conversion_errors = []

    gil_message = "Constructing Python dict"

    def generate_evaluation_code(self, code):
        #  Custom method used here because key-value
        #  pairs are evaluated and used one at a time.
        code.mark_pos(self.pos)
        self.allocate_temp_result(code)

        is_dict = self.type.is_pyobject
        if is_dict:
            self.release_errors()
            code.putln(
                "%s = PyDict_New(); %s" % (
                    self.result(),
                    code.error_goto_if_null(self.result(), self.pos)))
            code.put_gotref(self.py_result())

        # keys_seen tracks literal keys to detect duplicates at compile
        # time; it is set to None as soon as runtime checks become necessary.
        keys_seen = set()
        key_type = None
        needs_error_helper = False

        for item in self.key_value_pairs:
            item.generate_evaluation_code(code)
            if is_dict:
                if self.exclude_null_values:
                    code.putln('if (%s) {' % item.value.py_result())
                key = item.key
                if self.reject_duplicates:
                    if keys_seen is not None:
                        # avoid runtime 'in' checks for literals that we can do at compile time
                        if not key.is_string_literal:
                            keys_seen = None
                        elif key.value in keys_seen:
                            # FIXME: this could be a compile time error, at least in Cython code
                            keys_seen = None
                        elif key_type is not type(key.value):
                            if key_type is None:
                                key_type = type(key.value)
                                keys_seen.add(key.value)
                            else:
                                # different types => may not be able to compare at compile time
                                keys_seen = None
                        else:
                            keys_seen.add(key.value)

                    if keys_seen is None:
                        code.putln('if (unlikely(PyDict_Contains(%s, %s))) {' % (
                            self.result(), key.py_result()))
                        # currently only used in function calls
                        needs_error_helper = True
                        code.putln('__Pyx_RaiseDoubleKeywordsError("function", %s); %s' % (
                            key.py_result(),
                            code.error_goto(item.pos)))
                        code.putln("} else {")

                code.put_error_if_neg(self.pos, "PyDict_SetItem(%s, %s, %s)" % (
                    self.result(),
                    item.key.py_result(),
                    item.value.py_result()))
                if self.reject_duplicates and keys_seen is None:
                    code.putln('}')
                if self.exclude_null_values:
                    code.putln('}')
            else:
                # C struct/union initialiser: plain member assignment
                code.putln("%s.%s = %s;" % (
                    self.result(),
                    item.key.value,
                    item.value.result()))
            item.generate_disposal_code(code)
            item.free_temps(code)

        if needs_error_helper:
            code.globalstate.use_utility_code(
                UtilityCode.load_cached("RaiseDoubleKeywords", "FunctionArguments.c"))

    def annotate(self, code):
        for item in self.key_value_pairs:
            item.annotate(code)
class DictItemNode(ExprNode):
    """A single key/value pair inside a DictNode.

    key    ExprNode
    value  ExprNode
    """
    subexprs = ['key', 'value']
    nogil_check = None  # the parent DictNode performs the nogil check

    def calculate_constant_result(self):
        key_const = self.key.constant_result
        value_const = self.value.constant_result
        self.constant_result = (key_const, value_const)

    def analyse_types(self, env):
        # Analyse both children first, then coerce each to a Python object.
        self.key = self.key.analyse_types(env)
        self.value = self.value.analyse_types(env)
        self.key = self.key.coerce_to_pyobject(env)
        self.value = self.value.coerce_to_pyobject(env)
        return self

    def generate_evaluation_code(self, code):
        for child in (self.key, self.value):
            child.generate_evaluation_code(code)

    def generate_disposal_code(self, code):
        for child in (self.key, self.value):
            child.generate_disposal_code(code)

    def free_temps(self, code):
        for child in (self.key, self.value):
            child.free_temps(code)

    def __iter__(self):
        # Supports "key, value = item" style unpacking.
        return iter((self.key, self.value))
class SortedDictKeysNode(ExprNode):
    # build sorted list of dict keys, e.g. for dir()
    subexprs = ['arg']

    is_temp = True

    def __init__(self, arg):
        ExprNode.__init__(self, arg.pos, arg=arg)
        self.type = Builtin.list_type

    def analyse_types(self, env):
        arg = self.arg.analyse_types(env)
        if arg.type is Builtin.dict_type:
            # PyDict_Keys() would crash on NULL/None, so guard at runtime
            arg = arg.as_none_safe_node(
                "'NoneType' object is not iterable")
        self.arg = arg
        return self

    def may_be_none(self):
        return False

    def generate_result_code(self, code):
        dict_result = self.arg.py_result()
        if self.arg.type is Builtin.dict_type:
            # fast path: known dict => direct C-API call
            code.putln('%s = PyDict_Keys(%s); %s' % (
                self.result(), dict_result,
                code.error_goto_if_null(self.result(), self.pos)))
            code.put_gotref(self.py_result())
        else:
            # originally used PyMapping_Keys() here, but that may return a tuple
            code.globalstate.use_utility_code(UtilityCode.load_cached(
                'PyObjectCallMethod0', 'ObjectHandling.c'))
            keys_cname = code.intern_identifier(StringEncoding.EncodedString("keys"))
            code.putln('%s = __Pyx_PyObject_CallMethod0(%s, %s); %s' % (
                self.result(), dict_result, keys_cname,
                code.error_goto_if_null(self.result(), self.pos)))
            code.put_gotref(self.py_result())
            # obj.keys() may return any iterable; normalise to a list so
            # that PyList_Sort() below is applicable
            code.putln("if (unlikely(!PyList_Check(%s))) {" % self.result())
            code.put_decref_set(self.result(), "PySequence_List(%s)" % self.result())
            code.putln(code.error_goto_if_null(self.result(), self.pos))
            code.put_gotref(self.py_result())
            code.putln("}")
        code.put_error_if_neg(
            self.pos, 'PyList_Sort(%s)' % self.py_result())
class ModuleNameMixin(object):
    """Mixin providing interned string constants for a node's module name
    and qualified name (read from self.module_name / self.qualname).
    """

    def get_py_mod_name(self, code):
        # Interned identifier constant for the defining module's name.
        return code.get_py_string_const(self.module_name, identifier=True)

    def get_py_qualified_name(self, code):
        # Interned identifier constant for the node's qualified name.
        return code.get_py_string_const(self.qualname, identifier=True)
class ClassNode(ExprNode, ModuleNameMixin):
    #  Helper class used in the implementation of Python (Py2-style)
    #  class definitions. Constructs a class object given
    #  a name, tuple of bases and class dictionary.
    #
    #  name         EncodedString      Name of the class
    #  bases        ExprNode           Base class tuple
    #  dict         ExprNode           Class dict (not owned by this node)
    #  doc          ExprNode or None   Doc string
    #  module_name  EncodedString      Name of defining module

    subexprs = ['bases', 'doc']
    type = py_object_type
    is_temp = True

    def infer_type(self, env):
        # TODO: could return 'type' in some cases
        return py_object_type

    def analyse_types(self, env):
        self.bases = self.bases.analyse_types(env)
        if self.doc:
            self.doc = self.doc.analyse_types(env)
            self.doc = self.doc.coerce_to_pyobject(env)
        env.use_utility_code(UtilityCode.load_cached("CreateClass", "ObjectHandling.c"))
        return self

    def may_be_none(self):
        return True

    gil_message = "Constructing Python class"

    def generate_result_code(self, code):
        cname = code.intern_identifier(self.name)

        if self.doc:
            # store the docstring into the class dict before creating the class
            code.put_error_if_neg(self.pos,
                'PyDict_SetItem(%s, %s, %s)' % (
                    self.dict.py_result(),
                    code.intern_identifier(
                        StringEncoding.EncodedString("__doc__")),
                    self.doc.py_result()))
        py_mod_name = self.get_py_mod_name(code)
        qualname = self.get_py_qualified_name(code)
        code.putln(
            '%s = __Pyx_CreateClass(%s, %s, %s, %s, %s); %s' % (
                self.result(),
                self.bases.py_result(),
                self.dict.py_result(),
                cname,
                qualname,
                py_mod_name,
                code.error_goto_if_null(self.result(), self.pos)))
        code.put_gotref(self.py_result())
class Py3ClassNode(ExprNode):
    #  Helper class used in the implementation of Python3+
    #  class definitions. Constructs a class object given
    #  a name, tuple of bases and class dictionary.
    #
    #  name                 EncodedString      Name of the class
    #  dict                 ExprNode           Class dict (not owned by this node)
    #  module_name          EncodedString      Name of defining module
    #  calculate_metaclass  bool               should call CalculateMetaclass()
    #  allow_py2_metaclass  bool               should look for Py2 metaclass
    #
    # NOTE(review): generate_result_code() also reads self.mkw,
    # self.metaclass and self.bases, which are presumably assigned by the
    # enclosing class-definition node — confirm against the caller.

    subexprs = []
    type = py_object_type
    is_temp = True

    def infer_type(self, env):
        # TODO: could return 'type' in some cases
        return py_object_type

    def analyse_types(self, env):
        return self

    def may_be_none(self):
        return True

    gil_message = "Constructing Python class"

    def generate_result_code(self, code):
        code.globalstate.use_utility_code(UtilityCode.load_cached("Py3ClassCreate", "ObjectHandling.c"))
        cname = code.intern_identifier(self.name)
        if self.mkw:
            mkw = self.mkw.py_result()
        else:
            mkw = 'NULL'
        if self.metaclass:
            metaclass = self.metaclass.result()
        else:
            metaclass = "((PyObject*)&__Pyx_DefaultClassType)"
        code.putln(
            '%s = __Pyx_Py3ClassCreate(%s, %s, %s, %s, %s, %d, %d); %s' % (
                self.result(),
                metaclass,
                cname,
                self.bases.py_result(),
                self.dict.py_result(),
                mkw,
                self.calculate_metaclass,
                self.allow_py2_metaclass,
                code.error_goto_if_null(self.result(), self.pos)))
        code.put_gotref(self.py_result())
class PyClassMetaclassNode(ExprNode):
    # Helper class holds Python3 metaclass object
    #
    #  bases        ExprNode           Base class tuple (not owned by this node)
    #  mkw          ExprNode           Class keyword arguments (not owned by this node)

    subexprs = []

    def analyse_types(self, env):
        self.type = py_object_type
        self.is_temp = True
        return self

    def may_be_none(self):
        return True

    def generate_result_code(self, code):
        # With class keyword arguments a 'metaclass' keyword may override
        # the calculated metaclass; otherwise compute it from the bases only.
        if self.mkw:
            code.globalstate.use_utility_code(
                UtilityCode.load_cached("Py3MetaclassGet", "ObjectHandling.c"))
            call = "__Pyx_Py3MetaclassGet(%s, %s)" % (
                self.bases.result(),
                self.mkw.result())
        else:
            code.globalstate.use_utility_code(
                UtilityCode.load_cached("CalculateMetaclass", "ObjectHandling.c"))
            call = "__Pyx_CalculateMetaclass(NULL, %s)" % (
                self.bases.result())
        code.putln(
            "%s = %s; %s" % (
                self.result(), call,
                code.error_goto_if_null(self.result(), self.pos)))
        code.put_gotref(self.py_result())
class PyClassNamespaceNode(ExprNode, ModuleNameMixin):
    # Helper class holds Python3 namespace object
    #
    # All this are not owned by this node
    #  metaclass    ExprNode           Metaclass object
    #  bases        ExprNode           Base class tuple
    #  mkw          ExprNode           Class keyword arguments
    #  doc          ExprNode or None   Doc string (owned)

    subexprs = ['doc']

    def analyse_types(self, env):
        if self.doc:
            self.doc = self.doc.analyse_types(env)
            self.doc = self.doc.coerce_to_pyobject(env)
        self.type = py_object_type
        self.is_temp = 1
        return self

    def may_be_none(self):
        return True

    def generate_result_code(self, code):
        cname = code.intern_identifier(self.name)
        py_mod_name = self.get_py_mod_name(code)
        qualname = self.get_py_qualified_name(code)
        # Optional operands are passed as NULL rather than omitted.
        if self.doc:
            doc_code = self.doc.result()
        else:
            doc_code = '(PyObject *) NULL'
        if self.mkw:
            mkw = self.mkw.py_result()
        else:
            mkw = '(PyObject *) NULL'
        if self.metaclass:
            metaclass = self.metaclass.result()
        else:
            metaclass = "(PyObject *) NULL"
        code.putln(
            "%s = __Pyx_Py3MetaclassPrepare(%s, %s, %s, %s, %s, %s, %s); %s" % (
                self.result(),
                metaclass,
                self.bases.result(),
                cname,
                qualname,
                mkw,
                py_mod_name,
                doc_code,
                code.error_goto_if_null(self.result(), self.pos)))
        code.put_gotref(self.py_result())
class ClassCellInjectorNode(ExprNode):
    # Initialize CyFunction.func_classobj
    # Inactive unless the class body actually needs the __class__ cell
    # (is_active is toggled externally).
    is_temp = True
    type = py_object_type
    subexprs = []
    is_active = False

    def analyse_expressions(self, env):
        if self.is_active:
            env.use_utility_code(
                UtilityCode.load_cached("CyFunctionClassCell", "CythonFunction.c"))
        return self

    def generate_evaluation_code(self, code):
        if self.is_active:
            # A list collecting the functions whose class cell must be
            # filled in once the class object exists.
            self.allocate_temp_result(code)
            code.putln(
                '%s = PyList_New(0); %s' % (
                    self.result(),
                    code.error_goto_if_null(self.result(), self.pos)))
            code.put_gotref(self.result())

    def generate_injection_code(self, code, classobj_cname):
        # Called after class creation to patch the collected functions.
        if self.is_active:
            code.put_error_if_neg(self.pos, '__Pyx_CyFunction_InitClassCell(%s, %s)' % (
                self.result(), classobj_cname))
class ClassCellNode(ExprNode):
    # Class Cell for noargs super()
    subexprs = []
    is_temp = True
    is_generator = False
    type = py_object_type

    def analyse_types(self, env):
        return self

    def generate_result_code(self, code):
        # The class object is stored on the CyFunction itself, or on the
        # generator object when inside a generator body.
        if not self.is_generator:
            code.putln('%s = __Pyx_CyFunction_GetClassObj(%s);' % (
                self.result(),
                Naming.self_cname))
        else:
            code.putln('%s = %s->classobj;' % (
                self.result(), Naming.generator_cname))
        # mirror CPython's error for super() outside a populated class cell
        code.putln(
            'if (!%s) { PyErr_SetString(PyExc_SystemError, '
            '"super(): empty __class__ cell"); %s }' % (
                self.result(),
                code.error_goto(self.pos)))
        code.put_incref(self.result(), py_object_type)
class BoundMethodNode(ExprNode):
    #  Helper class used in the implementation of Python
    #  class definitions. Constructs a bound method
    #  object from a class and a function.
    #
    #  function      ExprNode   Function object
    #  self_object   ExprNode   self object

    subexprs = ['function']

    def analyse_types(self, env):
        self.function = self.function.analyse_types(env)
        self.type = py_object_type
        self.is_temp = 1
        return self

    gil_message = "Constructing a bound method"

    def generate_result_code(self, code):
        # Bind the function to self_object; the third argument is the
        # object's type (ob_type), as required by __Pyx_PyMethod_New.
        code.putln(
            "%s = __Pyx_PyMethod_New(%s, %s, (PyObject*)%s->ob_type); %s" % (
                self.result(),
                self.function.py_result(),
                self.self_object.py_result(),
                self.self_object.py_result(),
                code.error_goto_if_null(self.result(), self.pos)))
        code.put_gotref(self.py_result())
class UnboundMethodNode(ExprNode):
    #  Helper class used in the implementation of Python
    #  class definitions. Constructs an unbound method
    #  object from a class and a function.
    #
    #  function      ExprNode   Function object

    type = py_object_type
    is_temp = 1

    subexprs = ['function']

    def analyse_types(self, env):
        self.function = self.function.analyse_types(env)
        return self

    def may_be_none(self):
        return False

    gil_message = "Constructing an unbound method"

    def generate_result_code(self, code):
        # The enclosing class is taken from the innermost class currently
        # being generated (top of the pyclass stack).
        class_cname = code.pyclass_stack[-1].classobj.result()
        code.putln(
            "%s = __Pyx_PyMethod_New(%s, 0, %s); %s" % (
                self.result(),
                self.function.py_result(),
                class_cname,
                code.error_goto_if_null(self.result(), self.pos)))
        code.put_gotref(self.py_result())
class PyCFunctionNode(ExprNode, ModuleNameMixin):
    #  Helper class used in the implementation of Python
    #  functions.  Constructs a PyCFunction object
    #  from a PyMethodDef struct.
    #
    #  pymethdef_cname   string             PyMethodDef structure
    #  self_object       ExprNode or None
    #  binding           bool
    #  def_node          DefNode            the Python function node
    #  module_name       EncodedString      Name of defining module
    #  code_object       CodeObjectNode     the PyCodeObject creator node

    subexprs = ['code_object', 'defaults_tuple', 'defaults_kwdict',
                'annotations_dict']

    self_object = None
    code_object = None
    binding = False
    def_node = None
    defaults = None
    defaults_struct = None
    defaults_pyobjects = 0
    defaults_tuple = None
    defaults_kwdict = None
    annotations_dict = None

    type = py_object_type
    is_temp = 1

    specialized_cpdefs = None
    is_specialization = False

    @classmethod
    def from_defnode(cls, node, binding):
        # Factory used when wrapping a DefNode; fused/specialized cpdef
        # functions always require binding semantics.
        return cls(node.pos,
                   def_node=node,
                   pymethdef_cname=node.entry.pymethdef_cname,
                   binding=binding or node.specialized_cpdefs,
                   specialized_cpdefs=node.specialized_cpdefs,
                   code_object=CodeObjectNode(node))

    def analyse_types(self, env):
        if self.binding:
            self.analyse_default_args(env)
        return self

    def analyse_default_args(self, env):
        """
        Handle non-literal function's default arguments.
        """
        # Partition default arguments: literal defaults go straight into
        # __defaults__/__kwdefaults__; non-literal defaults are stored in a
        # per-function C struct and read out at call time.
        nonliteral_objects = []
        nonliteral_other = []
        default_args = []
        default_kwargs = []
        annotations = []
        for arg in self.def_node.args:
            if arg.default:
                if not arg.default.is_literal:
                    arg.is_dynamic = True
                    if arg.type.is_pyobject:
                        nonliteral_objects.append(arg)
                    else:
                        nonliteral_other.append(arg)
                else:
                    arg.default = DefaultLiteralArgNode(arg.pos, arg.default)
                if arg.kw_only:
                    default_kwargs.append(arg)
                else:
                    default_args.append(arg)
            if arg.annotation:
                arg.annotation = arg.annotation.analyse_types(env)
                if not arg.annotation.type.is_pyobject:
                    arg.annotation = arg.annotation.coerce_to_pyobject(env)
                annotations.append((arg.pos, arg.name, arg.annotation))
        # *args / **kwargs can carry annotations too
        for arg in (self.def_node.star_arg, self.def_node.starstar_arg):
            if arg and arg.annotation:
                arg.annotation = arg.annotation.analyse_types(env)
                if not arg.annotation.type.is_pyobject:
                    arg.annotation = arg.annotation.coerce_to_pyobject(env)
                annotations.append((arg.pos, arg.name, arg.annotation))
        if self.def_node.return_type_annotation:
            annotations.append((self.def_node.return_type_annotation.pos,
                               StringEncoding.EncodedString("return"),
                               self.def_node.return_type_annotation))

        if nonliteral_objects or nonliteral_other:
            # Declare a C struct holding all non-literal default values;
            # Python objects first so the runtime can traverse them.
            module_scope = env.global_scope()
            cname = module_scope.next_id(Naming.defaults_struct_prefix)
            scope = Symtab.StructOrUnionScope(cname)
            self.defaults = []
            for arg in nonliteral_objects:
                entry = scope.declare_var(arg.name, arg.type, None,
                                          Naming.arg_prefix + arg.name,
                                          allow_pyobject=True)
                self.defaults.append((arg, entry))
            for arg in nonliteral_other:
                entry = scope.declare_var(arg.name, arg.type, None,
                                          Naming.arg_prefix + arg.name,
                                          allow_pyobject=False)
                self.defaults.append((arg, entry))
            entry = module_scope.declare_struct_or_union(
                None, 'struct', scope, 1, None, cname=cname)
            self.defaults_struct = scope
            self.defaults_pyobjects = len(nonliteral_objects)
            for arg, entry in self.defaults:
                arg.default_value = '%s->%s' % (
                    Naming.dynamic_args_cname, entry.cname)
            self.def_node.defaults_struct = self.defaults_struct.name

        if default_args or default_kwargs:
            if self.defaults_struct is None:
                # all defaults are literals => build the __defaults__ tuple
                # and __kwdefaults__ dict as plain constants
                if default_args:
                    defaults_tuple = TupleNode(self.pos, args=[
                        arg.default for arg in default_args])
                    self.defaults_tuple = defaults_tuple.analyse_types(env).coerce_to_pyobject(env)
                if default_kwargs:
                    defaults_kwdict = DictNode(self.pos, key_value_pairs=[
                        DictItemNode(
                            arg.pos,
                            key=IdentifierStringNode(arg.pos, value=arg.name),
                            value=arg.default)
                        for arg in default_kwargs])
                    self.defaults_kwdict = defaults_kwdict.analyse_types(env)
            else:
                # dynamic defaults => __defaults__/__kwdefaults__ must be
                # computed lazily through a generated getter function
                if default_args:
                    defaults_tuple = DefaultsTupleNode(
                        self.pos, default_args, self.defaults_struct)
                else:
                    defaults_tuple = NoneNode(self.pos)
                if default_kwargs:
                    defaults_kwdict = DefaultsKwDictNode(
                        self.pos, default_kwargs, self.defaults_struct)
                else:
                    defaults_kwdict = NoneNode(self.pos)

                defaults_getter = Nodes.DefNode(
                    self.pos, args=[], star_arg=None, starstar_arg=None,
                    body=Nodes.ReturnStatNode(
                        self.pos, return_type=py_object_type,
                        value=TupleNode(
                            self.pos, args=[defaults_tuple, defaults_kwdict])),
                    decorators=None,
                    name=StringEncoding.EncodedString("__defaults__"))
                defaults_getter.analyse_declarations(env)
                defaults_getter = defaults_getter.analyse_expressions(env)
                defaults_getter.body = defaults_getter.body.analyse_expressions(
                    defaults_getter.local_scope)
                defaults_getter.py_wrapper_required = False
                defaults_getter.pymethdef_required = False
                self.def_node.defaults_getter = defaults_getter
        if annotations:
            annotations_dict = DictNode(self.pos, key_value_pairs=[
                DictItemNode(
                    pos, key=IdentifierStringNode(pos, value=name),
                    value=value)
                for pos, name, value in annotations])
            self.annotations_dict = annotations_dict.analyse_types(env)

    def may_be_none(self):
        return False

    gil_message = "Constructing Python function"

    def self_result_code(self):
        # C expression for the 'self' argument of the PyCFunction.
        if self.self_object is None:
            self_result = "NULL"
        else:
            self_result = self.self_object.py_result()
        return self_result

    def generate_result_code(self, code):
        if self.binding:
            self.generate_cyfunction_code(code)
        else:
            self.generate_pycfunction_code(code)

    def generate_pycfunction_code(self, code):
        # Plain (non-binding) function: a simple PyCFunction object.
        py_mod_name = self.get_py_mod_name(code)
        code.putln(
            '%s = PyCFunction_NewEx(&%s, %s, %s); %s' % (
                self.result(),
                self.pymethdef_cname,
                self.self_result_code(),
                py_mod_name,
                code.error_goto_if_null(self.result(), self.pos)))

        code.put_gotref(self.py_result())

    def generate_cyfunction_code(self, code):
        # Binding function: a CyFunction (or FusedFunction) carrying code
        # object, defaults, annotations and class-cell information.
        if self.specialized_cpdefs:
            def_node = self.specialized_cpdefs[0]
        else:
            def_node = self.def_node

        if self.specialized_cpdefs or self.is_specialization:
            code.globalstate.use_utility_code(
                UtilityCode.load_cached("FusedFunction", "CythonFunction.c"))
            constructor = "__pyx_FusedFunction_NewEx"
        else:
            code.globalstate.use_utility_code(
                UtilityCode.load_cached("CythonFunction", "CythonFunction.c"))
            constructor = "__Pyx_CyFunction_NewEx"

        if self.code_object:
            code_object_result = self.code_object.py_result()
        else:
            code_object_result = 'NULL'

        flags = []
        if def_node.is_staticmethod:
            flags.append('__Pyx_CYFUNCTION_STATICMETHOD')
        elif def_node.is_classmethod:
            flags.append('__Pyx_CYFUNCTION_CLASSMETHOD')

        if def_node.local_scope.parent_scope.is_c_class_scope:
            flags.append('__Pyx_CYFUNCTION_CCLASS')

        if flags:
            flags = ' | '.join(flags)
        else:
            flags = '0'

        code.putln(
            '%s = %s(&%s, %s, %s, %s, %s, %s, %s); %s' % (
                self.result(),
                constructor,
                self.pymethdef_cname,
                flags,
                self.get_py_qualified_name(code),
                self.self_result_code(),
                self.get_py_mod_name(code),
                Naming.moddict_cname,
                code_object_result,
                code.error_goto_if_null(self.result(), self.pos)))

        code.put_gotref(self.py_result())

        if def_node.requires_classobj:
            # register with the class-cell injector list so __class__ can
            # be patched in after the class object is created
            assert code.pyclass_stack, "pyclass_stack is empty"
            class_node = code.pyclass_stack[-1]
            code.put_incref(self.py_result(), py_object_type)
            code.putln(
                'PyList_Append(%s, %s);' % (
                    class_node.class_cell.result(),
                    self.result()))
            code.put_giveref(self.py_result())

        if self.defaults:
            code.putln(
                'if (!__Pyx_CyFunction_InitDefaults(%s, sizeof(%s), %d)) %s' % (
                    self.result(), self.defaults_struct.name,
                    self.defaults_pyobjects, code.error_goto(self.pos)))
            defaults = '__Pyx_CyFunction_Defaults(%s, %s)' % (
                self.defaults_struct.name, self.result())
            for arg, entry in self.defaults:
                arg.generate_assignment_code(code, target='%s->%s' % (
                    defaults, entry.cname))

        if self.defaults_tuple:
            code.putln('__Pyx_CyFunction_SetDefaultsTuple(%s, %s);' % (
                self.result(), self.defaults_tuple.py_result()))
        if self.defaults_kwdict:
            code.putln('__Pyx_CyFunction_SetDefaultsKwDict(%s, %s);' % (
                self.result(), self.defaults_kwdict.py_result()))
        if def_node.defaults_getter:
            code.putln('__Pyx_CyFunction_SetDefaultsGetter(%s, %s);' % (
                self.result(), def_node.defaults_getter.entry.pyfunc_cname))
        if self.annotations_dict:
            code.putln('__Pyx_CyFunction_SetAnnotationsDict(%s, %s);' % (
                self.result(), self.annotations_dict.py_result()))
class InnerFunctionNode(PyCFunctionNode):
    """PyCFunctionNode variant for closures: the function receives the
    current closure scope object as its 'self' argument.
    """
    binding = True
    needs_self_code = True

    def self_result_code(self):
        # Guard clause: without a closure scope, behave like a plain function.
        if not self.needs_self_code:
            return "NULL"
        return "((PyObject*)%s)" % Naming.cur_scope_cname
class CodeObjectNode(ExprNode):
    # Create a PyCodeObject for a CyFunction instance.
    #
    # def_node   DefNode    the Python function node
    # varnames   TupleNode  a tuple with all local variable names
    #
    # The code object is emitted as a cached module-level constant.

    subexprs = ['varnames']
    is_temp = False
    result_code = None

    def __init__(self, def_node):
        ExprNode.__init__(self, def_node.pos, def_node=def_node)
        args = list(def_node.args)
        # if we have args/kwargs, then the first two in var_entries are those
        local_vars = [arg for arg in def_node.local_scope.var_entries if arg.name]
        self.varnames = TupleNode(
            def_node.pos,
            args=[IdentifierStringNode(arg.pos, value=arg.name)
                  for arg in args + local_vars],
            is_temp=0,
            is_literal=1)

    def may_be_none(self):
        return False

    def calculate_result_code(self, code=None):
        # Lazily allocate the module-level constant slot for the code object.
        if self.result_code is None:
            self.result_code = code.get_py_const(py_object_type, 'codeobj', cleanup_level=2)
        return self.result_code

    def generate_result_code(self, code):
        if self.result_code is None:
            self.result_code = code.get_py_const(py_object_type, 'codeobj', cleanup_level=2)

        # The code object is initialised with the other cached constants,
        # not inline at the use site.
        code = code.get_cached_constants_writer()
        code.mark_pos(self.pos)
        func = self.def_node
        func_name = code.get_py_string_const(
            func.name, identifier=True, is_str=False, unicode_value=func.name)
        # FIXME: better way to get the module file path at module init time? Encoding to use?
        file_path = StringEncoding.bytes_literal(func.pos[0].get_filenametable_entry().encode('utf8'), 'utf8')
        file_path_const = code.get_py_string_const(file_path, identifier=False, is_str=True)

        flags = []
        if self.def_node.star_arg:
            flags.append('CO_VARARGS')
        if self.def_node.starstar_arg:
            flags.append('CO_VARKEYWORDS')

        code.putln("%s = (PyObject*)__Pyx_PyCode_New(%d, %d, %d, 0, %s, %s, %s, %s, %s, %s, %s, %s, %s, %d, %s); %s" % (
            self.result_code,
            len(func.args) - func.num_kwonly_args,  # argcount
            func.num_kwonly_args,      # kwonlyargcount (Py3 only)
            len(self.varnames.args),   # nlocals
            '|'.join(flags) or '0',    # flags
            Naming.empty_bytes,        # code
            Naming.empty_tuple,        # consts
            Naming.empty_tuple,        # names (FIXME)
            self.varnames.result(),    # varnames
            Naming.empty_tuple,        # freevars (FIXME)
            Naming.empty_tuple,        # cellvars (FIXME)
            file_path_const,           # filename
            func_name,                 # name
            self.pos[1],               # firstlineno
            Naming.empty_bytes,        # lnotab
            code.error_goto_if_null(self.result_code, self.pos),
            ))
class DefaultLiteralArgNode(ExprNode):
    """Wrapper for a literal default argument value of a CyFunction.

    Guarantees that the wrapped literal is evaluated at most once, however
    often generate_evaluation_code() is invoked.
    """
    subexprs = []
    is_literal = True
    is_temp = False

    def __init__(self, pos, arg):
        super(DefaultLiteralArgNode, self).__init__(pos)
        self.arg = arg
        self.type = self.arg.type
        self.evaluated = False

    def analyse_types(self, env):
        # The wrapped literal has already been analysed.
        return self

    def generate_result_code(self, code):
        # Nothing to emit here; see generate_evaluation_code().
        pass

    def generate_evaluation_code(self, code):
        # Guard clause keeps repeated calls from re-evaluating the literal.
        if self.evaluated:
            return
        self.arg.generate_evaluation_code(code)
        self.evaluated = True

    def result(self):
        # Cast to the declared type of the wrapped literal.
        return self.type.cast_code(self.arg.result())
class DefaultNonLiteralArgNode(ExprNode):
    """CyFunction's non-literal argument default value, fetched from the
    function's defaults struct at call time."""

    subexprs = []

    def __init__(self, pos, arg, defaults_struct):
        super(DefaultNonLiteralArgNode, self).__init__(pos)
        self.arg = arg
        self.defaults_struct = defaults_struct

    def analyse_types(self, env):
        self.type = self.arg.type
        self.is_temp = False
        return self

    def generate_result_code(self, code):
        # The value already lives in the defaults struct; nothing to emit.
        pass

    def result(self):
        struct = self.defaults_struct
        member_cname = struct.lookup(self.arg.name).cname
        return '__Pyx_CyFunction_Defaults(%s, %s)->%s' % (
            struct.name, Naming.self_cname, member_cname)
class DefaultsTupleNode(TupleNode):
    """CyFunction's __defaults__ tuple."""

    def __init__(self, pos, defaults, defaults_struct):
        # Literal defaults are used directly; non-literal ones are read
        # from the defaults struct at call time.
        args = [
            arg.default if arg.default.is_literal
            else DefaultNonLiteralArgNode(pos, arg, defaults_struct)
            for arg in defaults
        ]
        super(DefaultsTupleNode, self).__init__(pos, args=args)

    def analyse_types(self, env, skip_children=False):
        node = super(DefaultsTupleNode, self).analyse_types(env, skip_children)
        return node.coerce_to_pyobject(env)
class DefaultsKwDictNode(DictNode):
    """CyFunction's __kwdefaults__ dict."""

    def __init__(self, pos, defaults, defaults_struct):
        items = []
        for arg in defaults:
            key = IdentifierStringNode(arg.pos, value=arg.name)
            if arg.default.is_literal:
                value = arg.default
            else:
                value = DefaultNonLiteralArgNode(pos, arg, defaults_struct)
            items.append(DictItemNode(value.pos, key=key, value=value))
        super(DefaultsKwDictNode, self).__init__(pos, key_value_pairs=items)
class LambdaNode(InnerFunctionNode):
    # Lambda expression node (only used as a function reference)
    #
    # args          [CArgDeclNode]         formal arguments
    # star_arg      PyArgDeclNode or None  * argument
    # starstar_arg  PyArgDeclNode or None  ** argument
    # lambda_name   string                 a module-globally unique lambda name
    # result_expr   ExprNode
    # def_node      DefNode                the underlying function 'def' node

    child_attrs = ['def_node']

    name = StringEncoding.EncodedString('<lambda>')

    def analyse_declarations(self, env):
        # Assign a unique name to the underlying def node and declare it
        # without synthesising a module-level assignment for it.
        self.lambda_name = self.def_node.lambda_name = env.next_id('lambda')
        self.def_node.no_assignment_synthesis = True
        self.def_node.pymethdef_required = True
        self.def_node.analyse_declarations(env)
        self.def_node.is_cyfunction = True
        self.pymethdef_cname = self.def_node.entry.pymethdef_cname
        env.add_lambda_def(self.def_node)

    def analyse_types(self, env):
        self.def_node = self.def_node.analyse_expressions(env)
        return super(LambdaNode, self).analyse_types(env)

    def generate_result_code(self, code):
        # Generate the function body first, then the reference to it.
        self.def_node.generate_execution_code(code)
        super(LambdaNode, self).generate_result_code(code)
class GeneratorExpressionNode(LambdaNode):
    # A generator expression, e.g. (i for i in range(10))
    #
    # Result is a generator.
    #
    # loop      ForStatNode   the for-loop, containing a YieldExprNode
    # def_node  DefNode       the underlying generator 'def' node

    name = StringEncoding.EncodedString('genexpr')
    binding = False

    def analyse_declarations(self, env):
        self.genexpr_name = env.next_id('genexpr')
        super(GeneratorExpressionNode, self).analyse_declarations(env)
        # No pymethdef required
        self.def_node.pymethdef_required = False
        self.def_node.py_wrapper_required = False
        self.def_node.is_cyfunction = False
        # Force genexpr signature
        self.def_node.entry.signature = TypeSlots.pyfunction_noargs

    def generate_result_code(self, code):
        # Instantiate the generator by calling its C implementation function
        # directly (no CyFunction wrapper is created, see above).
        code.putln(
            '%s = %s(%s); %s' % (
                self.result(),
                self.def_node.entry.pyfunc_cname,
                self.self_result_code(),
                code.error_goto_if_null(self.result(), self.pos)))
        code.put_gotref(self.py_result())
class YieldExprNode(ExprNode):
    # Yield expression node
    #
    # arg            ExprNode   the value to return from the generator
    # label_num      integer    yield label number
    # is_yield_from  boolean    is a YieldFromExprNode to delegate to another generator

    subexprs = ['arg']
    type = py_object_type
    label_num = 0
    is_yield_from = False
    is_await = False
    expr_keyword = 'yield'

    def analyse_types(self, env):
        # label_num == 0 means we are not inside a generator body.
        if not self.label_num:
            error(self.pos, "'%s' not supported here" % self.expr_keyword)
        self.is_temp = 1
        if self.arg is not None:
            self.arg = self.arg.analyse_types(env)
            if not self.arg.type.is_pyobject:
                self.coerce_yield_argument(env)
        return self

    def coerce_yield_argument(self, env):
        # Overridden by subclasses to restrict what may be yielded.
        self.arg = self.arg.coerce_to_pyobject(env)

    def generate_evaluation_code(self, code):
        # Move the yielded value (or None for a bare 'yield') into the
        # generator's return-value slot, then emit the suspend/resume code.
        if self.arg:
            self.arg.generate_evaluation_code(code)
            self.arg.make_owned_reference(code)
            code.putln(
                "%s = %s;" % (
                    Naming.retval_cname,
                    self.arg.result_as(py_object_type)))
            self.arg.generate_post_assignment_code(code)
            self.arg.free_temps(code)
        else:
            code.put_init_to_py_none(Naming.retval_cname, py_object_type)
        self.generate_yield_code(code)

    def generate_yield_code(self, code):
        """
        Generate the code to return the argument in 'Naming.retval_cname'
        and to continue at the yield label.
        """
        label_num, label_name = code.new_yield_label()
        code.use_label(label_name)

        # Spill all temps that are live across the yield into the closure
        # scope so they survive the suspension; restored after the label.
        saved = []
        code.funcstate.closure_temps.reset()
        for cname, type, manage_ref in code.funcstate.temps_in_use():
            save_cname = code.funcstate.closure_temps.allocate_temp(type)
            saved.append((cname, save_cname, type))
            if type.is_pyobject:
                code.put_xgiveref(cname)
            code.putln('%s->%s = %s;' % (Naming.cur_scope_cname, save_cname, cname))

        code.put_xgiveref(Naming.retval_cname)
        code.put_finish_refcount_context()
        code.putln("/* return from generator, yielding value */")
        # Record where to resume, then return the yielded value to the caller.
        code.putln("%s->resume_label = %d;" % (
            Naming.generator_cname, label_num))
        code.putln("return %s;" % Naming.retval_cname)

        # Resume point: restore the spilled temps from the closure scope.
        code.put_label(label_name)
        for cname, save_cname, type in saved:
            code.putln('%s = %s->%s;' % (cname, Naming.cur_scope_cname, save_cname))
            if type.is_pyobject:
                code.putln('%s->%s = 0;' % (Naming.cur_scope_cname, save_cname))
                code.put_xgotref(cname)
        # The value passed back into the generator (sent_value_cname) becomes
        # the result of the yield expression; NULL signals an error/close.
        code.putln(code.error_goto_if_null(Naming.sent_value_cname, self.pos))
        if self.result_is_used:
            self.allocate_temp_result(code)
            code.put('%s = %s; ' % (self.result(), Naming.sent_value_cname))
            code.put_incref(self.result(), py_object_type)
class YieldFromExprNode(YieldExprNode):
    # "yield from GEN" expression
    is_yield_from = True
    expr_keyword = 'yield from'

    def coerce_yield_argument(self, env):
        if not self.arg.type.is_string:
            # FIXME: support C arrays and C++ iterators?
            error(self.pos, "yielding from non-Python object not supported")
        self.arg = self.arg.coerce_to_pyobject(env)

    def yield_from_func(self, code):
        # Name of the C helper that drives the sub-generator.
        code.globalstate.use_utility_code(UtilityCode.load_cached("GeneratorYieldFrom", "Coroutine.c"))
        return "__Pyx_Generator_Yield_From"

    def generate_evaluation_code(self, code, source_cname=None, decref_source=False):
        # If 'source_cname' is given, delegate to that C name directly
        # instead of evaluating self.arg.
        if source_cname is None:
            self.arg.generate_evaluation_code(code)
        code.putln("%s = %s(%s, %s);" % (
            Naming.retval_cname,
            self.yield_from_func(code),
            Naming.generator_cname,
            self.arg.py_result() if source_cname is None else source_cname))
        if source_cname is None:
            self.arg.generate_disposal_code(code)
            self.arg.free_temps(code)
        elif decref_source:
            code.put_decref_clear(source_cname, py_object_type)
        code.put_xgotref(Naming.retval_cname)

        # Non-NULL retval => the sub-generator yielded a value: suspend here.
        code.putln("if (likely(%s)) {" % Naming.retval_cname)
        self.generate_yield_code(code)
        code.putln("} else {")
        # either error or sub-generator has normally terminated: return value => node result
        if self.result_is_used:
            self.fetch_iteration_result(code)
        else:
            self.handle_iteration_exception(code)
        code.putln("}")

    def fetch_iteration_result(self, code):
        # YieldExprNode has allocated the result temp for us
        code.putln("%s = NULL;" % self.result())
        code.put_error_if_neg(self.pos, "__Pyx_PyGen_FetchStopIterationValue(&%s)" % self.result())
        code.put_gotref(self.result())

    def handle_iteration_exception(self, code):
        # Swallow StopIteration (normal termination); propagate anything else.
        code.putln("PyObject* exc_type = PyErr_Occurred();")
        code.putln("if (exc_type) {")
        code.putln("if (likely(exc_type == PyExc_StopIteration ||"
                   " PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();")
        code.putln("else %s" % code.error_goto(self.pos))
        code.putln("}")
class AwaitExprNode(YieldFromExprNode):
    # 'await' expression node
    #
    # arg         ExprNode   the Awaitable value to await
    # label_num   integer    yield label number

    is_await = True
    expr_keyword = 'await'

    def coerce_yield_argument(self, env):
        if self.arg is not None:
            # FIXME: use same check as in YieldFromExprNode.coerce_yield_argument() ?
            self.arg = self.arg.coerce_to_pyobject(env)

    def yield_from_func(self, code):
        # Coroutine variant of the yield-from helper.
        code.globalstate.use_utility_code(UtilityCode.load_cached("CoroutineYieldFrom", "Coroutine.c"))
        return "__Pyx_Coroutine_Yield_From"
class AwaitIterNextExprNode(AwaitExprNode):
    # 'await' expression node as part of 'async for' iteration
    #
    # Breaks out of loop on StopAsyncIteration exception.

    def fetch_iteration_result(self, code):
        assert code.break_label, "AwaitIterNextExprNode outside of 'async for' loop"
        code.globalstate.use_utility_code(UtilityCode.load_cached("StopAsyncIteration", "Coroutine.c"))
        # StopAsyncIteration terminates the enclosing 'async for' loop cleanly.
        code.putln("PyObject* exc_type = PyErr_Occurred();")
        code.putln("if (exc_type && likely(exc_type == __Pyx_PyExc_StopAsyncIteration ||"
                   " PyErr_GivenExceptionMatches(exc_type, __Pyx_PyExc_StopAsyncIteration))) {")
        code.putln("PyErr_Clear();")
        code.putln("break;")
        code.putln("}")
        super(AwaitIterNextExprNode, self).fetch_iteration_result(code)
class GlobalsExprNode(AtomicExprNode):
    # The builtin globals() expression, evaluated via the __Pyx_Globals()
    # utility function.
    type = dict_type
    is_temp = 1

    def analyse_types(self, env):
        env.use_utility_code(Builtin.globals_utility_code)
        return self

    gil_message = "Constructing globals dict"

    def may_be_none(self):
        # __Pyx_Globals() either returns a dict or raises (NULL check below).
        return False

    def generate_result_code(self, code):
        code.putln('%s = __Pyx_Globals(); %s' % (
            self.result(),
            code.error_goto_if_null(self.result(), self.pos)))
        code.put_gotref(self.result())
class LocalsDictItemNode(DictItemNode):
    # Dict item for a locals() dict.  The value is dropped (set to None)
    # when it cannot be coerced to a Python object; such items are filtered
    # out afterwards by FuncLocalsExprNode.analyse_types().
    def analyse_types(self, env):
        self.key = self.key.analyse_types(env)
        self.value = self.value.analyse_types(env)
        self.key = self.key.coerce_to_pyobject(env)
        if self.value.type.can_coerce_to_pyobject(env):
            self.value = self.value.coerce_to_pyobject(env)
        else:
            self.value = None
        return self
class FuncLocalsExprNode(DictNode):
    """locals() inside a function: a dict of the function's local variables,
    sorted by name."""

    def __init__(self, pos, env):
        names = sorted(entry.name for entry in env.entries.values() if entry.name)
        pairs = []
        for var_name in names:
            pairs.append(LocalsDictItemNode(
                pos, key=IdentifierStringNode(pos, value=var_name),
                value=NameNode(pos, name=var_name, allow_null=True)))
        DictNode.__init__(self, pos, key_value_pairs=pairs,
                          exclude_null_values=True)

    def analyse_types(self, env):
        node = super(FuncLocalsExprNode, self).analyse_types(env)
        # Drop entries whose value could not be coerced to a Python object
        # (LocalsDictItemNode sets those values to None).
        node.key_value_pairs = [
            item for item in node.key_value_pairs if item.value is not None]
        return node
class PyClassLocalsExprNode(AtomicExprNode):
    # locals() inside a Python class body: reuses the class dict node.
    def __init__(self, pos, pyclass_dict):
        AtomicExprNode.__init__(self, pos)
        self.pyclass_dict = pyclass_dict

    def analyse_types(self, env):
        self.type = self.pyclass_dict.type
        self.is_temp = False
        return self

    def may_be_none(self):
        return False

    def result(self):
        return self.pyclass_dict.result()

    def generate_result_code(self, code):
        # Nothing to do: the result is the already-existing class dict.
        pass
def LocalsExprNode(pos, scope_node, env):
    """Factory: build the appropriate node type for a locals() expression,
    depending on the kind of scope it appears in."""
    if env.is_module_scope:
        node = GlobalsExprNode(pos)
    elif env.is_py_class_scope:
        node = PyClassLocalsExprNode(pos, scope_node.dict)
    else:
        node = FuncLocalsExprNode(pos, env)
    return node
#-------------------------------------------------------------------
#
# Unary operator nodes
#
#-------------------------------------------------------------------
# Maps the unary operator symbol to the function used to evaluate it in
# compile-time (DEF) expressions.
compile_time_unary_operators = {
    'not': operator.not_,
    '~': operator.inv,
    '-': operator.neg,
    '+': operator.pos,
}
class UnopNode(ExprNode):
    #  operator  string
    #  operand   ExprNode
    #
    #  Processing during analyse_expressions phase:
    #
    #    analyse_c_operation
    #      Called when the operand is not a pyobject.
    #      - Check operand type and coerce if needed.
    #      - Determine result type and result code fragment.
    #      - Allocate temporary for result if needed.

    subexprs = ['operand']
    infix = True

    def calculate_constant_result(self):
        func = compile_time_unary_operators[self.operator]
        self.constant_result = func(self.operand.constant_result)

    def compile_time_value(self, denv):
        # Evaluate the operator at compile time (DEF expressions).
        func = compile_time_unary_operators.get(self.operator)
        if not func:
            error(self.pos,
                  "Unary '%s' not supported in compile-time expression"
                  % self.operator)
        operand = self.operand.compile_time_value(denv)
        try:
            return func(operand)
        except Exception as e:
            self.compile_time_value_error(e)

    def infer_type(self, env):
        operand_type = self.operand.infer_type(env)
        if operand_type.is_cpp_class or operand_type.is_ptr:
            # A C++ operator overload may define its own result type.
            cpp_type = operand_type.find_cpp_operation_type(self.operator)
            if cpp_type is not None:
                return cpp_type
        return self.infer_unop_type(env, operand_type)

    def infer_unop_type(self, env, operand_type):
        if operand_type.is_pyobject:
            return py_object_type
        else:
            return operand_type

    def may_be_none(self):
        if self.operand.type and self.operand.type.is_builtin_type:
            if self.operand.type is not type_type:
                return False
        return ExprNode.may_be_none(self)

    def analyse_types(self, env):
        self.operand = self.operand.analyse_types(env)
        if self.is_py_operation():
            self.coerce_operand_to_pyobject(env)
            self.type = py_object_type
            self.is_temp = 1
        elif self.is_cpp_operation():
            self.analyse_cpp_operation(env)
        else:
            self.analyse_c_operation(env)
        return self

    def check_const(self):
        return self.operand.check_const()

    def is_py_operation(self):
        return self.operand.type.is_pyobject or self.operand.type.is_ctuple

    def nogil_check(self, env):
        if self.is_py_operation():
            self.gil_error()

    def is_cpp_operation(self):
        type = self.operand.type
        return type.is_cpp_class

    def coerce_operand_to_pyobject(self, env):
        self.operand = self.operand.coerce_to_pyobject(env)

    def generate_result_code(self, code):
        if self.operand.type.is_pyobject:
            self.generate_py_operation_code(code)
        elif self.is_temp:
            if self.is_cpp_operation() and self.exception_check == '+':
                # C++ operator overload that may throw: wrap in try/catch.
                translate_cpp_exception(code, self.pos,
                    "%s = %s %s;" % (self.result(), self.operator, self.operand.result()),
                    self.exception_value, self.in_nogil_context)
            else:
                code.putln("%s = %s %s;" % (self.result(), self.operator, self.operand.result()))

    def generate_py_operation_code(self, code):
        # Python-object operand: call the corresponding PyNumber_*() function.
        function = self.py_operation_function(code)
        code.putln(
            "%s = %s(%s); %s" % (
                self.result(),
                function,
                self.operand.py_result(),
                code.error_goto_if_null(self.result(), self.pos)))
        code.put_gotref(self.py_result())

    def type_error(self):
        if not self.operand.type.is_error:
            error(self.pos, "Invalid operand type for '%s' (%s)" %
                  (self.operator, self.operand.type))
        self.type = PyrexTypes.error_type

    def analyse_cpp_operation(self, env, overload_check=True):
        entry = env.lookup_operator(self.operator, [self.operand])
        if overload_check and not entry:
            self.type_error()
            return
        if entry:
            self.exception_check = entry.type.exception_check
            self.exception_value = entry.type.exception_value
            if self.exception_check == '+':
                self.is_temp = True
                if self.exception_value is None:
                    env.use_utility_code(UtilityCode.load_cached("CppExceptionConversion", "CppSupport.cpp"))
        else:
            self.exception_check = ''
            self.exception_value = ''
        cpp_type = self.operand.type.find_cpp_operation_type(self.operator)
        if overload_check and cpp_type is None:
            # BUG FIX: previously interpolated the 'type' builtin here (there
            # is no local named 'type' in this method), which printed
            # "<class 'type'>" instead of the operand's type.
            error(self.pos, "'%s' operator not defined for %s" % (
                self.operator, self.operand.type))
            self.type_error()
            return
        self.type = cpp_type
class NotNode(UnopNode):
    #  'not' operator
    #
    #  operand   ExprNode
    #
    # Emits the C '!' operator on a boolean-coerced operand (or dispatches
    # to a C++ operator! overload).
    operator = '!'

    type = PyrexTypes.c_bint_type

    def calculate_constant_result(self):
        self.constant_result = not self.operand.constant_result

    def compile_time_value(self, denv):
        operand = self.operand.compile_time_value(denv)
        try:
            return not operand
        except Exception as e:
            self.compile_time_value_error(e)

    def infer_unop_type(self, env, operand_type):
        return PyrexTypes.c_bint_type

    def analyse_types(self, env):
        self.operand = self.operand.analyse_types(env)
        operand_type = self.operand.type
        if operand_type.is_cpp_class:
            self.analyse_cpp_operation(env)
        else:
            self.operand = self.operand.coerce_to_boolean(env)
        return self

    def calculate_result_code(self):
        return "(!%s)" % self.operand.result()
class UnaryPlusNode(UnopNode):
    """Unary '+' operator."""

    operator = '+'

    def analyse_c_operation(self, env):
        self.type = PyrexTypes.widest_numeric_type(
            self.operand.type, PyrexTypes.c_int_type)

    def py_operation_function(self, code):
        return "PyNumber_Positive"

    def calculate_result_code(self):
        source = self.operand.result()
        # C++ may overload operator+(), so keep the '+' in the generated
        # code there; for plain C types unary plus is a no-op.
        return "(+%s)" % source if self.is_cpp_operation() else source
class UnaryMinusNode(UnopNode):
    #  unary '-' operator

    operator = '-'

    def analyse_c_operation(self, env):
        if self.operand.type.is_numeric:
            self.type = PyrexTypes.widest_numeric_type(
                self.operand.type, PyrexTypes.c_int_type)
        elif self.operand.type.is_enum:
            self.type = PyrexTypes.c_int_type
        else:
            self.type_error()
        if self.type.is_complex:
            # Complex negation is emitted as a function call, not infix '-'.
            self.infix = False

    def py_operation_function(self, code):
        return "PyNumber_Negative"

    def calculate_result_code(self):
        if self.infix:
            return "(-%s)" % self.operand.result()
        else:
            return "%s(%s)" % (self.operand.type.unary_op('-'), self.operand.result())

    def get_constant_c_result_code(self):
        value = self.operand.get_constant_c_result_code()
        if value:
            return "(-%s)" % value
        # Implicitly returns None when the operand has no constant C code.
class TildeNode(UnopNode):
    #  unary '~' operator
    #
    # NOTE(review): unlike its siblings, this class defines no 'operator'
    # class attribute - presumably it is supplied via constructor kwargs
    # (see unop_node_classes / unop_node below); confirm before relying on it.

    def analyse_c_operation(self, env):
        if self.operand.type.is_int:
            self.type = PyrexTypes.widest_numeric_type(
                self.operand.type, PyrexTypes.c_int_type)
        elif self.operand.type.is_enum:
            self.type = PyrexTypes.c_int_type
        else:
            self.type_error()

    def py_operation_function(self, code):
        return "PyNumber_Invert"

    def calculate_result_code(self):
        return "(~%s)" % self.operand.result()
class CUnopNode(UnopNode):
    # Base class for unary operators that exist only at the C level
    # (never a Python-object operation).

    def is_py_operation(self):
        return False
class DereferenceNode(CUnopNode):
    """The C unary '*' (pointer dereference) operator."""

    operator = '*'

    def infer_unop_type(self, env, operand_type):
        return operand_type.base_type if operand_type.is_ptr else PyrexTypes.error_type

    def analyse_c_operation(self, env):
        operand_type = self.operand.type
        if not operand_type.is_ptr:
            self.type_error()
        else:
            self.type = operand_type.base_type

    def calculate_result_code(self):
        return "(*%s)" % self.operand.result()
class DecrementIncrementNode(CUnopNode):
    """Unary '++'/'--' operator, in either prefix or postfix form."""

    def analyse_c_operation(self, env):
        operand_type = self.operand.type
        if operand_type.is_numeric:
            self.type = PyrexTypes.widest_numeric_type(
                operand_type, PyrexTypes.c_int_type)
        elif operand_type.is_ptr:
            self.type = operand_type
        else:
            self.type_error()

    def calculate_result_code(self):
        value = self.operand.result()
        if self.is_prefix:
            return "(%s%s)" % (self.operator, value)
        return "(%s%s)" % (value, self.operator)
def inc_dec_constructor(is_prefix, operator):
    """Return a node factory for the given prefix/postfix ++/-- operator."""
    def make_node(pos, **kwds):
        return DecrementIncrementNode(
            pos, is_prefix=is_prefix, operator=operator, **kwds)
    return make_node
class AmpersandNode(CUnopNode):
    #  The C address-of operator.
    #
    #  operand  ExprNode
    operator = '&'

    def infer_unop_type(self, env, operand_type):
        return PyrexTypes.c_ptr_type(operand_type)

    def analyse_types(self, env):
        self.operand = self.operand.analyse_types(env)
        argtype = self.operand.type
        if argtype.is_cpp_class:
            # C++ classes may overload operator&(); no error if absent.
            self.analyse_cpp_operation(env, overload_check=False)
        if not (argtype.is_cfunction or argtype.is_reference or self.operand.is_addressable()):
            if argtype.is_memoryviewslice:
                self.error("Cannot take address of memoryview slice")
            else:
                self.error("Taking address of non-lvalue (type %s)" % argtype)
            return self
        if argtype.is_pyobject:
            self.error("Cannot take address of Python variable")
            return self
        if not argtype.is_cpp_class or not self.type:
            # self.type may already have been set by a C++ operator& overload.
            self.type = PyrexTypes.c_ptr_type(argtype)
        return self

    def check_const(self):
        return self.operand.check_const_addr()

    def error(self, mess):
        # Report and poison this node's type/result so later phases bail out.
        error(self.pos, mess)
        self.type = PyrexTypes.error_type
        self.result_code = "<error>"

    def calculate_result_code(self):
        return "(&%s)" % self.operand.result()

    def generate_result_code(self, code):
        # Only a throwing C++ operator& overload needs explicit result code;
        # the plain C '&' case is handled via calculate_result_code().
        if (self.operand.type.is_cpp_class and self.exception_check == '+'):
            translate_cpp_exception(code, self.pos,
                "%s = %s %s;" % (self.result(), self.operator, self.operand.result()),
                self.exception_value, self.in_nogil_context)
# Dispatch table used by unop_node(): operator symbol -> node class.
unop_node_classes = {
    "+": UnaryPlusNode,
    "-": UnaryMinusNode,
    "~": TildeNode,
}
def unop_node(pos, operator, operand):
    #  Construct unop node of appropriate class for
    #  given operator.
    if isinstance(operand, IntNode) and operator == '-':
        # Fold '-<int literal>' directly into a literal IntNode.
        return IntNode(pos = operand.pos, value = str(-Utils.str_to_number(operand.value)),
                       longness=operand.longness, unsigned=operand.unsigned)
    elif isinstance(operand, UnopNode) and operand.operator == operator in '+-':
        # NOTE: chained comparison - reads as
        # (operand.operator == operator) and (operator in '+-'),
        # i.e. the operand is '--x' or '++x'; warn that Python has no
        # increment/decrement operator.
        warning(pos, "Python has no increment/decrement operator: %s%sx == %s(%sx) == x" % ((operator,)*4), 5)
    return unop_node_classes[operator](pos,
        operator = operator,
        operand = operand)
class TypecastNode(ExprNode):
    #  C type cast
    #
    #  operand      ExprNode
    #  base_type    CBaseTypeNode
    #  declarator   CDeclaratorNode
    #  typecheck    boolean
    #
    #  If used from a transform, one can if wanted specify the attribute
    #  "type" directly and leave base_type and declarator to None

    subexprs = ['operand']
    base_type = declarator = type = None

    def type_dependencies(self, env):
        return ()

    def infer_type(self, env):
        if self.type is None:
            base_type = self.base_type.analyse(env)
            _, self.type = self.declarator.analyse(base_type, env)
        return self.type

    def analyse_types(self, env):
        if self.type is None:
            base_type = self.base_type.analyse(env)
            _, self.type = self.declarator.analyse(base_type, env)
        if self.operand.has_constant_result():
            # Must be done after self.type is resolved.
            self.calculate_constant_result()
        if self.type.is_cfunction:
            error(self.pos,
                  "Cannot cast to a function type")
            self.type = PyrexTypes.error_type
        self.operand = self.operand.analyse_types(env)
        if self.type is PyrexTypes.c_bint_type:
            # short circuit this to a coercion
            return self.operand.coerce_to_boolean(env)
        to_py = self.type.is_pyobject
        from_py = self.operand.type.is_pyobject
        if from_py and not to_py and self.operand.is_ephemeral():
            if not self.type.is_numeric and not self.type.is_cpp_class:
                error(self.pos, "Casting temporary Python object to non-numeric non-Python type")
        if to_py and not from_py:
            # C value -> Python object
            if self.type is bytes_type and self.operand.type.is_int:
                return CoerceIntToBytesNode(self.operand, env)
            elif self.operand.type.can_coerce_to_pyobject(env):
                self.result_ctype = py_object_type
                base_type = self.base_type.analyse(env)
                self.operand = self.operand.coerce_to(base_type, env)
            else:
                if self.operand.type.is_ptr:
                    if not (self.operand.type.base_type.is_void or self.operand.type.base_type.is_struct):
                        error(self.pos, "Python objects cannot be cast from pointers of primitive types")
                else:
                    # Should this be an error?
                    warning(self.pos, "No conversion from %s to %s, python object pointer used." % (self.operand.type, self.type))
                self.operand = self.operand.coerce_to_simple(env)
        elif from_py and not to_py:
            # Python object -> C value
            if self.type.create_from_py_utility_code(env):
                self.operand = self.operand.coerce_to(self.type, env)
            elif self.type.is_ptr:
                if not (self.type.base_type.is_void or self.type.base_type.is_struct):
                    error(self.pos, "Python objects cannot be cast to pointers of primitive types")
            else:
                warning(self.pos, "No conversion from %s to %s, python object pointer used." % (self.type, self.operand.type))
        elif from_py and to_py:
            if self.typecheck:
                self.operand = PyTypeTestNode(self.operand, self.type, env, notnone=True)
            elif isinstance(self.operand, SliceIndexNode):
                # This cast can influence the created type of string slices.
                self.operand = self.operand.coerce_to(self.type, env)
        elif self.type.is_complex and self.operand.type.is_complex:
            self.operand = self.operand.coerce_to_simple(env)
        elif self.operand.type.is_fused:
            self.operand = self.operand.coerce_to(self.type, env)
            #self.type = self.operand.type
        return self

    def is_simple(self):
        # either temp or a C cast => no side effects other than the operand's
        return self.operand.is_simple()

    def is_ephemeral(self):
        # either temp or a C cast => no side effects other than the operand's
        return self.operand.is_ephemeral()

    def nonlocally_immutable(self):
        return self.is_temp or self.operand.nonlocally_immutable()

    def nogil_check(self, env):
        if self.type and self.type.is_pyobject and self.is_temp:
            self.gil_error()

    def check_const(self):
        return self.operand.check_const()

    def calculate_constant_result(self):
        self.constant_result = self.calculate_result_code(self.operand.constant_result)

    def calculate_result_code(self, operand_result = None):
        if operand_result is None:
            operand_result = self.operand.result()
        if self.type.is_complex:
            # NOTE(review): this overwrites any 'operand_result' that was
            # passed in (e.g. from calculate_constant_result()) for the
            # complex case - verify that this is intended.
            operand_result = self.operand.result()
            if self.operand.type.is_complex:
                real_part = self.type.real_type.cast_code("__Pyx_CREAL(%s)" % operand_result)
                imag_part = self.type.real_type.cast_code("__Pyx_CIMAG(%s)" % operand_result)
            else:
                real_part = self.type.real_type.cast_code(operand_result)
                imag_part = "0"
            return "%s(%s, %s)" % (
                self.type.from_parts,
                real_part,
                imag_part)
        else:
            return self.type.cast_code(operand_result)

    def get_constant_c_result_code(self):
        operand_result = self.operand.get_constant_c_result_code()
        if operand_result:
            return self.type.cast_code(operand_result)

    def result_as(self, type):
        if self.type.is_pyobject and not self.is_temp:
            #  Optimise away some unnecessary casting
            return self.operand.result_as(type)
        else:
            return ExprNode.result_as(self, type)

    def generate_result_code(self, code):
        if self.is_temp:
            code.putln(
                "%s = (PyObject *)%s;" % (
                    self.result(),
                    self.operand.result()))
            code.put_incref(self.result(), self.ctype())
# Error messages for <dtype[:...]> casts, used by CythonArrayNode below.
ERR_START = "Start may not be given"
ERR_NOT_STOP = "Stop must be provided to indicate shape"
ERR_STEPS = ("Strides may only be given to indicate contiguity. "
             "Consider slicing it after conversion")
ERR_NOT_POINTER = "Can only create cython.array from pointer or array"
ERR_BASE_TYPE = "Pointer base type does not match cython.array base type"
class CythonArrayNode(ExprNode):
    """
    Used when a pointer of base_type is cast to a memoryviewslice with that
    base type. i.e.

        <int[:M:1, :N]> p

    creates a fortran-contiguous cython.array.

    We leave the type set to object so coercions to object are more efficient
    and less work. Acquiring a memoryviewslice from this will be just as
    efficient. ExprNode.coerce_to() will do the additional typecheck on
    self.compile_time_type

    This also handles <int[:, :]> my_c_array

    operand             ExprNode                 the thing we're casting
    base_type_node      MemoryViewSliceTypeNode  the cast expression node
    """

    subexprs = ['operand', 'shapes']

    shapes = None
    is_temp = True
    mode = "c"  # or "fortran", decided by the step of the first axis
    array_dtype = None

    shape_type = PyrexTypes.c_py_ssize_t_type

    def analyse_types(self, env):
        from . import MemoryView

        self.operand = self.operand.analyse_types(env)
        if self.array_dtype:
            array_dtype = self.array_dtype
        else:
            array_dtype = self.base_type_node.base_type_node.analyse(env)
        axes = self.base_type_node.axes

        self.type = error_type
        self.shapes = []
        ndim = len(axes)

        # Base type of the pointer or C array we are converting
        base_type = self.operand.type

        if not self.operand.type.is_ptr and not self.operand.type.is_array:
            error(self.operand.pos, ERR_NOT_POINTER)
            return self

        # Dimension sizes of C array
        array_dimension_sizes = []
        if base_type.is_array:
            while base_type.is_array:
                array_dimension_sizes.append(base_type.size)
                base_type = base_type.base_type
        elif base_type.is_ptr:
            base_type = base_type.base_type
        else:
            error(self.pos, "unexpected base type %s found" % base_type)
            return self

        if not (base_type.same_as(array_dtype) or base_type.is_void):
            error(self.operand.pos, ERR_BASE_TYPE)
            return self
        elif self.operand.type.is_array and len(array_dimension_sizes) != ndim:
            error(self.operand.pos,
                  "Expected %d dimensions, array has %d dimensions" %
                  (ndim, len(array_dimension_sizes)))
            return self

        # Verify the start, stop and step values
        # In case of a C array, use the size of C array in each dimension to
        # get an automatic cast
        for axis_no, axis in enumerate(axes):
            if not axis.start.is_none:
                error(axis.start.pos, ERR_START)
                return self

            if axis.stop.is_none:
                if array_dimension_sizes:
                    dimsize = array_dimension_sizes[axis_no]
                    axis.stop = IntNode(self.pos, value=str(dimsize),
                                        constant_result=dimsize,
                                        type=PyrexTypes.c_int_type)
                else:
                    error(axis.pos, ERR_NOT_STOP)
                    return self

            axis.stop = axis.stop.analyse_types(env)
            shape = axis.stop.coerce_to(self.shape_type, env)
            if not shape.is_literal:
                shape.coerce_to_temp(env)

            self.shapes.append(shape)

            first_or_last = axis_no in (0, ndim - 1)
            if not axis.step.is_none and first_or_last:
                # '1' in the first or last dimension denotes F or C contiguity
                axis.step = axis.step.analyse_types(env)
                if (not axis.step.type.is_int and axis.step.is_literal and not
                        axis.step.type.is_error):
                    error(axis.step.pos, "Expected an integer literal")
                    return self

                if axis.step.compile_time_value(env) != 1:
                    error(axis.step.pos, ERR_STEPS)
                    return self

                if axis_no == 0:
                    self.mode = "fortran"

            elif not axis.step.is_none and not first_or_last:
                # step provided in some other dimension
                error(axis.step.pos, ERR_STEPS)
                return self

        if not self.operand.is_name:
            self.operand = self.operand.coerce_to_temp(env)

        axes = [('direct', 'follow')] * len(axes)
        if self.mode == "fortran":
            axes[0] = ('direct', 'contig')
        else:
            axes[-1] = ('direct', 'contig')

        self.coercion_type = PyrexTypes.MemoryViewSliceType(array_dtype, axes)
        self.coercion_type.validate_memslice_dtype(self.pos)
        self.type = self.get_cython_array_type(env)
        MemoryView.use_cython_array_utility_code(env)
        env.use_utility_code(MemoryView.typeinfo_to_format_code)
        return self

    def allocate_temp_result(self, code):
        if self.temp_code:
            # Fixed typo in the error message ("mulitple" -> "multiple").
            raise RuntimeError("temp allocated multiple times")
        self.temp_code = code.funcstate.allocate_temp(self.type, True)

    def infer_type(self, env):
        return self.get_cython_array_type(env)

    def get_cython_array_type(self, env):
        return env.global_scope().context.cython_scope.viewscope.lookup("array").type

    def generate_result_code(self, code):
        from . import Buffer

        shapes = [self.shape_type.cast_code(shape.result())
                  for shape in self.shapes]
        dtype = self.coercion_type.dtype

        shapes_temp = code.funcstate.allocate_temp(py_object_type, True)
        format_temp = code.funcstate.allocate_temp(py_object_type, True)

        itemsize = "sizeof(%s)" % dtype.empty_declaration_code()
        type_info = Buffer.get_type_information_cname(code, dtype)

        # Guard against creating an array from a NULL pointer at runtime.
        if self.operand.type.is_ptr:
            code.putln("if (!%s) {" % self.operand.result())
            code.putln('PyErr_SetString(PyExc_ValueError,'
                       '"Cannot create cython.array from NULL pointer");')
            code.putln(code.error_goto(self.operand.pos))
            code.putln("}")

        code.putln("%s = __pyx_format_from_typeinfo(&%s);" %
                   (format_temp, type_info))
        buildvalue_fmt = " __PYX_BUILD_PY_SSIZE_T " * len(shapes)
        code.putln('%s = Py_BuildValue((char*) "(" %s ")", %s);' % (
            shapes_temp, buildvalue_fmt, ", ".join(shapes)))

        err = "!%s || !%s || !PyBytes_AsString(%s)" % (format_temp,
                                                       shapes_temp,
                                                       format_temp)
        code.putln(code.error_goto_if(err, self.pos))
        code.put_gotref(format_temp)
        code.put_gotref(shapes_temp)

        tup = (self.result(), shapes_temp, itemsize, format_temp,
               self.mode, self.operand.result())
        code.putln('%s = __pyx_array_new('
                   '%s, %s, PyBytes_AS_STRING(%s), '
                   '(char *) "%s", (char *) %s);' % tup)
        code.putln(code.error_goto_if_null(self.result(), self.pos))
        code.put_gotref(self.result())

        def dispose(temp):
            code.put_decref_clear(temp, py_object_type)
            code.funcstate.release_temp(temp)

        dispose(shapes_temp)
        dispose(format_temp)

    @classmethod
    def from_carray(cls, src_node, env):
        """
        Given a C array type, return a CythonArrayNode
        """
        pos = src_node.pos
        base_type = src_node.type

        none_node = NoneNode(pos)
        axes = []

        while base_type.is_array:
            axes.append(SliceNode(pos, start=none_node, stop=none_node,
                                  step=none_node))
            base_type = base_type.base_type
        axes[-1].step = IntNode(pos, value="1", is_c_literal=True)

        memslicenode = Nodes.MemoryViewSliceTypeNode(pos, axes=axes,
                                                     base_type_node=base_type)
        result = CythonArrayNode(pos, base_type_node=memslicenode,
                                 operand=src_node, array_dtype=base_type)
        result = result.analyse_types(env)
        return result
class SizeofNode(ExprNode):
    #  Abstract base class for sizeof(x) expression nodes.

    type = PyrexTypes.c_size_t_type

    def check_const(self):
        return True

    def generate_result_code(self, code):
        # sizeof() is a pure C-level expression; result comes from
        # calculate_result_code() in the subclasses.
        pass
class SizeofTypeNode(SizeofNode):
    #  C sizeof function applied to a type
    #
    #  base_type   CBaseTypeNode
    #  declarator  CDeclaratorNode

    subexprs = []
    arg_type = None

    def analyse_types(self, env):
        # we may have incorrectly interpreted a dotted name as a type rather than an attribute
        # this could be better handled by more uniformly treating types as runtime-available objects
        # NOTE(review): this branch is deliberately disabled via 'if 0 and'.
        if 0 and self.base_type.module_path:
            path = self.base_type.module_path
            obj = env.lookup(path[0])
            if obj.as_module is None:
                operand = NameNode(pos=self.pos, name=path[0])
                for attr in path[1:]:
                    operand = AttributeNode(pos=self.pos, obj=operand, attribute=attr)
                operand = AttributeNode(pos=self.pos, obj=operand, attribute=self.base_type.name)
                self.operand = operand
                self.__class__ = SizeofVarNode
                node = self.analyse_types(env)
                return node
        if self.arg_type is None:
            base_type = self.base_type.analyse(env)
            _, arg_type = self.declarator.analyse(base_type, env)
            self.arg_type = arg_type
        self.check_type()
        return self

    def check_type(self):
        arg_type = self.arg_type
        if arg_type.is_pyobject and not arg_type.is_extension_type:
            error(self.pos, "Cannot take sizeof Python object")
        elif arg_type.is_void:
            error(self.pos, "Cannot take sizeof void")
        elif not arg_type.is_complete():
            error(self.pos, "Cannot take sizeof incomplete type '%s'" % arg_type)

    def calculate_result_code(self):
        if self.arg_type.is_extension_type:
            # the size of the pointer is boring
            # we want the size of the actual struct
            arg_code = self.arg_type.declaration_code("", deref=1)
        else:
            arg_code = self.arg_type.empty_declaration_code()
        return "(sizeof(%s))" % arg_code
class SizeofVarNode(SizeofNode):
    #  C sizeof function applied to a variable
    #
    #  operand   ExprNode
    subexprs = ['operand']
    def analyse_types(self, env):
        # We may actually be looking at a type rather than a variable...
        # If we are, traditional analysis would fail...
        operand_as_type = self.operand.analyse_as_type(env)
        if operand_as_type:
            self.arg_type = operand_as_type
            if self.arg_type.is_fused:
                # resolve a fused type to the concrete specialization in use
                self.arg_type = self.arg_type.specialize(env.fused_to_specific)
            # in-place class swap: from here on behave as sizeof(type)
            self.__class__ = SizeofTypeNode
            self.check_type()
        else:
            self.operand = self.operand.analyse_types(env)
        return self
    def calculate_result_code(self):
        return "(sizeof(%s))" % self.operand.result()
    def generate_result_code(self, code):
        # Nothing to emit: sizeof is evaluated by the C compiler.
        pass
class TypeofNode(ExprNode):
    #  Compile-time type of an expression, as a string.
    #
    #  operand   ExprNode
    #  literal   StringNode # internal
    literal = None
    type = py_object_type
    subexprs = ['literal'] # 'operand' will be ignored after type analysis!
    def analyse_types(self, env):
        self.operand = self.operand.analyse_types(env)
        # The result is simply the string form of the operand's static type,
        # captured at compile time and emitted as a Python string constant.
        value = StringEncoding.EncodedString(str(self.operand.type)) #self.operand.type.typeof_name())
        literal = StringNode(self.pos, value=value)
        literal = literal.analyse_types(env)
        self.literal = literal.coerce_to_pyobject(env)
        return self
    def may_be_none(self):
        # Always a string constant, never None.
        return False
    def generate_evaluation_code(self, code):
        # Only the pre-built string literal needs evaluating.
        self.literal.generate_evaluation_code(code)
    def calculate_result_code(self):
        return self.literal.calculate_result_code()
#-------------------------------------------------------------------
#
# Binary operator nodes
#
#-------------------------------------------------------------------
# Use operator.matmul where available (Python 3.5+); otherwise emulate the
# '@' operator by dispatching to __matmul__ / __rmatmul__ by hand.
if hasattr(operator, 'matmul'):
    matmul_operator = operator.matmul
else:
    def matmul_operator(a, b):
        """Fallback '@' implementation for Pythons without operator.matmul."""
        try:
            meth = a.__matmul__
        except AttributeError:
            meth = b.__rmatmul__
        return meth(a, b)
# Maps Cython operator spellings to the Python functions used to evaluate
# them in compile-time (DEF/IF) expressions.
# NOTE: '/' maps to true division here; DivNode substitutes floor division
# for integer operands at lookup time (see find_compile_time_binary_operator).
compile_time_binary_operators = {
    '<': operator.lt,
    '<=': operator.le,
    '==': operator.eq,
    '!=': operator.ne,
    '>=': operator.ge,
    '>': operator.gt,
    'is': operator.is_,
    'is_not': operator.is_not,
    '+': operator.add,
    '&': operator.and_,
    '/': operator.truediv,
    '//': operator.floordiv,
    '<<': operator.lshift,
    '%': operator.mod,
    '*': operator.mul,
    '|': operator.or_,
    '**': operator.pow,
    '>>': operator.rshift,
    '-': operator.sub,
    '^': operator.xor,
    '@': matmul_operator,
    'in': lambda x, seq: x in seq,
    'not_in': lambda x, seq: x not in seq,
}
def get_compile_time_binop(node):
    """Look up the compile-time evaluator for *node*'s binary operator.

    Reports an error at the node's position (and returns None) when the
    operator cannot be evaluated in a compile-time expression.
    """
    evaluator = compile_time_binary_operators.get(node.operator)
    if not evaluator:
        error(node.pos,
              "Binary '%s' not supported in compile-time expression"
              % node.operator)
    return evaluator
class BinopNode(ExprNode):
    #  operator     string
    #  operand1     ExprNode
    #  operand2     ExprNode
    #
    #  Processing during analyse_expressions phase:
    #
    #    analyse_c_operation
    #      Called when neither operand is a pyobject.
    #      - Check operand types and coerce if needed.
    #      - Determine result type and result code fragment.
    #      - Allocate temporary for result if needed.
    subexprs = ['operand1', 'operand2']
    inplace = False
    def calculate_constant_result(self):
        # Fold the operation over already-known constant operand results.
        func = compile_time_binary_operators[self.operator]
        self.constant_result = func(
            self.operand1.constant_result,
            self.operand2.constant_result)
    def compile_time_value(self, denv):
        # Evaluate the operation in a compile-time (DEF/IF) context.
        func = get_compile_time_binop(self)
        operand1 = self.operand1.compile_time_value(denv)
        operand2 = self.operand2.compile_time_value(denv)
        try:
            return func(operand1, operand2)
        except Exception as e:
            self.compile_time_value_error(e)
    def infer_type(self, env):
        return self.result_type(self.operand1.infer_type(env),
                                self.operand2.infer_type(env))
    def analyse_types(self, env):
        self.operand1 = self.operand1.analyse_types(env)
        self.operand2 = self.operand2.analyse_types(env)
        self.analyse_operation(env)
        return self
    def analyse_operation(self, env):
        # Dispatch to Python-object, C++ overload or plain C analysis.
        if self.is_py_operation():
            self.coerce_operands_to_pyobjects(env)
            self.type = self.result_type(self.operand1.type,
                                         self.operand2.type)
            assert self.type.is_pyobject
            self.is_temp = 1
        elif self.is_cpp_operation():
            self.analyse_cpp_operation(env)
        else:
            self.analyse_c_operation(env)
    def is_py_operation(self):
        return self.is_py_operation_types(self.operand1.type, self.operand2.type)
    def is_py_operation_types(self, type1, type2):
        # C tuples are also handled through the Python protocol here.
        return type1.is_pyobject or type2.is_pyobject or type1.is_ctuple or type2.is_ctuple
    def is_cpp_operation(self):
        return (self.operand1.type.is_cpp_class
                or self.operand2.type.is_cpp_class)
    def analyse_cpp_operation(self, env):
        # Resolve the overloaded C++ operator and coerce operands to the
        # parameter types of the matched overload.
        entry = env.lookup_operator(self.operator, [self.operand1, self.operand2])
        if not entry:
            self.type_error()
            return
        func_type = entry.type
        self.exception_check = func_type.exception_check
        self.exception_value = func_type.exception_value
        if self.exception_check == '+':
            # Used by NumBinopNodes to break up expressions involving multiple
            # operators so that exceptions can be handled properly.
            self.is_temp = 1
            if self.exception_value is None:
                env.use_utility_code(UtilityCode.load_cached("CppExceptionConversion", "CppSupport.cpp"))
        if func_type.is_ptr:
            func_type = func_type.base_type
        if len(func_type.args) == 1:
            # member operator: operand1 is 'this', only operand2 is an argument
            self.operand2 = self.operand2.coerce_to(func_type.args[0].type, env)
        else:
            self.operand1 = self.operand1.coerce_to(func_type.args[0].type, env)
            self.operand2 = self.operand2.coerce_to(func_type.args[1].type, env)
        self.type = func_type.return_type
    def result_type(self, type1, type2):
        # Compute the static result type of the operation, preferring an
        # exact builtin type over plain 'object' where it can be inferred.
        if self.is_py_operation_types(type1, type2):
            if type2.is_string:
                type2 = Builtin.bytes_type
            elif type2.is_pyunicode_ptr:
                type2 = Builtin.unicode_type
            if type1.is_string:
                type1 = Builtin.bytes_type
            elif type1.is_pyunicode_ptr:
                type1 = Builtin.unicode_type
            if type1.is_builtin_type or type2.is_builtin_type:
                if type1 is type2 and self.operator in '**%+|&^':
                    # FIXME: at least these operators should be safe - others?
                    return type1
                result_type = self.infer_builtin_types_operation(type1, type2)
                if result_type is not None:
                    return result_type
            return py_object_type
        else:
            return self.compute_c_result_type(type1, type2)
    def infer_builtin_types_operation(self, type1, type2):
        # Overridden by subclasses that can infer a builtin result type.
        return None
    def nogil_check(self, env):
        if self.is_py_operation():
            self.gil_error()
    def coerce_operands_to_pyobjects(self, env):
        self.operand1 = self.operand1.coerce_to_pyobject(env)
        self.operand2 = self.operand2.coerce_to_pyobject(env)
    def check_const(self):
        return self.operand1.check_const() and self.operand2.check_const()
    def is_ephemeral(self):
        return (super(BinopNode, self).is_ephemeral() or
                self.operand1.is_ephemeral() or self.operand2.is_ephemeral())
    def generate_result_code(self, code):
        #print "BinopNode.generate_result_code:", self.operand1, self.operand2 ###
        if self.operand1.type.is_pyobject:
            function = self.py_operation_function(code)
            if self.operator == '**':
                # PyNumber_Power() takes a third (modulus) argument
                extra_args = ", Py_None"
            else:
                extra_args = ""
            code.putln(
                "%s = %s(%s, %s%s); %s" % (
                    self.result(),
                    function,
                    self.operand1.py_result(),
                    self.operand2.py_result(),
                    extra_args,
                    code.error_goto_if_null(self.result(), self.pos)))
            code.put_gotref(self.py_result())
        elif self.is_temp:
            # C++ overloaded operators with exception values are currently all
            # handled through temporaries.
            if self.is_cpp_operation() and self.exception_check == '+':
                translate_cpp_exception(code, self.pos,
                    "%s = %s;" % (self.result(), self.calculate_result_code()),
                    self.exception_value, self.in_nogil_context)
            else:
                code.putln("%s = %s;" % (self.result(), self.calculate_result_code()))
    def type_error(self):
        # Suppress cascading errors when an operand type is already broken.
        if not (self.operand1.type.is_error
                or self.operand2.type.is_error):
            error(self.pos, "Invalid operand types for '%s' (%s; %s)" %
                (self.operator, self.operand1.type,
                    self.operand2.type))
        self.type = PyrexTypes.error_type
class CBinopNode(BinopNode):
    # Binary operation that is only defined for C operands; a Python-level
    # operation is a type error here.
    def analyse_types(self, env):
        node = BinopNode.analyse_types(self, env)
        if node.is_py_operation():
            node.type = PyrexTypes.error_type
        return node
    def py_operation_function(self, code):
        # No Python fallback exists for pure C operators.
        return ""
    def calculate_result_code(self):
        return "(%s %s %s)" % (
            self.operand1.result(),
            self.operator,
            self.operand2.result())
    def compute_c_result_type(self, type1, type2):
        cpp_type = None
        if type1.is_cpp_class or type1.is_ptr:
            cpp_type = type1.find_cpp_operation_type(self.operator, type2)
        # FIXME: handle the reversed case?
        #if cpp_type is None and (type2.is_cpp_class or type2.is_ptr):
        #    cpp_type = type2.find_cpp_operation_type(self.operator, type1)
        # FIXME: do we need to handle other cases here?
        return cpp_type
def c_binop_constructor(operator):
    """Return a factory function that builds CBinopNode instances bound
    to the given *operator* string."""
    def make_binop_node(pos, **kwds):
        return CBinopNode(pos, operator=operator, **kwds)
    return make_binop_node
class NumBinopNode(BinopNode):
    #  Binary operation taking numeric arguments.
    infix = True
    overflow_check = False
    overflow_bit_node = None
    def analyse_c_operation(self, env):
        type1 = self.operand1.type
        type2 = self.operand2.type
        self.type = self.compute_c_result_type(type1, type2)
        if not self.type:
            self.type_error()
            return
        if self.type.is_complex:
            # complex arithmetic is emitted via helper functions, not infix
            self.infix = False
        if (self.type.is_int
                and env.directives['overflowcheck']
                and self.operator in self.overflow_op_names):
            if (self.operator in ('+', '*')
                    and self.operand1.has_constant_result()
                    and not self.operand2.has_constant_result()):
                # commutative ops: put the constant on the RHS so the
                # overflow helper can exploit a constant right operand
                self.operand1, self.operand2 = self.operand2, self.operand1
            self.overflow_check = True
            self.overflow_fold = env.directives['overflowcheck.fold']
            self.func = self.type.overflow_check_binop(
                self.overflow_op_names[self.operator],
                env,
                const_rhs = self.operand2.has_constant_result())
            self.is_temp = True
        if not self.infix or (type1.is_numeric and type2.is_numeric):
            self.operand1 = self.operand1.coerce_to(self.type, env)
            self.operand2 = self.operand2.coerce_to(self.type, env)
    def compute_c_result_type(self, type1, type2):
        if self.c_types_okay(type1, type2):
            widest_type = PyrexTypes.widest_numeric_type(type1, type2)
            if widest_type is PyrexTypes.c_bint_type:
                if self.operator not in '|^&':
                    # False + False == 0 # not False!
                    widest_type = PyrexTypes.c_int_type
            else:
                widest_type = PyrexTypes.widest_numeric_type(
                    widest_type, PyrexTypes.c_int_type)
            return widest_type
        else:
            return None
    def may_be_none(self):
        if self.type and self.type.is_builtin_type:
            # if we know the result type, we know the operation, so it can't be None
            return False
        type1 = self.operand1.type
        type2 = self.operand2.type
        if type1 and type1.is_builtin_type and type2 and type2.is_builtin_type:
            # XXX: I can't think of any case where a binary operation
            # on builtin types evaluates to None - add a special case
            # here if there is one.
            return False
        return super(NumBinopNode, self).may_be_none()
    def get_constant_c_result_code(self):
        # Fold into a C constant expression when both operands allow it.
        value1 = self.operand1.get_constant_c_result_code()
        value2 = self.operand2.get_constant_c_result_code()
        if value1 and value2:
            return "(%s %s %s)" % (value1, self.operator, value2)
        else:
            return None
    def c_types_okay(self, type1, type2):
        #print "NumBinopNode.c_types_okay:", type1, type2 ###
        return (type1.is_numeric or type1.is_enum) \
            and (type2.is_numeric or type2.is_enum)
    def generate_evaluation_code(self, code):
        # Wrap the normal evaluation in an overflow-flag check when needed.
        if self.overflow_check:
            self.overflow_bit_node = self
            self.overflow_bit = code.funcstate.allocate_temp(PyrexTypes.c_int_type, manage_ref=False)
            code.putln("%s = 0;" % self.overflow_bit)
        super(NumBinopNode, self).generate_evaluation_code(code)
        if self.overflow_check:
            code.putln("if (unlikely(%s)) {" % self.overflow_bit)
            code.putln('PyErr_SetString(PyExc_OverflowError, "value too large");')
            code.putln(code.error_goto(self.pos))
            code.putln("}")
            code.funcstate.release_temp(self.overflow_bit)
    def calculate_result_code(self):
        if self.overflow_bit_node is not None:
            # call the overflow-checking helper, passing the flag by address
            return "%s(%s, %s, &%s)" % (
                self.func,
                self.operand1.result(),
                self.operand2.result(),
                self.overflow_bit_node.overflow_bit)
        elif self.type.is_cpp_class or self.infix:
            return "(%s %s %s)" % (
                self.operand1.result(),
                self.operator,
                self.operand2.result())
        else:
            # non-infix case (e.g. complex types): use a helper function
            func = self.type.binary_op(self.operator)
            if func is None:
                error(self.pos, "binary operator %s not supported for %s" % (self.operator, self.type))
            return "%s(%s, %s)" % (
                func,
                self.operand1.result(),
                self.operand2.result())
    def is_py_operation_types(self, type1, type2):
        return (type1.is_unicode_char or
                type2.is_unicode_char or
                BinopNode.is_py_operation_types(self, type1, type2))
    def py_operation_function(self, code):
        function_name = self.py_functions[self.operator]
        if self.inplace:
            # e.g. PyNumber_Add -> PyNumber_InPlaceAdd for augmented ops
            function_name = function_name.replace('PyNumber_', 'PyNumber_InPlace')
        return function_name
    # C-API (or Cython helper) function per Python-level operator
    py_functions = {
        "|": "PyNumber_Or",
        "^": "PyNumber_Xor",
        "&": "PyNumber_And",
        "<<": "PyNumber_Lshift",
        ">>": "PyNumber_Rshift",
        "+": "PyNumber_Add",
        "-": "PyNumber_Subtract",
        "*": "PyNumber_Multiply",
        "@": "__Pyx_PyNumber_MatrixMultiply",
        "/": "__Pyx_PyNumber_Divide",
        "//": "PyNumber_FloorDivide",
        "%": "PyNumber_Remainder",
        "**": "PyNumber_Power",
    }
    # operators for which C integer overflow checking helpers exist
    overflow_op_names = {
        "+": "add",
        "-": "sub",
        "*": "mul",
        "<<": "lshift",
    }
class IntBinopNode(NumBinopNode):
    """Binary operation restricted to integer (or enum) C operands."""

    def c_types_okay(self, type1, type2):
        # Both sides must be C integers or enums for a pure C operation.
        lhs_ok = type1.is_int or type1.is_enum
        rhs_ok = type2.is_int or type2.is_enum
        return lhs_ok and rhs_ok
class AddNode(NumBinopNode):
    #  '+' operator.
    def is_py_operation_types(self, type1, type2):
        # char*/Py_UNICODE* concatenation goes through Python objects
        if type1.is_string and type2.is_string or type1.is_pyunicode_ptr and type2.is_pyunicode_ptr:
            return 1
        else:
            return NumBinopNode.is_py_operation_types(self, type1, type2)
    def infer_builtin_types_operation(self, type1, type2):
        # b'abc' + 'abc' raises an exception in Py3,
        # so we can safely infer the Py2 type for bytes here
        string_types = (bytes_type, str_type, basestring_type, unicode_type)
        if type1 in string_types and type2 in string_types:
            # result is the "wider" of the two string types in this ordering
            return string_types[max(string_types.index(type1),
                                    string_types.index(type2))]
        return None
    def compute_c_result_type(self, type1, type2):
        #print "AddNode.compute_c_result_type:", type1, self.operator, type2 ###
        # pointer + integer (either order) keeps the pointer type
        if (type1.is_ptr or type1.is_array) and (type2.is_int or type2.is_enum):
            return type1
        elif (type2.is_ptr or type2.is_array) and (type1.is_int or type1.is_enum):
            return type2
        else:
            return NumBinopNode.compute_c_result_type(
                self, type1, type2)
    def py_operation_function(self, code):
        # Use the specialised unicode concat helpers where applicable;
        # the *Safe variant additionally handles None operands.
        type1, type2 = self.operand1.type, self.operand2.type
        if type1 is unicode_type or type2 is unicode_type:
            if type1.is_builtin_type and type2.is_builtin_type:
                if self.operand1.may_be_none() or self.operand2.may_be_none():
                    return '__Pyx_PyUnicode_ConcatSafe'
                else:
                    return '__Pyx_PyUnicode_Concat'
        return super(AddNode, self).py_operation_function(code)
class SubNode(NumBinopNode):
    """Binary '-' operator."""

    def compute_c_result_type(self, type1, type2):
        # Handle pointer arithmetic first, then fall back to numeric rules.
        lhs_is_pointer = type1.is_ptr or type1.is_array
        if lhs_is_pointer:
            if type2.is_int or type2.is_enum:
                # pointer - integer => pointer of the same type
                return type1
            if type2.is_ptr or type2.is_array:
                # pointer - pointer => ptrdiff_t
                return PyrexTypes.c_ptrdiff_t_type
        # numeric - numeric: defer to the generic numeric promotion rules
        return NumBinopNode.compute_c_result_type(self, type1, type2)
class MulNode(NumBinopNode):
    #  '*' operator.
    def is_py_operation_types(self, type1, type2):
        # string * int repetition is a Python-level operation
        if ((type1.is_string and type2.is_int) or
                (type2.is_string and type1.is_int)):
            return 1
        else:
            return NumBinopNode.is_py_operation_types(self, type1, type2)
    def infer_builtin_types_operation(self, type1, type2):
        # let's assume that whatever builtin type you multiply a string with
        # will either return a string of the same type or fail with an exception
        string_types = (bytes_type, str_type, basestring_type, unicode_type)
        if type1 in string_types and type2.is_builtin_type:
            return type1
        if type2 in string_types and type1.is_builtin_type:
            return type2
        # multiplication of containers/numbers with an integer value
        # always (?) returns the same type
        if type1.is_int:
            return type2
        if type2.is_int:
            return type1
        return None
class MatMultNode(NumBinopNode):
    """Binary '@' (matrix multiplication) operator."""

    def is_py_operation_types(self, type1, type2):
        # '@' has no C-level equivalent; it is always a Python operation.
        return True

    def generate_evaluation_code(self, code):
        # Make the __Pyx_PyNumber_MatrixMultiply helper available first.
        utility = UtilityCode.load_cached("MatrixMultiply", "ObjectHandling.c")
        code.globalstate.use_utility_code(utility)
        super(MatMultNode, self).generate_evaluation_code(code)
class DivNode(NumBinopNode):
    #  '/' or '//' operator.
    cdivision = None
    truedivision = None   # == "unknown" if operator == '/'
    ctruedivision = False
    cdivision_warnings = False
    zerodivision_check = None
    def find_compile_time_binary_operator(self, op1, op2):
        func = compile_time_binary_operators[self.operator]
        if self.operator == '/' and self.truedivision is None:
            # => true div for floats, floor div for integers
            if isinstance(op1, _py_int_types) and isinstance(op2, _py_int_types):
                func = compile_time_binary_operators['//']
        return func
    def calculate_constant_result(self):
        op1 = self.operand1.constant_result
        op2 = self.operand2.constant_result
        func = self.find_compile_time_binary_operator(op1, op2)
        self.constant_result = func(
            self.operand1.constant_result,
            self.operand2.constant_result)
    def compile_time_value(self, denv):
        operand1 = self.operand1.compile_time_value(denv)
        operand2 = self.operand2.compile_time_value(denv)
        try:
            func = self.find_compile_time_binary_operator(
                operand1, operand2)
            return func(operand1, operand2)
        except Exception as e:
            self.compile_time_value_error(e)
    def _check_truedivision(self, env):
        # Decide whether '/' means C division or Python true division,
        # honouring the 'cdivision' compiler directive.
        if self.cdivision or env.directives['cdivision']:
            self.ctruedivision = False
        else:
            self.ctruedivision = self.truedivision
    def infer_type(self, env):
        self._check_truedivision(env)
        return self.result_type(
            self.operand1.infer_type(env),
            self.operand2.infer_type(env))
    def analyse_operation(self, env):
        self._check_truedivision(env)
        NumBinopNode.analyse_operation(self, env)
        if self.is_cpp_operation():
            self.cdivision = True
        if not self.type.is_pyobject:
            # runtime zero check only needed when the divisor is not a
            # known non-zero constant and cdivision is not in effect
            self.zerodivision_check = (
                self.cdivision is None and not env.directives['cdivision']
                and (not self.operand2.has_constant_result() or
                     self.operand2.constant_result == 0))
            if self.zerodivision_check or env.directives['cdivision_warnings']:
                # Need to check ahead of time to warn or raise zero division error
                self.operand1 = self.operand1.coerce_to_simple(env)
                self.operand2 = self.operand2.coerce_to_simple(env)
    def compute_c_result_type(self, type1, type2):
        if self.operator == '/' and self.ctruedivision:
            if not type1.is_float and not type2.is_float:
                # int / int under true division yields at least double
                widest_type = PyrexTypes.widest_numeric_type(type1, PyrexTypes.c_double_type)
                widest_type = PyrexTypes.widest_numeric_type(type2, widest_type)
                return widest_type
        return NumBinopNode.compute_c_result_type(self, type1, type2)
    def zero_division_message(self):
        if self.type.is_int:
            return "integer division or modulo by zero"
        else:
            return "float division"
    def generate_evaluation_code(self, code):
        if not self.type.is_pyobject and not self.type.is_complex:
            if self.cdivision is None:
                # C semantics are safe for unsigned ints and floats
                self.cdivision = (code.globalstate.directives['cdivision']
                                    or not self.type.signed
                                    or self.type.is_float)
            if not self.cdivision:
                code.globalstate.use_utility_code(
                    UtilityCode.load_cached("DivInt", "CMath.c").specialize(self.type))
        NumBinopNode.generate_evaluation_code(self, code)
        self.generate_div_warning_code(code)
    def generate_div_warning_code(self, code):
        # Emit the runtime checks: ZeroDivisionError, the INT_MIN / -1
        # overflow case, and the optional cdivision semantics warning.
        in_nogil = self.in_nogil_context
        if not self.type.is_pyobject:
            if self.zerodivision_check:
                if not self.infix:
                    zero_test = "%s(%s)" % (self.type.unary_op('zero'), self.operand2.result())
                else:
                    zero_test = "%s == 0" % self.operand2.result()
                code.putln("if (unlikely(%s)) {" % zero_test)
                if in_nogil:
                    # must hold the GIL to raise the exception
                    code.put_ensure_gil()
                code.putln('PyErr_SetString(PyExc_ZeroDivisionError, "%s");' % self.zero_division_message())
                if in_nogil:
                    code.put_release_ensured_gil()
                code.putln(code.error_goto(self.pos))
                code.putln("}")
                if self.type.is_int and self.type.signed and self.operator != '%':
                    # signed INT_MIN / -1 overflows; detect divisor == -1 first
                    code.globalstate.use_utility_code(UtilityCode.load_cached("UnaryNegOverflows", "Overflow.c"))
                    if self.operand2.type.signed == 2:
                        # explicitly signed, no runtime check needed
                        minus1_check = 'unlikely(%s == -1)' % self.operand2.result()
                    else:
                        type_of_op2 = self.operand2.type.empty_declaration_code()
                        minus1_check = '(!(((%s)-1) > 0)) && unlikely(%s == (%s)-1)' % (
                            type_of_op2, self.operand2.result(), type_of_op2)
                    code.putln("else if (sizeof(%s) == sizeof(long) && %s "
                               " && unlikely(UNARY_NEG_WOULD_OVERFLOW(%s))) {" % (
                               self.type.empty_declaration_code(),
                               minus1_check,
                               self.operand1.result()))
                    if in_nogil:
                        code.put_ensure_gil()
                    code.putln('PyErr_SetString(PyExc_OverflowError, "value too large to perform division");')
                    if in_nogil:
                        code.put_release_ensured_gil()
                    code.putln(code.error_goto(self.pos))
                    code.putln("}")
            if code.globalstate.directives['cdivision_warnings'] and self.operator != '/':
                # warn when C and Python division semantics would differ,
                # i.e. when the operand signs differ
                code.globalstate.use_utility_code(
                    UtilityCode.load_cached("CDivisionWarning", "CMath.c"))
                code.putln("if (unlikely((%s < 0) ^ (%s < 0))) {" % (
                                self.operand1.result(),
                                self.operand2.result()))
                warning_code = "__Pyx_cdivision_warning(%(FILENAME)s, %(LINENO)s)" % {
                    'FILENAME': Naming.filename_cname,
                    'LINENO': Naming.lineno_cname,
                }
                if in_nogil:
                    result_code = 'result'
                    code.putln("int %s;" % result_code)
                    code.put_ensure_gil()
                    code.putln(code.set_error_info(self.pos, used=True))
                    code.putln("%s = %s;" % (result_code, warning_code))
                    code.put_release_ensured_gil()
                else:
                    result_code = warning_code
                    code.putln(code.set_error_info(self.pos, used=True))
                code.put("if (unlikely(%s)) " % result_code)
                code.put_goto(code.error_label)
                code.putln("}")
    def calculate_result_code(self):
        if self.type.is_complex:
            return NumBinopNode.calculate_result_code(self)
        elif self.type.is_float and self.operator == '//':
            return "floor(%s / %s)" % (
                self.operand1.result(),
                self.operand2.result())
        elif self.truedivision or self.cdivision:
            op1 = self.operand1.result()
            op2 = self.operand2.result()
            if self.truedivision:
                # cast operands up to the (float) result type if needed
                if self.type != self.operand1.type:
                    op1 = self.type.cast_code(op1)
                if self.type != self.operand2.type:
                    op2 = self.type.cast_code(op2)
            return "(%s / %s)" % (op1, op2)
        else:
            # Python-semantics integer division via the __Pyx_div helper
            return "__Pyx_div_%s(%s, %s)" % (
                self.type.specialization_name(),
                self.operand1.result(),
                self.operand2.result())
class ModNode(DivNode):
    #  '%' operator.
    def is_py_operation_types(self, type1, type2):
        # '%' doubles as string formatting, which is a Python operation
        return (type1.is_string
                or type2.is_string
                or NumBinopNode.is_py_operation_types(self, type1, type2))
    def infer_builtin_types_operation(self, type1, type2):
        # b'%s' % xyz  raises an exception in Py3, so it's safe to infer the type for Py2
        if type1 is unicode_type:
            # None + xyz  may be implemented by RHS
            if type2.is_builtin_type or not self.operand1.may_be_none():
                return type1
        elif type1 in (bytes_type, str_type, basestring_type):
            if type2 is unicode_type:
                return type2
            elif type2.is_numeric:
                return type1
            elif type1 is bytes_type and not type2.is_builtin_type:
                return None   # RHS might implement the '%' operator differently in Py3
            else:
                return basestring_type  # either str or unicode, can't tell
        return None
    def zero_division_message(self):
        if self.type.is_int:
            return "integer division or modulo by zero"
        else:
            return "float divmod()"
    def analyse_operation(self, env):
        DivNode.analyse_operation(self, env)
        if not self.type.is_pyobject:
            if self.cdivision is None:
                self.cdivision = env.directives['cdivision'] or not self.type.signed
            if not self.cdivision and not self.type.is_int and not self.type.is_float:
                error(self.pos, "mod operator not supported for type '%s'" % self.type)
    def generate_evaluation_code(self, code):
        if not self.type.is_pyobject and not self.cdivision:
            # Python-semantics modulo needs a helper for each C type
            if self.type.is_int:
                code.globalstate.use_utility_code(
                    UtilityCode.load_cached("ModInt", "CMath.c").specialize(self.type))
            else:  # float
                code.globalstate.use_utility_code(
                    UtilityCode.load_cached("ModFloat", "CMath.c").specialize(
                        self.type, math_h_modifier=self.type.math_h_modifier))
        # NOTE: skipping over DivNode here
        NumBinopNode.generate_evaluation_code(self, code)
        self.generate_div_warning_code(code)
    def calculate_result_code(self):
        if self.cdivision:
            if self.type.is_float:
                # C '%' is undefined for floats; use fmod() instead
                return "fmod%s(%s, %s)" % (
                    self.type.math_h_modifier,
                    self.operand1.result(),
                    self.operand2.result())
            else:
                return "(%s %% %s)" % (
                    self.operand1.result(),
                    self.operand2.result())
        else:
            return "__Pyx_mod_%s(%s, %s)" % (
                    self.type.specialization_name(),
                    self.operand1.result(),
                    self.operand2.result())
    def py_operation_function(self, code):
        # string % args uses the formatting C-API; the *Safe variants
        # additionally tolerate a None format string
        if self.operand1.type is unicode_type:
            if self.operand1.may_be_none():
                return '__Pyx_PyUnicode_FormatSafe'
            else:
                return 'PyUnicode_Format'
        elif self.operand1.type is str_type:
            if self.operand1.may_be_none():
                return '__Pyx_PyString_FormatSafe'
            else:
                return '__Pyx_PyString_Format'
        return super(ModNode, self).py_operation_function(code)
class PowNode(NumBinopNode):
    #  '**' operator.
    def analyse_c_operation(self, env):
        # Choose the C helper/function used to compute the power, based on
        # the already-computed result type.
        NumBinopNode.analyse_c_operation(self, env)
        if self.type.is_complex:
            if self.type.real_type.is_float:
                self.operand1 = self.operand1.coerce_to(self.type, env)
                self.operand2 = self.operand2.coerce_to(self.type, env)
                self.pow_func = "__Pyx_c_pow" + self.type.real_type.math_h_modifier
            else:
                error(self.pos, "complex int powers not supported")
                self.pow_func = "<error>"
        elif self.type.is_float:
            self.pow_func = "pow" + self.type.math_h_modifier
        elif self.type.is_int:
            self.pow_func = "__Pyx_pow_%s" % self.type.empty_declaration_code().replace(' ', '_')
            env.use_utility_code(
                UtilityCode.load_cached("IntPow", "CMath.c").specialize(
                    func_name=self.pow_func,
                    type=self.type.empty_declaration_code(),
                    signed=self.type.signed and 1 or 0))
        elif not self.type.is_error:
            error(self.pos, "got unexpected types for C power operator: %s, %s" %
                            (self.operand1.type, self.operand2.type))
    def calculate_result_code(self):
        # Work around MSVC overloading ambiguity.
        def typecast(operand):
            if self.type == operand.type:
                return operand.result()
            else:
                return self.type.cast_code(operand.result())
        return "%s(%s, %s)" % (
            self.pow_func,
            typecast(self.operand1),
            typecast(self.operand2))
    def py_operation_function(self, code):
        # Special-case 2 ** x (integer literal base) with a dedicated helper.
        if (self.type.is_pyobject and
                self.operand1.constant_result == 2 and
                isinstance(self.operand1.constant_result, _py_int_types) and
                self.operand2.type is py_object_type):
            code.globalstate.use_utility_code(UtilityCode.load_cached('PyNumberPow2', 'Optimize.c'))
            if self.inplace:
                return '__Pyx_PyNumber_InPlacePowerOf2'
            else:
                return '__Pyx_PyNumber_PowerOf2'
        return super(PowNode, self).py_operation_function(code)
class BoolBinopNode(ExprNode):
    """
    Short-circuiting boolean operation.

    Note that this node provides the same code generation method as
    BoolBinopResultNode to simplify expression nesting.

    operator  string                              "and"/"or"
    operand1  BoolBinopNode/BoolBinopResultNode   left operand
    operand2  BoolBinopNode/BoolBinopResultNode   right operand
    """
    subexprs = ['operand1', 'operand2']
    is_temp = True
    operator = None
    operand1 = None
    operand2 = None
    def infer_type(self, env):
        type1 = self.operand1.infer_type(env)
        type2 = self.operand2.infer_type(env)
        return PyrexTypes.independent_spanning_type(type1, type2)
    def may_be_none(self):
        if self.operator == 'or':
            # 'x or y' can only be None if y can be None
            return self.operand2.may_be_none()
        else:
            return self.operand1.may_be_none() or self.operand2.may_be_none()
    def calculate_constant_result(self):
        operand1 = self.operand1.constant_result
        operand2 = self.operand2.constant_result
        if self.operator == 'and':
            self.constant_result = operand1 and operand2
        else:
            self.constant_result = operand1 or operand2
    def compile_time_value(self, denv):
        operand1 = self.operand1.compile_time_value(denv)
        operand2 = self.operand2.compile_time_value(denv)
        if self.operator == 'and':
            return operand1 and operand2
        else:
            return operand1 or operand2
    def is_ephemeral(self):
        return self.operand1.is_ephemeral() or self.operand2.is_ephemeral()
    def analyse_types(self, env):
        # Note: we do not do any coercion here as we most likely do not know the final type anyway.
        # We even accept to set self.type to ErrorType if both operands do not have a spanning type.
        # The coercion to the final type and to a "simple" value is left to coerce_to().
        operand1 = self.operand1.analyse_types(env)
        operand2 = self.operand2.analyse_types(env)
        self.type = PyrexTypes.independent_spanning_type(
            operand1.type, operand2.type)
        self.operand1 = self._wrap_operand(operand1, env)
        self.operand2 = self._wrap_operand(operand2, env)
        return self
    def _wrap_operand(self, operand, env):
        # Leaf operands get wrapped so they know how to test themselves
        # for truth and coerce to the overall result type.
        if not isinstance(operand, (BoolBinopNode, BoolBinopResultNode)):
            operand = BoolBinopResultNode(operand, self.type, env)
        return operand
    def wrap_operands(self, env):
        """
        Must get called by transforms that want to create a correct BoolBinopNode
        after the type analysis phase.
        """
        self.operand1 = self._wrap_operand(self.operand1, env)
        self.operand2 = self._wrap_operand(self.operand2, env)
    def coerce_to_boolean(self, env):
        return self.coerce_to(PyrexTypes.c_bint_type, env)
    def coerce_to(self, dst_type, env):
        # Coercion is distributed over both operands; the node itself is
        # rebuilt so each branch produces the destination type directly.
        operand1 = self.operand1.coerce_to(dst_type, env)
        operand2 = self.operand2.coerce_to(dst_type, env)
        return BoolBinopNode.from_node(
            self, type=dst_type,
            operator=self.operator,
            operand1=operand1, operand2=operand2)
    def generate_bool_evaluation_code(self, code, final_result_temp, and_label, or_label, end_label, fall_through):
        code.mark_pos(self.pos)
        outer_labels = (and_label, or_label)
        # Each operator introduces its own short-circuit label for the RHS.
        if self.operator == 'and':
            my_label = and_label = code.new_label('next_and')
        else:
            my_label = or_label = code.new_label('next_or')
        self.operand1.generate_bool_evaluation_code(
            code, final_result_temp, and_label, or_label, end_label, my_label)
        and_label, or_label = outer_labels
        code.put_label(my_label)
        self.operand2.generate_bool_evaluation_code(
            code, final_result_temp, and_label, or_label, end_label, fall_through)
    def generate_evaluation_code(self, code):
        self.allocate_temp_result(code)
        or_label = and_label = None
        end_label = code.new_label('bool_binop_done')
        self.generate_bool_evaluation_code(code, self.result(), and_label, or_label, end_label, end_label)
        code.put_label(end_label)
    gil_message = "Truth-testing Python object"
    def check_const(self):
        return self.operand1.check_const() and self.operand2.check_const()
    def generate_subexpr_disposal_code(self, code):
        pass  # nothing to do here, all done in generate_evaluation_code()
    def free_subexpr_temps(self, code):
        pass  # nothing to do here, all done in generate_evaluation_code()
    def generate_operand1_test(self, code):
        #  Generate code to test the truth of the first operand.
        if self.type.is_pyobject:
            test_result = code.funcstate.allocate_temp(
                PyrexTypes.c_bint_type, manage_ref=False)
            code.putln(
                "%s = __Pyx_PyObject_IsTrue(%s); %s" % (
                    test_result,
                    self.operand1.py_result(),
                    code.error_goto_if_neg(test_result, self.pos)))
        else:
            test_result = self.operand1.result()
        return (test_result, self.type.is_pyobject)
class BoolBinopResultNode(ExprNode):
    """
    Intermediate result of a short-circuiting and/or expression.
    Tests the result for 'truthiness' and takes care of coercing the final result
    of the overall expression to the target type.
    Note that this node provides the same code generation method as
    BoolBinopNode to simplify expression nesting.
    arg      ExprNode    the argument to test
    value    ExprNode    the coerced result value node
    """
    subexprs = ['arg', 'value']
    is_temp = True
    arg = None
    value = None
    def __init__(self, arg, result_type, env):
        # using 'arg' multiple times, so it must be a simple/temp value
        arg = arg.coerce_to_simple(env)
        # wrap in ProxyNode, in case a transform wants to replace self.arg later
        arg = ProxyNode(arg)
        super(BoolBinopResultNode, self).__init__(
            arg.pos, arg=arg, type=result_type,
            value=CloneNode(arg).coerce_to(result_type, env))
    def coerce_to_boolean(self, env):
        # Delegates to coerce_to(); bint coercion is handled there.
        return self.coerce_to(PyrexTypes.c_bint_type, env)
    def coerce_to(self, dst_type, env):
        # unwrap, coerce, rewrap
        arg = self.arg.arg
        if dst_type is PyrexTypes.c_bint_type:
            arg = arg.coerce_to_boolean(env)
        # TODO: unwrap more coercion nodes?
        return BoolBinopResultNode(arg, dst_type, env)
    def nogil_check(self, env):
        # let's leave all errors to BoolBinopNode
        pass
    def generate_operand_test(self, code):
        # Generate code to test the truth of the first operand.
        # Returns (test_result_cname, uses_temp) just like
        # BoolBinopNode.generate_operand1_test(), but for self.arg.
        if self.arg.type.is_pyobject:
            test_result = code.funcstate.allocate_temp(
                PyrexTypes.c_bint_type, manage_ref=False)
            code.putln(
                "%s = __Pyx_PyObject_IsTrue(%s); %s" % (
                    test_result,
                    self.arg.py_result(),
                    code.error_goto_if_neg(test_result, self.pos)))
        else:
            test_result = self.arg.result()
        return (test_result, self.arg.type.is_pyobject)
    def generate_bool_evaluation_code(self, code, final_result_temp, and_label, or_label, end_label, fall_through):
        code.mark_pos(self.pos)
        # x => x
        # x and ... or ... => next 'and' / 'or'
        # False ... or x => next 'or'
        # True and x => next 'and'
        # True or x => True (operand)
        self.arg.generate_evaluation_code(code)
        if and_label or or_label:
            test_result, uses_temp = self.generate_operand_test(code)
            if uses_temp and (and_label and or_label):
                # cannot become final result => free early
                # disposal: uses_temp and (and_label and or_label)
                self.arg.generate_disposal_code(code)
            # negate the test when only an 'or' continuation exists, so the
            # taken branch is the short-circuit jump
            sense = '!' if or_label else ''
            code.putln("if (%s%s) {" % (sense, test_result))
            if uses_temp:
                code.funcstate.release_temp(test_result)
            if not uses_temp or not (and_label and or_label):
                # disposal: (not uses_temp) or {not (and_label and or_label) [if]}
                self.arg.generate_disposal_code(code)
            if or_label and or_label != fall_through:
                # value is false => short-circuit to next 'or'
                code.put_goto(or_label)
            if and_label:
                # value is true => go to next 'and'
                if or_label:
                    code.putln("} else {")
                    if not uses_temp:
                        # disposal: (not uses_temp) and {(and_label and or_label) [else]}
                        self.arg.generate_disposal_code(code)
                if and_label != fall_through:
                    code.put_goto(and_label)
        if not and_label or not or_label:
            # if no next 'and' or 'or', we provide the result
            if and_label or or_label:
                code.putln("} else {")
            self.value.generate_evaluation_code(code)
            self.value.make_owned_reference(code)
            code.putln("%s = %s;" % (final_result_temp, self.value.result()))
            self.value.generate_post_assignment_code(code)
            # disposal: {not (and_label and or_label) [else]}
            self.arg.generate_disposal_code(code)
            self.value.free_temps(code)
            if end_label != fall_through:
                code.put_goto(end_label)
        if and_label or or_label:
            code.putln("}")
        self.arg.free_temps(code)
class CondExprNode(ExprNode):
    # Short-circuiting conditional expression ("true_val if test else false_val").
    # Only the selected branch is evaluated at runtime.
    #
    #  test        ExprNode
    #  true_val    ExprNode
    #  false_val   ExprNode
    true_val = None
    false_val = None
    subexprs = ['test', 'true_val', 'false_val']
    def type_dependencies(self, env):
        return self.true_val.type_dependencies(env) + self.false_val.type_dependencies(env)
    def infer_type(self, env):
        # The result type must span both branch types.
        return PyrexTypes.independent_spanning_type(
            self.true_val.infer_type(env),
            self.false_val.infer_type(env))
    def calculate_constant_result(self):
        if self.test.constant_result:
            self.constant_result = self.true_val.constant_result
        else:
            self.constant_result = self.false_val.constant_result
    def is_ephemeral(self):
        return self.true_val.is_ephemeral() or self.false_val.is_ephemeral()
    def analyse_types(self, env):
        self.test = self.test.analyse_types(env).coerce_to_boolean(env)
        self.true_val = self.true_val.analyse_types(env)
        self.false_val = self.false_val.analyse_types(env)
        self.is_temp = 1
        return self.analyse_result_type(env)
    def analyse_result_type(self, env):
        # Determine the common result type of the two branches and coerce
        # both to it when Python objects are involved.
        self.type = PyrexTypes.independent_spanning_type(
            self.true_val.type, self.false_val.type)
        if self.type.is_reference:
            # a C++ reference cannot be re-seated; use a fake reference type instead
            self.type = PyrexTypes.CFakeReferenceType(self.type.ref_base_type)
        if self.type.is_pyobject:
            self.result_ctype = py_object_type
        elif self.true_val.is_ephemeral() or self.false_val.is_ephemeral():
            error(self.pos, "Unsafe C derivative of temporary Python reference used in conditional expression")
        if self.true_val.type.is_pyobject or self.false_val.type.is_pyobject:
            self.true_val = self.true_val.coerce_to(self.type, env)
            self.false_val = self.false_val.coerce_to(self.type, env)
        if self.type.is_error:
            self.type_error()
        return self
    def coerce_to(self, dst_type, env):
        # Coerce each branch separately, then re-derive the result type.
        self.true_val = self.true_val.coerce_to(dst_type, env)
        self.false_val = self.false_val.coerce_to(dst_type, env)
        self.result_ctype = None
        return self.analyse_result_type(env)
    def type_error(self):
        if not (self.true_val.type.is_error or self.false_val.type.is_error):
            error(self.pos, "Incompatible types in conditional expression (%s; %s)" %
                (self.true_val.type, self.false_val.type))
        self.type = PyrexTypes.error_type
    def check_const(self):
        return (self.test.check_const()
            and self.true_val.check_const()
            and self.false_val.check_const())
    def generate_evaluation_code(self, code):
        # Because subexprs may not be evaluated we can use a more optimal
        # subexpr allocation strategy than the default, so override evaluation_code.
        code.mark_pos(self.pos)
        self.allocate_temp_result(code)
        self.test.generate_evaluation_code(code)
        code.putln("if (%s) {" % self.test.result())
        self.eval_and_get(code, self.true_val)
        code.putln("} else {")
        self.eval_and_get(code, self.false_val)
        code.putln("}")
        self.test.generate_disposal_code(code)
        self.test.free_temps(code)
    def eval_and_get(self, code, expr):
        # Evaluate one branch and assign its (owned) value to our result temp.
        expr.generate_evaluation_code(code)
        if self.type.is_memoryviewslice:
            expr.make_owned_memoryviewslice(code)
        else:
            expr.make_owned_reference(code)
        code.putln('%s = %s;' % (self.result(), expr.result_as(self.ctype())))
        expr.generate_post_assignment_code(code)
        expr.free_temps(code)
    def generate_subexpr_disposal_code(self, code):
        pass  # done explicitly above (cleanup must separately happen within the if/else blocks)
    def free_subexpr_temps(self, code):
        pass  # done explicitly above (cleanup must separately happen within the if/else blocks)
# Map comparison operator strings to the CPython rich-comparison constant
# names passed to PyObject_RichCompare() and friends.
richcmp_constants = {
    "<" : "Py_LT",
    "<=": "Py_LE",
    "==": "Py_EQ",
    "!=": "Py_NE",
    "<>": "Py_NE",
    ">" : "Py_GT",
    ">=": "Py_GE",
    # the following are faked by special compare functions
    "in" : "Py_EQ",
    "not_in": "Py_NE",
}
class CmpNode(object):
    # Mixin class containing code common to PrimaryCmpNodes
    # and CascadedCmpNodes.
    # cname of an optimised C comparison helper (set by
    # find_special_bool_compare_function()), plus its utility code
    special_bool_cmp_function = None
    special_bool_cmp_utility_code = None
    def infer_type(self, env):
        # TODO: Actually implement this (after merging with -unstable).
        return py_object_type
    def calculate_cascaded_constant_result(self, operand1_result):
        # Fold this comparison at compile time, given the already-folded
        # left operand value.  Also rewrites constant 'in' containers.
        func = compile_time_binary_operators[self.operator]
        operand2_result = self.operand2.constant_result
        if (isinstance(operand1_result, any_string_type) and
                isinstance(operand2_result, any_string_type) and
                type(operand1_result) != type(operand2_result)):
            # string comparison of different types isn't portable
            return
        if self.operator in ('in', 'not_in'):
            if isinstance(self.operand2, (ListNode, TupleNode, SetNode)):
                if not self.operand2.args:
                    # empty container => result is statically known
                    self.constant_result = self.operator == 'not_in'
                    return
                elif isinstance(self.operand2, ListNode) and not self.cascade:
                    # tuples are more efficient to store than lists
                    self.operand2 = self.operand2.as_tuple()
            elif isinstance(self.operand2, DictNode):
                if not self.operand2.key_value_pairs:
                    self.constant_result = self.operator == 'not_in'
                    return
        self.constant_result = func(operand1_result, operand2_result)
    def cascaded_compile_time_value(self, operand1, denv):
        # Evaluate this comparison (and, short-circuiting, the rest of the
        # cascade) at compile time in the declaration environment.
        func = get_compile_time_binop(self)
        operand2 = self.operand2.compile_time_value(denv)
        try:
            result = func(operand1, operand2)
        except Exception as e:
            self.compile_time_value_error(e)
            result = None
        if result:
            cascade = self.cascade
            if cascade:
                # Python-style chaining: reuse operand2 as the next left operand.
                result = result and cascade.cascaded_compile_time_value(operand2, denv)
        return result
    def is_cpp_comparison(self):
        return self.operand1.type.is_cpp_class or self.operand2.type.is_cpp_class
    def find_common_int_type(self, env, op, operand1, operand2):
        # type1 != type2 and at least one of the types is not a C int
        # Try to find an integer type both operands can use, allowing
        # single-character string literals to act as char/ucs4 values.
        type1 = operand1.type
        type2 = operand2.type
        type1_can_be_int = False
        type2_can_be_int = False
        if operand1.is_string_literal and operand1.can_coerce_to_char_literal():
            type1_can_be_int = True
        if operand2.is_string_literal and operand2.can_coerce_to_char_literal():
            type2_can_be_int = True
        if type1.is_int:
            if type2_can_be_int:
                return type1
        elif type2.is_int:
            if type1_can_be_int:
                return type2
        elif type1_can_be_int:
            if type2_can_be_int:
                # both are single-char literals
                if Builtin.unicode_type in (type1, type2):
                    return PyrexTypes.c_py_ucs4_type
                else:
                    return PyrexTypes.c_uchar_type
        return None
    def find_common_type(self, env, op, operand1, common_type=None):
        # Determine the type in which this comparison (and, recursively,
        # any cascade) should be performed.  'common_type' accumulates the
        # result over the cascade.
        operand2 = self.operand2
        type1 = operand1.type
        type2 = operand2.type
        new_common_type = None
        # catch general errors
        if type1 == str_type and (type2.is_string or type2 in (bytes_type, unicode_type)) or \
                type2 == str_type and (type1.is_string or type1 in (bytes_type, unicode_type)):
            error(self.pos, "Comparisons between bytes/unicode and str are not portable to Python 3")
            new_common_type = error_type
        # try to use numeric comparisons where possible
        elif type1.is_complex or type2.is_complex:
            # complex numbers only support equality tests
            if op not in ('==', '!=') \
                    and (type1.is_complex or type1.is_numeric) \
                    and (type2.is_complex or type2.is_numeric):
                error(self.pos, "complex types are unordered")
                new_common_type = error_type
            elif type1.is_pyobject:
                new_common_type = type1
            elif type2.is_pyobject:
                new_common_type = type2
            else:
                new_common_type = PyrexTypes.widest_numeric_type(type1, type2)
        elif type1.is_numeric and type2.is_numeric:
            new_common_type = PyrexTypes.widest_numeric_type(type1, type2)
        elif common_type is None or not common_type.is_pyobject:
            new_common_type = self.find_common_int_type(env, op, operand1, operand2)
        if new_common_type is None:
            # fall back to generic type compatibility tests
            if type1.is_ctuple or type2.is_ctuple:
                new_common_type = py_object_type
            elif type1 == type2:
                new_common_type = type1
            elif type1.is_pyobject or type2.is_pyobject:
                if type2.is_numeric or type2.is_string:
                    if operand2.check_for_coercion_error(type1, env):
                        new_common_type = error_type
                    else:
                        new_common_type = py_object_type
                elif type1.is_numeric or type1.is_string:
                    if operand1.check_for_coercion_error(type2, env):
                        new_common_type = error_type
                    else:
                        new_common_type = py_object_type
                elif py_object_type.assignable_from(type1) and py_object_type.assignable_from(type2):
                    new_common_type = py_object_type
                else:
                    # one Python type and one non-Python type, not assignable
                    self.invalid_types_error(operand1, op, operand2)
                    new_common_type = error_type
            elif type1.assignable_from(type2):
                new_common_type = type1
            elif type2.assignable_from(type1):
                new_common_type = type2
            else:
                # C types that we couldn't handle up to here are an error
                self.invalid_types_error(operand1, op, operand2)
                new_common_type = error_type
        if new_common_type.is_string and (isinstance(operand1, BytesNode) or
                                          isinstance(operand2, BytesNode)):
            # special case when comparing char* to bytes literal: must
            # compare string values!
            new_common_type = bytes_type
        # recursively merge types
        if common_type is None or new_common_type.is_error:
            common_type = new_common_type
        else:
            # we could do a lot better by splitting the comparison
            # into a non-Python part and a Python part, but this is
            # safer for now
            common_type = PyrexTypes.spanning_type(common_type, new_common_type)
        if self.cascade:
            common_type = self.cascade.find_common_type(env, self.operator, operand2, common_type)
        return common_type
    def invalid_types_error(self, operand1, op, operand2):
        error(self.pos, "Invalid types for '%s' (%s, %s)" %
              (op, operand1.type, operand2.type))
    def is_python_comparison(self):
        # True if this comparison must go through the Python C-API.
        return (not self.is_ptr_contains()
            and not self.is_c_string_contains()
            and (self.has_python_operands()
                 or (self.cascade and self.cascade.is_python_comparison())
                 or self.operator in ('in', 'not_in')))
    def coerce_operands_to(self, dst_type, env):
        # Coerce the right operand (and the whole cascade) to dst_type.
        operand2 = self.operand2
        if operand2.type != dst_type:
            self.operand2 = operand2.coerce_to(dst_type, env)
        if self.cascade:
            self.cascade.coerce_operands_to(dst_type, env)
    def is_python_result(self):
        # True if the overall result is a Python object rather than a bint.
        return ((self.has_python_operands() and
                 self.special_bool_cmp_function is None and
                 self.operator not in ('is', 'is_not', 'in', 'not_in') and
                 not self.is_c_string_contains() and
                 not self.is_ptr_contains())
            or (self.cascade and self.cascade.is_python_result()))
    def is_c_string_contains(self):
        # char-in-bytes or ucs4-in-unicode membership test that can be done in C.
        return self.operator in ('in', 'not_in') and \
               ((self.operand1.type.is_int
                 and (self.operand2.type.is_string or self.operand2.type is bytes_type)) or
                (self.operand1.type.is_unicode_char
                 and self.operand2.type is unicode_type))
    def is_ptr_contains(self):
        # Membership test over a (non-string) C pointer/array slice.
        # NOTE: falls through (returns None) for non-'in' operators; all
        # callers use the result in a boolean context.
        if self.operator in ('in', 'not_in'):
            container_type = self.operand2.type
            return (container_type.is_ptr or container_type.is_array) \
                and not container_type.is_string
    def find_special_bool_compare_function(self, env, operand1, result_is_bool=False):
        # note: currently operand1 must get coerced to a Python object if we succeed here!
        # Try to select an optimised C helper for equality or containment
        # tests on known builtin types; returns True (and records the
        # helper in special_bool_cmp_function) on success.
        if self.operator in ('==', '!='):
            type1, type2 = operand1.type, self.operand2.type
            if result_is_bool or (type1.is_builtin_type and type2.is_builtin_type):
                if type1 is Builtin.unicode_type or type2 is Builtin.unicode_type:
                    self.special_bool_cmp_utility_code = UtilityCode.load_cached("UnicodeEquals", "StringTools.c")
                    self.special_bool_cmp_function = "__Pyx_PyUnicode_Equals"
                    return True
                elif type1 is Builtin.bytes_type or type2 is Builtin.bytes_type:
                    self.special_bool_cmp_utility_code = UtilityCode.load_cached("BytesEquals", "StringTools.c")
                    self.special_bool_cmp_function = "__Pyx_PyBytes_Equals"
                    return True
                elif type1 is Builtin.basestring_type or type2 is Builtin.basestring_type:
                    self.special_bool_cmp_utility_code = UtilityCode.load_cached("UnicodeEquals", "StringTools.c")
                    self.special_bool_cmp_function = "__Pyx_PyUnicode_Equals"
                    return True
                elif type1 is Builtin.str_type or type2 is Builtin.str_type:
                    self.special_bool_cmp_utility_code = UtilityCode.load_cached("StrEquals", "StringTools.c")
                    self.special_bool_cmp_function = "__Pyx_PyString_Equals"
                    return True
        elif self.operator in ('in', 'not_in'):
            if self.operand2.type is Builtin.dict_type:
                self.operand2 = self.operand2.as_none_safe_node("'NoneType' object is not iterable")
                self.special_bool_cmp_utility_code = UtilityCode.load_cached("PyDictContains", "ObjectHandling.c")
                self.special_bool_cmp_function = "__Pyx_PyDict_ContainsTF"
                return True
            elif self.operand2.type is Builtin.unicode_type:
                self.operand2 = self.operand2.as_none_safe_node("'NoneType' object is not iterable")
                self.special_bool_cmp_utility_code = UtilityCode.load_cached("PyUnicodeContains", "StringTools.c")
                self.special_bool_cmp_function = "__Pyx_PyUnicode_ContainsTF"
                return True
            else:
                if not self.operand2.type.is_pyobject:
                    self.operand2 = self.operand2.coerce_to_pyobject(env)
                self.special_bool_cmp_utility_code = UtilityCode.load_cached("PySequenceContains", "ObjectHandling.c")
                self.special_bool_cmp_function = "__Pyx_PySequence_ContainsTF"
                return True
        return False
    def generate_operation_code(self, code, result_code,
            operand1, op , operand2):
        # Emit the C code for one comparison, choosing between the special
        # helper functions, PyObject_RichCompare(), complex equality, and
        # plain C operators, and wiring up error handling for each case.
        if self.type.is_pyobject:
            error_clause = code.error_goto_if_null
            got_ref = "__Pyx_XGOTREF(%s); " % result_code
            if self.special_bool_cmp_function:
                code.globalstate.use_utility_code(
                    UtilityCode.load_cached("PyBoolOrNullFromLong", "ObjectHandling.c"))
                coerce_result = "__Pyx_PyBoolOrNull_FromLong"
            else:
                coerce_result = "__Pyx_PyBool_FromLong"
        else:
            error_clause = code.error_goto_if_neg
            got_ref = ""
            coerce_result = ""
        if self.special_bool_cmp_function:
            if operand1.type.is_pyobject:
                result1 = operand1.py_result()
            else:
                result1 = operand1.result()
            if operand2.type.is_pyobject:
                result2 = operand2.py_result()
            else:
                result2 = operand2.result()
            if self.special_bool_cmp_utility_code:
                code.globalstate.use_utility_code(self.special_bool_cmp_utility_code)
            code.putln(
                "%s = %s(%s(%s, %s, %s)); %s%s" % (
                    result_code,
                    coerce_result,
                    self.special_bool_cmp_function,
                    result1, result2, richcmp_constants[op],
                    got_ref,
                    error_clause(result_code, self.pos)))
        elif operand1.type.is_pyobject and op not in ('is', 'is_not'):
            assert op not in ('in', 'not_in'), op
            code.putln("%s = PyObject_RichCompare(%s, %s, %s); %s%s" % (
                    result_code,
                    operand1.py_result(),
                    operand2.py_result(),
                    richcmp_constants[op],
                    got_ref,
                    error_clause(result_code, self.pos)))
        elif operand1.type.is_complex:
            # complex values only support (in)equality: emit eq(), negated for '!='
            code.putln("%s = %s(%s%s(%s, %s));" % (
                result_code,
                coerce_result,
                op == "!=" and "!" or "",
                operand1.type.unary_op('eq'),
                operand1.result(),
                operand2.result()))
        else:
            type1 = operand1.type
            type2 = operand2.type
            if (type1.is_extension_type or type2.is_extension_type) \
                    and not type1.same_as(type2):
                common_type = py_object_type
            elif type1.is_numeric:
                common_type = PyrexTypes.widest_numeric_type(type1, type2)
            else:
                common_type = type1
            code1 = operand1.result_as(common_type)
            code2 = operand2.result_as(common_type)
            statement = "%s = %s(%s %s %s);" % (
                result_code,
                coerce_result,
                code1,
                self.c_operator(op),
                code2)
            if self.is_cpp_comparison() and self.exception_check == '+':
                translate_cpp_exception(code, self.pos, statement, self.exception_value, self.in_nogil_context)
            code.putln(statement)
    def c_operator(self, op):
        # Translate 'is'/'is_not' to C pointer (in)equality; other
        # operators map to themselves.
        if op == 'is':
            return "=="
        elif op == 'is_not':
            return "!="
        else:
            return op
class PrimaryCmpNode(ExprNode, CmpNode):
    #  Non-cascaded comparison or first comparison of
    #  a cascaded sequence.
    #
    #  operator      string
    #  operand1      ExprNode
    #  operand2      ExprNode
    #  cascade       CascadedCmpNode
    #  We don't use the subexprs mechanism, because
    #  things here are too complicated for it to handle.
    #  Instead, we override all the framework methods
    #  which use it.
    child_attrs = ['operand1', 'operand2', 'coerced_operand2', 'cascade']
    cascade = None
    coerced_operand2 = None
    is_memslice_nonecheck = False
    def infer_type(self, env):
        # TODO: Actually implement this (after merging with -unstable).
        return py_object_type
    def type_dependencies(self, env):
        return ()
    def calculate_constant_result(self):
        assert not self.cascade
        self.calculate_cascaded_constant_result(self.operand1.constant_result)
    def compile_time_value(self, denv):
        operand1 = self.operand1.compile_time_value(denv)
        return self.cascaded_compile_time_value(operand1, denv)
    def analyse_types(self, env):
        # Analyse both operands, pick a comparison strategy (C++ operator,
        # memoryview None test, C string containment, optimised builtin
        # helper, or generic common-type comparison), coerce operands, and
        # fix the result type for the whole cascade.
        self.operand1 = self.operand1.analyse_types(env)
        self.operand2 = self.operand2.analyse_types(env)
        if self.is_cpp_comparison():
            self.analyse_cpp_comparison(env)
            if self.cascade:
                error(self.pos, "Cascading comparison not yet supported for cpp types.")
            return self
        if self.analyse_memoryviewslice_comparison(env):
            return self
        if self.cascade:
            self.cascade = self.cascade.analyse_types(env)
        if self.operator in ('in', 'not_in'):
            if self.is_c_string_contains():
                self.is_pycmp = False
                common_type = None
                if self.cascade:
                    error(self.pos, "Cascading comparison not yet supported for 'int_val in string'.")
                    return self
                if self.operand2.type is unicode_type:
                    env.use_utility_code(UtilityCode.load_cached("PyUCS4InUnicode", "StringTools.c"))
                else:
                    if self.operand1.type is PyrexTypes.c_uchar_type:
                        self.operand1 = self.operand1.coerce_to(PyrexTypes.c_char_type, env)
                    if self.operand2.type is not bytes_type:
                        self.operand2 = self.operand2.coerce_to(bytes_type, env)
                    env.use_utility_code(UtilityCode.load_cached("BytesContains", "StringTools.c"))
                self.operand2 = self.operand2.as_none_safe_node(
                    "argument of type 'NoneType' is not iterable")
            elif self.is_ptr_contains():
                if self.cascade:
                    error(self.pos, "Cascading comparison not supported for 'val in sliced pointer'.")
                self.type = PyrexTypes.c_bint_type
                # Will be transformed by IterationTransform
                return self
            elif self.find_special_bool_compare_function(env, self.operand1):
                if not self.operand1.type.is_pyobject:
                    self.operand1 = self.operand1.coerce_to_pyobject(env)
                common_type = None # if coercion needed, the method call above has already done it
                self.is_pycmp = False # result is bint
            else:
                common_type = py_object_type
                self.is_pycmp = True
        elif self.find_special_bool_compare_function(env, self.operand1):
            if not self.operand1.type.is_pyobject:
                self.operand1 = self.operand1.coerce_to_pyobject(env)
            common_type = None # if coercion needed, the method call above has already done it
            self.is_pycmp = False # result is bint
        else:
            common_type = self.find_common_type(env, self.operator, self.operand1)
            self.is_pycmp = common_type.is_pyobject
        if common_type is not None and not common_type.is_error:
            if self.operand1.type != common_type:
                self.operand1 = self.operand1.coerce_to(common_type, env)
            self.coerce_operands_to(common_type, env)
        if self.cascade:
            # the shared middle operand must be evaluated only once
            self.operand2 = self.operand2.coerce_to_simple(env)
            self.cascade.coerce_cascaded_operands_to_temp(env)
            operand2 = self.cascade.optimise_comparison(self.operand2, env)
            if operand2 is not self.operand2:
                self.coerced_operand2 = operand2
        if self.is_python_result():
            self.type = PyrexTypes.py_object_type
        else:
            self.type = PyrexTypes.c_bint_type
        # the whole cascade shares one result type
        cdr = self.cascade
        while cdr:
            cdr.type = self.type
            cdr = cdr.cascade
        if self.is_pycmp or self.cascade or self.special_bool_cmp_function:
            # 1) owned reference, 2) reused value, 3) potential function error return value
            self.is_temp = 1
        return self
    def analyse_cpp_comparison(self, env):
        # Resolve the C++ comparison operator overload and record its
        # exception behaviour; coerces operands to the operator's
        # parameter types.
        type1 = self.operand1.type
        type2 = self.operand2.type
        self.is_pycmp = False
        entry = env.lookup_operator(self.operator, [self.operand1, self.operand2])
        if entry is None:
            error(self.pos, "Invalid types for '%s' (%s, %s)" %
                (self.operator, type1, type2))
            self.type = PyrexTypes.error_type
            self.result_code = "<error>"
            return
        func_type = entry.type
        if func_type.is_ptr:
            func_type = func_type.base_type
        self.exception_check = func_type.exception_check
        self.exception_value = func_type.exception_value
        if self.exception_check == '+':
            self.is_temp = True
            if self.exception_value is None:
                env.use_utility_code(UtilityCode.load_cached("CppExceptionConversion", "CppSupport.cpp"))
        if len(func_type.args) == 1:
            # member operator: only the right operand is an argument
            self.operand2 = self.operand2.coerce_to(func_type.args[0].type, env)
        else:
            self.operand1 = self.operand1.coerce_to(func_type.args[0].type, env)
            self.operand2 = self.operand2.coerce_to(func_type.args[1].type, env)
        self.type = func_type.return_type
    def analyse_memoryviewslice_comparison(self, env):
        # Comparing a memoryview slice against None becomes a C pointer
        # test on the underlying memview object; returns True if handled.
        have_none = self.operand1.is_none or self.operand2.is_none
        have_slice = (self.operand1.type.is_memoryviewslice or
                      self.operand2.type.is_memoryviewslice)
        ops = ('==', '!=', 'is', 'is_not')
        if have_slice and have_none and self.operator in ops:
            self.is_pycmp = False
            self.type = PyrexTypes.c_bint_type
            self.is_memslice_nonecheck = True
            return True
        return False
    def coerce_to_boolean(self, env):
        if self.is_pycmp:
            # coercing to bool => may allow for more efficient comparison code
            if self.find_special_bool_compare_function(
                    env, self.operand1, result_is_bool=True):
                self.is_pycmp = False
                self.type = PyrexTypes.c_bint_type
                self.is_temp = 1
                if self.cascade:
                    operand2 = self.cascade.optimise_comparison(
                        self.operand2, env, result_is_bool=True)
                    if operand2 is not self.operand2:
                        self.coerced_operand2 = operand2
                return self
        # TODO: check if we can optimise parts of the cascade here
        return ExprNode.coerce_to_boolean(self, env)
    def has_python_operands(self):
        return (self.operand1.type.is_pyobject
            or self.operand2.type.is_pyobject)
    def check_const(self):
        if self.cascade:
            self.not_const()
            return False
        else:
            return self.operand1.check_const() and self.operand2.check_const()
    def calculate_result_code(self):
        # Build the C expression for a non-temp comparison result.
        if self.operand1.type.is_complex:
            if self.operator == "!=":
                negation = "!"
            else:
                negation = ""
            return "(%s%s(%s, %s))" % (
                negation,
                self.operand1.type.binary_op('=='),
                self.operand1.result(),
                self.operand2.result())
        elif self.is_c_string_contains():
            if self.operand2.type is unicode_type:
                method = "__Pyx_UnicodeContainsUCS4"
            else:
                method = "__Pyx_BytesContains"
            if self.operator == "not_in":
                negation = "!"
            else:
                negation = ""
            return "(%s%s(%s, %s))" % (
                negation,
                method,
                self.operand2.result(),
                self.operand1.result())
        else:
            result1 = self.operand1.result()
            result2 = self.operand2.result()
            if self.is_memslice_nonecheck:
                # compare the slice's memview pointer against Py_None
                if self.operand1.type.is_memoryviewslice:
                    result1 = "((PyObject *) %s.memview)" % result1
                else:
                    result2 = "((PyObject *) %s.memview)" % result2
            return "(%s %s %s)" % (
                result1,
                self.c_operator(self.operator),
                result2)
    def generate_evaluation_code(self, code):
        self.operand1.generate_evaluation_code(code)
        self.operand2.generate_evaluation_code(code)
        if self.is_temp:
            self.allocate_temp_result(code)
            self.generate_operation_code(code, self.result(),
                self.operand1, self.operator, self.operand2)
            if self.cascade:
                self.cascade.generate_evaluation_code(
                    code, self.result(), self.coerced_operand2 or self.operand2,
                    needs_evaluation=self.coerced_operand2 is not None)
            self.operand1.generate_disposal_code(code)
            self.operand1.free_temps(code)
            self.operand2.generate_disposal_code(code)
            self.operand2.free_temps(code)
    def generate_subexpr_disposal_code(self, code):
        #  If this is called, it is a non-cascaded cmp,
        #  so only need to dispose of the two main operands.
        self.operand1.generate_disposal_code(code)
        self.operand2.generate_disposal_code(code)
    def free_subexpr_temps(self, code):
        #  If this is called, it is a non-cascaded cmp,
        #  so only need to dispose of the two main operands.
        self.operand1.free_temps(code)
        self.operand2.free_temps(code)
    def annotate(self, code):
        self.operand1.annotate(code)
        self.operand2.annotate(code)
        if self.cascade:
            self.cascade.annotate(code)
class CascadedCmpNode(Node, CmpNode):
    #  A CascadedCmpNode is not a complete expression node. It
    #  hangs off the side of another comparison node, shares
    #  its left operand with that node, and shares its result
    #  with the PrimaryCmpNode at the head of the chain.
    #
    #  operator      string
    #  operand2      ExprNode
    #  cascade       CascadedCmpNode
    child_attrs = ['operand2', 'coerced_operand2', 'cascade']
    cascade = None
    coerced_operand2 = None
    constant_result = constant_value_not_set # FIXME: where to calculate this?
    def infer_type(self, env):
        # TODO: Actually implement this (after merging with -unstable).
        return py_object_type
    def type_dependencies(self, env):
        return ()
    def has_constant_result(self):
        return self.constant_result is not constant_value_not_set and \
               self.constant_result is not not_a_constant
    def analyse_types(self, env):
        self.operand2 = self.operand2.analyse_types(env)
        if self.cascade:
            self.cascade = self.cascade.analyse_types(env)
        return self
    def has_python_operands(self):
        return self.operand2.type.is_pyobject
    def is_cpp_comparison(self):
        # cascaded comparisons aren't currently implemented for c++ classes.
        return False
    def optimise_comparison(self, operand1, env, result_is_bool=False):
        # Try to switch this link of the cascade to an optimised C helper;
        # may coerce and return a replacement for the shared left operand.
        if self.find_special_bool_compare_function(env, operand1, result_is_bool):
            self.is_pycmp = False
            self.type = PyrexTypes.c_bint_type
            if not operand1.type.is_pyobject:
                operand1 = operand1.coerce_to_pyobject(env)
        if self.cascade:
            operand2 = self.cascade.optimise_comparison(self.operand2, env, result_is_bool)
            if operand2 is not self.operand2:
                self.coerced_operand2 = operand2
        return operand1
    def coerce_operands_to_pyobjects(self, env):
        self.operand2 = self.operand2.coerce_to_pyobject(env)
        if self.operand2.type is dict_type and self.operator in ('in', 'not_in'):
            self.operand2 = self.operand2.as_none_safe_node("'NoneType' object is not iterable")
        if self.cascade:
            self.cascade.coerce_operands_to_pyobjects(env)
    def coerce_cascaded_operands_to_temp(self, env):
        if self.cascade:
            #self.operand2 = self.operand2.coerce_to_temp(env) #CTT
            self.operand2 = self.operand2.coerce_to_simple(env)
            self.cascade.coerce_cascaded_operands_to_temp(env)
    def generate_evaluation_code(self, code, result, operand1, needs_evaluation=False):
        # Evaluate this link only if the chain is still true so far
        # (short-circuit); 'result' holds the running truth value.
        if self.type.is_pyobject:
            code.putln("if (__Pyx_PyObject_IsTrue(%s)) {" % result)
            code.put_decref(result, self.type)
        else:
            code.putln("if (%s) {" % result)
        if needs_evaluation:
            operand1.generate_evaluation_code(code)
        self.operand2.generate_evaluation_code(code)
        self.generate_operation_code(code, result,
            operand1, self.operator, self.operand2)
        if self.cascade:
            self.cascade.generate_evaluation_code(
                code, result, self.coerced_operand2 or self.operand2,
                needs_evaluation=self.coerced_operand2 is not None)
        if needs_evaluation:
            operand1.generate_disposal_code(code)
            operand1.free_temps(code)
        # Cascaded cmp result is always temp
        self.operand2.generate_disposal_code(code)
        self.operand2.free_temps(code)
        code.putln("}")
    def annotate(self, code):
        self.operand2.annotate(code)
        if self.cascade:
            self.cascade.annotate(code)
# Dispatch table mapping each binary operator string to the ExprNode
# subclass that implements it; used by binop_node() below.
binop_node_classes = {
    "or":       BoolBinopNode,
    "and":      BoolBinopNode,
    "|":        IntBinopNode,
    "^":        IntBinopNode,
    "&":        IntBinopNode,
    "<<":       IntBinopNode,
    ">>":       IntBinopNode,
    "+":        AddNode,
    "-":        SubNode,
    "*":        MulNode,
    "@":        MatMultNode,
    "/":        DivNode,
    "//":       DivNode,
    "%":        ModNode,
    "**":       PowNode,
}
def binop_node(pos, operator, operand1, operand2, inplace=False, **kwargs):
    """Construct the binop ExprNode subclass appropriate for 'operator'.

    Looks the node class up in binop_node_classes and instantiates it with
    the given operands; extra keyword arguments are passed through to the
    node constructor.
    """
    node_class = binop_node_classes[operator]
    kwargs.update(
        operator=operator,
        operand1=operand1,
        operand2=operand2,
        inplace=inplace,
    )
    return node_class(pos, **kwargs)
#-------------------------------------------------------------------
#
# Coercion nodes
#
# Coercion nodes are special in that they are created during
# the analyse_types phase of parse tree processing.
# Their __init__ methods consequently incorporate some aspects
# of that phase.
#
#-------------------------------------------------------------------
class CoercionNode(ExprNode):
    #  Abstract base class for coercion nodes.
    #
    #  arg       ExprNode       node being coerced
    subexprs = ['arg']
    constant_result = not_a_constant
    def __init__(self, arg):
        # Coercion nodes are created during type analysis, so they take the
        # position of the node they wrap.
        super(CoercionNode, self).__init__(arg.pos)
        self.arg = arg
        if debug_coercion:
            print("%s Coercing %s" % (self, self.arg))
    def calculate_constant_result(self):
        # constant folding can break type coercion, so this is disabled
        pass
    def annotate(self, code):
        # Mark the coercion in the annotated HTML output (only when the
        # types actually differ).
        self.arg.annotate(code)
        if self.arg.type != self.type:
            file, line, col = self.pos
            code.annotate((file, line, col-1), AnnotationItem(
                style='coerce', tag='coerce', text='[%s] to [%s]' % (self.arg.type, self.type)))
class CoerceToMemViewSliceNode(CoercionNode):
    """
    Coerce an object to a memoryview slice. This holds a new reference in
    a managed temp.
    """
    def __init__(self, arg, dst_type, env):
        assert dst_type.is_memoryviewslice
        assert not arg.type.is_memoryviewslice
        CoercionNode.__init__(self, arg)
        self.type = dst_type
        self.is_temp = 1
        # env is kept so that generate_result_code() can create the
        # from-Python conversion utility code lazily
        self.env = env
        self.use_managed_ref = True
        self.arg = arg
    def generate_result_code(self, code):
        # Convert the Python object via the type's generated from_py
        # function and check the slice-specific error condition.
        self.type.create_from_py_utility_code(self.env)
        code.putln("%s = %s(%s);" % (self.result(),
                                     self.type.from_py_function,
                                     self.arg.py_result()))
        error_cond = self.type.error_condition(self.result())
        code.putln(code.error_goto_if(error_cond, self.pos))
class CastNode(CoercionNode):
    #  Wrap a node in a C type cast.  No code is generated for the cast
    #  itself; it only appears in the result expression.
    def __init__(self, arg, new_type):
        CoercionNode.__init__(self, arg)
        self.type = new_type
    def may_be_none(self):
        return self.arg.may_be_none()
    def calculate_result_code(self):
        # result_as() renders the argument with the cast applied
        return self.arg.result_as(self.type)
    def generate_result_code(self, code):
        self.arg.generate_result_code(code)
class PyTypeTestNode(CoercionNode):
    #  This node is used to check that a generic Python
    #  object is an instance of a particular extension type.
    #  This node borrows the result of its argument node.
    # whether builtin types are tested for exact type (not subclasses)
    exact_builtin_type = True
    def __init__(self, arg, dst_type, env, notnone=False):
        #  The arg is know to be a Python object, and
        #  the dst_type is known to be an extension type.
        assert dst_type.is_extension_type or dst_type.is_builtin_type, "PyTypeTest on non extension type"
        CoercionNode.__init__(self, arg)
        self.type = dst_type
        self.result_ctype = arg.ctype()
        self.notnone = notnone
    nogil_check = Node.gil_error
    gil_message = "Python type test"
    def analyse_types(self, env):
        return self
    def may_be_none(self):
        if self.notnone:
            return False
        return self.arg.may_be_none()
    # the following simply delegate to the wrapped argument, since this
    # node borrows its result
    def is_simple(self):
        return self.arg.is_simple()
    def result_in_temp(self):
        return self.arg.result_in_temp()
    def is_ephemeral(self):
        return self.arg.is_ephemeral()
    def nonlocally_immutable(self):
        return self.arg.nonlocally_immutable()
    def calculate_constant_result(self):
        # FIXME
        pass
    def calculate_result_code(self):
        return self.arg.result()
    def generate_result_code(self, code):
        # Emit the type test (and optional not-None test); raises at
        # runtime when the object is not an instance of the target type.
        if self.type.typeobj_is_available():
            if self.type.is_builtin_type:
                type_test = self.type.type_test_code(
                    self.arg.py_result(),
                    self.notnone, exact=self.exact_builtin_type)
            else:
                type_test = self.type.type_test_code(
                    self.arg.py_result(), self.notnone)
                code.globalstate.use_utility_code(
                    UtilityCode.load_cached("ExtTypeTest", "ObjectHandling.c"))
            code.putln("if (!(%s)) %s" % (
                type_test, code.error_goto(self.pos)))
        else:
            error(self.pos, "Cannot test type of extern C class "
                "without type object name specification")
    def generate_post_assignment_code(self, code):
        self.arg.generate_post_assignment_code(code)
    def free_temps(self, code):
        self.arg.free_temps(code)
class NoneCheckNode(CoercionNode):
    # This node is used to check that a Python object is not None and
    # raises an appropriate exception (as specified by the creating
    # transform).

    is_nonecheck = True

    def __init__(self, arg, exception_type_cname, exception_message,
                 exception_format_args):
        CoercionNode.__init__(self, arg)
        # Borrows type and result from the argument; only adds the check.
        self.type = arg.type
        self.result_ctype = arg.ctype()
        self.exception_type_cname = exception_type_cname
        self.exception_message = exception_message
        self.exception_format_args = tuple(exception_format_args or ())

    nogil_check = None  # this node only guards an operation that would fail already

    def analyse_types(self, env):
        # Argument is already analysed by the creating transform.
        return self

    def may_be_none(self):
        # Ruling out None is the whole point of this node.
        return False

    def is_simple(self):
        return self.arg.is_simple()

    def result_in_temp(self):
        return self.arg.result_in_temp()

    def nonlocally_immutable(self):
        return self.arg.nonlocally_immutable()

    def calculate_result_code(self):
        return self.arg.result()

    def condition(self):
        # C expression (a PyObject*) to compare against Py_None.
        if self.type.is_pyobject:
            return self.arg.py_result()
        elif self.type.is_memoryviewslice:
            return "((PyObject *) %s.memview)" % self.arg.result()
        else:
            raise Exception("unsupported type")

    def put_nonecheck(self, code):
        # Emit: if the value is None, raise the configured exception,
        # acquiring the GIL first when inside a nogil section.
        code.putln(
            "if (unlikely(%s == Py_None)) {" % self.condition())
        if self.in_nogil_context:
            code.put_ensure_gil()
        escape = StringEncoding.escape_byte_string
        if self.exception_format_args:
            code.putln('PyErr_Format(%s, "%s", %s);' % (
                self.exception_type_cname,
                StringEncoding.escape_byte_string(
                    self.exception_message.encode('UTF-8')),
                ', '.join([ '"%s"' % escape(str(arg).encode('UTF-8'))
                            for arg in self.exception_format_args ])))
        else:
            code.putln('PyErr_SetString(%s, "%s");' % (
                self.exception_type_cname,
                escape(self.exception_message.encode('UTF-8'))))
        if self.in_nogil_context:
            code.put_release_ensured_gil()
        code.putln(code.error_goto(self.pos))
        code.putln("}")

    def generate_result_code(self, code):
        self.put_nonecheck(code)

    def generate_post_assignment_code(self, code):
        self.arg.generate_post_assignment_code(code)

    def free_temps(self, code):
        self.arg.free_temps(code)
class CoerceToPyTypeNode(CoercionNode):
    # This node is used to convert a C data type
    # to a Python object.

    type = py_object_type
    target_type = py_object_type
    is_temp = 1

    def __init__(self, arg, env, type=py_object_type):
        if not arg.type.create_to_py_utility_code(env):
            error(arg.pos, "Cannot convert '%s' to Python object" % arg.type)
        elif arg.type.is_complex:
            # special case: complex coercion is so complex that it
            # uses a macro ("__pyx_PyComplex_FromComplex()"), for
            # which the argument must be simple
            arg = arg.coerce_to_simple(env)
        CoercionNode.__init__(self, arg)
        if type is py_object_type:
            # be specific about some known types
            if arg.type.is_string or arg.type.is_cpp_string:
                self.type = default_str_type(env)
            elif arg.type.is_pyunicode_ptr or arg.type.is_unicode_char:
                self.type = unicode_type
            elif arg.type.is_complex:
                self.type = Builtin.complex_type
            self.target_type = self.type
        elif arg.type.is_string or arg.type.is_cpp_string:
            # Converting a C string to str/unicode needs an encoding.
            if (type not in (bytes_type, bytearray_type)
                    and not env.directives['c_string_encoding']):
                error(arg.pos,
                      "default encoding required for conversion from '%s' to '%s'" %
                      (arg.type, type))
            self.type = self.target_type = type
        else:
            # FIXME: check that the target type and the resulting type are compatible
            self.target_type = type

    gil_message = "Converting to Python object"

    def may_be_none(self):
        # FIXME: is this always safe?
        return False

    def coerce_to_boolean(self, env):
        # A C bint or a Python bool can be used directly; anything else
        # goes through a regular boolean coercion of this node.
        arg_type = self.arg.type
        if (arg_type == PyrexTypes.c_bint_type or
                (arg_type.is_pyobject and arg_type.name == 'bool')):
            return self.arg.coerce_to_temp(env)
        else:
            return CoerceToBooleanNode(self, env)

    def coerce_to_integer(self, env):
        # If not already some C integer type, coerce to longint.
        if self.arg.type.is_int:
            return self.arg
        else:
            return self.arg.coerce_to(PyrexTypes.c_long_type, env)

    def analyse_types(self, env):
        # The arg is always already analysed
        return self

    def generate_result_code(self, code):
        # Call the C->Python conversion helper and check for NULL.
        code.putln('%s; %s' % (
            self.arg.type.to_py_call_code(
                self.arg.result(),
                self.result(),
                self.target_type),
            code.error_goto_if_null(self.result(), self.pos)))
        code.put_gotref(self.py_result())
class CoerceIntToBytesNode(CoerceToPyTypeNode):
    # This node is used to convert a C int type to a Python bytes
    # object.

    is_temp = 1

    def __init__(self, arg, env):
        arg = arg.coerce_to_simple(env)
        CoercionNode.__init__(self, arg)
        self.type = Builtin.bytes_type

    def generate_result_code(self, code):
        arg = self.arg
        arg_result = arg.result()
        # For int types wider than a char, emit a runtime range check so
        # the value actually fits into one byte.
        if arg.type not in (PyrexTypes.c_char_type,
                            PyrexTypes.c_uchar_type,
                            PyrexTypes.c_schar_type):
            if arg.type.signed:
                code.putln("if ((%s < 0) || (%s > 255)) {" % (
                    arg_result, arg_result))
            else:
                code.putln("if (%s > 255) {" % arg_result)
            code.putln('PyErr_SetString(PyExc_OverflowError, '
                       '"value too large to pack into a byte"); %s' % (
                           code.error_goto(self.pos)))
            code.putln('}')
        temp = None
        if arg.type is not PyrexTypes.c_char_type:
            # PyBytes_FromStringAndSize needs a char*; stage through a
            # plain char temp.
            temp = code.funcstate.allocate_temp(PyrexTypes.c_char_type, manage_ref=False)
            code.putln("%s = (char)%s;" % (temp, arg_result))
            arg_result = temp
        code.putln('%s = PyBytes_FromStringAndSize(&%s, 1); %s' % (
            self.result(),
            arg_result,
            code.error_goto_if_null(self.result(), self.pos)))
        if temp is not None:
            code.funcstate.release_temp(temp)
        code.put_gotref(self.py_result())
class CoerceFromPyTypeNode(CoercionNode):
    # This node is used to convert a Python object
    # to a C data type.

    def __init__(self, result_type, arg, env):
        CoercionNode.__init__(self, arg)
        self.type = result_type
        self.is_temp = 1
        if not result_type.create_from_py_utility_code(env):
            error(arg.pos,
                  "Cannot convert Python object to '%s'" % result_type)
        if self.type.is_string or self.type.is_pyunicode_ptr:
            # Borrowing a char* from a mutable global is unsafe: the
            # buffer may be freed behind our back.
            if self.arg.is_name and self.arg.entry and self.arg.entry.is_pyglobal:
                warning(arg.pos,
                        "Obtaining '%s' from externally modifiable global Python value" % result_type,
                        level=1)

    def analyse_types(self, env):
        # The arg is always already analysed
        return self

    def is_ephemeral(self):
        # A borrowed pointer (but not an array) dies with the object.
        return (self.type.is_ptr and not self.type.is_array) and self.arg.is_ephemeral()

    def generate_result_code(self, code):
        code.putln(self.type.from_py_call_code(
            self.arg.py_result(), self.result(), self.pos, code))
        if self.type.is_pyobject:
            code.put_gotref(self.py_result())

    def nogil_check(self, env):
        error(self.pos, "Coercion from Python not allowed without the GIL")
class CoerceToBooleanNode(CoercionNode):
    # This node is used when a result needs to be used
    # in a boolean context.

    type = PyrexTypes.c_bint_type

    # Builtin containers whose truth value can be computed from their
    # size macro without calling __Pyx_PyObject_IsTrue().
    _special_builtins = {
        Builtin.list_type: 'PyList_GET_SIZE',
        Builtin.tuple_type: 'PyTuple_GET_SIZE',
        Builtin.set_type: 'PySet_GET_SIZE',
        Builtin.frozenset_type: 'PySet_GET_SIZE',
        Builtin.bytes_type: 'PyBytes_GET_SIZE',
        Builtin.unicode_type: 'PyUnicode_GET_SIZE',
    }

    def __init__(self, arg, env):
        CoercionNode.__init__(self, arg)
        if arg.type.is_pyobject:
            # Truth-testing an arbitrary object needs a temp result.
            self.is_temp = 1

    def nogil_check(self, env):
        # Only the generic IsTrue() path needs the GIL; the size-macro
        # path for known builtins is GIL-free.
        if self.arg.type.is_pyobject and self._special_builtins.get(self.arg.type) is None:
            self.gil_error()

    gil_message = "Truth-testing Python object"

    def check_const(self):
        if self.is_temp:
            self.not_const()
            return False
        return self.arg.check_const()

    def calculate_result_code(self):
        return "(%s != 0)" % self.arg.result()

    def generate_result_code(self, code):
        if not self.is_temp:
            return
        test_func = self._special_builtins.get(self.arg.type)
        if test_func is not None:
            # None is falsy for these builtin types as well, so guard it.
            code.putln("%s = (%s != Py_None) && (%s(%s) != 0);" % (
                self.result(),
                self.arg.py_result(),
                test_func,
                self.arg.py_result()))
        else:
            code.putln(
                "%s = __Pyx_PyObject_IsTrue(%s); %s" % (
                    self.result(),
                    self.arg.py_result(),
                    code.error_goto_if_neg(self.result(), self.pos)))
class CoerceToComplexNode(CoercionNode):
    """Coerce a (real or complex) value to a given complex type by
    building it from its real and imaginary parts."""

    def __init__(self, arg, dst_type, env):
        if arg.type.is_complex:
            # The real/imag part macros evaluate the argument twice,
            # so make sure it is a simple expression first.
            arg = arg.coerce_to_simple(env)
        self.type = dst_type
        CoercionNode.__init__(self, arg)
        dst_type.create_declaration_utility_code(env)

    def calculate_result_code(self):
        if self.arg.type.is_complex:
            real_part = "__Pyx_CREAL(%s)" % self.arg.result()
            imag_part = "__Pyx_CIMAG(%s)" % self.arg.result()
        else:
            # Real input: imaginary part is zero.
            real_part = self.arg.result()
            imag_part = "0"
        return "%s(%s, %s)" % (
            self.type.from_parts,
            real_part,
            imag_part)

    def generate_result_code(self, code):
        # Result is a pure expression; nothing to emit.
        pass
class CoerceToTempNode(CoercionNode):
    # This node is used to force the result of another node
    # to be stored in a temporary. It is only used if the
    # argument node's result is not already in a temporary.

    def __init__(self, arg, env):
        CoercionNode.__init__(self, arg)
        self.type = self.arg.type.as_argument_type()
        self.constant_result = self.arg.constant_result
        self.is_temp = 1
        if self.type.is_pyobject:
            self.result_ctype = py_object_type

    gil_message = "Creating temporary Python reference"

    def analyse_types(self, env):
        # The arg is always already analysed
        return self

    def coerce_to_boolean(self, env):
        # Push the boolean coercion below the temp; if the result is
        # simple, the temp is no longer needed at all.
        self.arg = self.arg.coerce_to_boolean(env)
        if self.arg.is_simple():
            return self.arg
        self.type = self.arg.type
        self.result_ctype = self.type
        return self

    def generate_result_code(self, code):
        #self.arg.generate_evaluation_code(code) # Already done
        # by generic generate_subexpr_evaluation_code!
        code.putln("%s = %s;" % (
            self.result(), self.arg.result_as(self.ctype())))
        if self.use_managed_ref:
            # The temp owns a new reference to the value.
            if self.type.is_pyobject:
                code.put_incref(self.result(), self.ctype())
            elif self.type.is_memoryviewslice:
                code.put_incref_memoryviewslice(self.result(),
                                                not self.in_nogil_context)
class ProxyNode(CoercionNode):
    """
    A node that should not be replaced by transforms or other means,
    and hence can be useful to wrap the argument to a clone node

    MyNode    -> ProxyNode -> ArgNode
    CloneNode -^
    """

    nogil_check = None

    def __init__(self, arg):
        super(ProxyNode, self).__init__(arg)
        self.constant_result = arg.constant_result
        self._proxy_type()

    def analyse_types(self, env):
        self.arg = self.arg.analyse_expressions(env)
        # Re-mirror type/entry after analysis may have changed them.
        self._proxy_type()
        return self

    def infer_type(self, env):
        return self.arg.infer_type(env)

    def _proxy_type(self):
        # Copy type information from the argument when available.
        if hasattr(self.arg, 'type'):
            self.type = self.arg.type
            self.result_ctype = self.arg.result_ctype
        if hasattr(self.arg, 'entry'):
            self.entry = self.arg.entry

    # All code generation and queries are delegated verbatim to the
    # wrapped argument node.
    def generate_result_code(self, code):
        self.arg.generate_result_code(code)

    def result(self):
        return self.arg.result()

    def is_simple(self):
        return self.arg.is_simple()

    def may_be_none(self):
        return self.arg.may_be_none()

    def generate_evaluation_code(self, code):
        self.arg.generate_evaluation_code(code)

    def generate_disposal_code(self, code):
        self.arg.generate_disposal_code(code)

    def free_temps(self, code):
        self.arg.free_temps(code)
class CloneNode(CoercionNode):
    # This node is employed when the result of another node needs
    # to be used multiple times. The argument node's result must
    # be in a temporary. This node "borrows" the result from the
    # argument node, and does not generate any evaluation or
    # disposal code for it. The original owner of the argument
    # node is responsible for doing those things.

    subexprs = []  # Arg is not considered a subexpr
    nogil_check = None

    def __init__(self, arg):
        CoercionNode.__init__(self, arg)
        self.constant_result = arg.constant_result
        # Mirror the argument's type info if it is already known.
        if hasattr(arg, 'type'):
            self.type = arg.type
            self.result_ctype = arg.result_ctype
        if hasattr(arg, 'entry'):
            self.entry = arg.entry

    def result(self):
        # Borrowed: same C name as the original temp.
        return self.arg.result()

    def may_be_none(self):
        return self.arg.may_be_none()

    def type_dependencies(self, env):
        return self.arg.type_dependencies(env)

    def infer_type(self, env):
        return self.arg.infer_type(env)

    def analyse_types(self, env):
        self.type = self.arg.type
        self.result_ctype = self.arg.result_ctype
        self.is_temp = 1
        if hasattr(self.arg, 'entry'):
            self.entry = self.arg.entry
        return self

    def coerce_to(self, dest_type, env):
        # Literals are cheap to re-coerce directly; no clone needed.
        if self.arg.is_literal:
            return self.arg.coerce_to(dest_type, env)
        return super(CloneNode, self).coerce_to(dest_type, env)

    def is_simple(self):
        return True  # result is always in a temp (or a name)

    # The original owner evaluates and disposes of the result.
    def generate_evaluation_code(self, code):
        pass

    def generate_result_code(self, code):
        pass

    def generate_disposal_code(self, code):
        pass

    def free_temps(self, code):
        pass
class CMethodSelfCloneNode(CloneNode):
    """Special CloneNode for the 'self' argument of builtin C methods
    that accepts subtypes of the builtin type.  This is safe only for
    'final' subtypes, as subtypes of the declared type may override
    the C method.
    """

    def coerce_to(self, dst_type, env):
        # A builtin supertype needs no coercion at all.
        if not (dst_type.is_builtin_type and self.type.subtype_of(dst_type)):
            return CloneNode.coerce_to(self, dst_type, env)
        return self
class ModuleRefNode(ExprNode):
    """Expression node that simply evaluates to the current module object."""

    type = py_object_type
    is_temp = False
    subexprs = []

    def analyse_types(self, env):
        # Nothing to analyse: the module object always exists.
        return self

    def may_be_none(self):
        # The module reference is never None.
        return False

    def calculate_result_code(self):
        return Naming.module_cname

    def generate_result_code(self, code):
        # The module object is a pre-existing global; no code needed.
        pass
class DocstringRefNode(ExprNode):
    # Extracts the docstring of the body element

    subexprs = ['body']
    type = py_object_type
    is_temp = True

    def __init__(self, pos, body):
        ExprNode.__init__(self, pos)
        # Only Python objects can carry a __doc__ attribute.
        assert body.type.is_pyobject
        self.body = body

    def analyse_types(self, env):
        # Body is analysed by its owner; nothing to do here.
        return self

    def generate_result_code(self, code):
        # Fetch body.__doc__ at runtime; propagate attribute errors.
        code.putln('%s = __Pyx_GetAttr(%s, %s); %s' % (
            self.result(), self.body.result(),
            code.intern_identifier(StringEncoding.EncodedString("__doc__")),
            code.error_goto_if_null(self.result(), self.pos)))
        code.put_gotref(self.result())
#------------------------------------------------------------------------------------
#
# Runtime support code
#
#------------------------------------------------------------------------------------
pyerr_occurred_withgil_utility_code= UtilityCode(
proto = """
static CYTHON_INLINE int __Pyx_ErrOccurredWithGIL(void); /* proto */
""",
impl = """
static CYTHON_INLINE int __Pyx_ErrOccurredWithGIL(void) {
int err;
#ifdef WITH_THREAD
PyGILState_STATE _save = PyGILState_Ensure();
#endif
err = !!PyErr_Occurred();
#ifdef WITH_THREAD
PyGILState_Release(_save);
#endif
return err;
}
"""
)
#------------------------------------------------------------------------------------
raise_unbound_local_error_utility_code = UtilityCode(
proto = """
static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname);
""",
impl = """
static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname) {
PyErr_Format(PyExc_UnboundLocalError, "local variable '%s' referenced before assignment", varname);
}
""")
raise_closure_name_error_utility_code = UtilityCode(
proto = """
static CYTHON_INLINE void __Pyx_RaiseClosureNameError(const char *varname);
""",
impl = """
static CYTHON_INLINE void __Pyx_RaiseClosureNameError(const char *varname) {
PyErr_Format(PyExc_NameError, "free variable '%s' referenced before assignment in enclosing scope", varname);
}
""")
# Don't inline the function, it should really never be called in production
raise_unbound_memoryview_utility_code_nogil = UtilityCode(
proto = """
static void __Pyx_RaiseUnboundMemoryviewSliceNogil(const char *varname);
""",
impl = """
static void __Pyx_RaiseUnboundMemoryviewSliceNogil(const char *varname) {
#ifdef WITH_THREAD
PyGILState_STATE gilstate = PyGILState_Ensure();
#endif
__Pyx_RaiseUnboundLocalError(varname);
#ifdef WITH_THREAD
PyGILState_Release(gilstate);
#endif
}
""",
requires = [raise_unbound_local_error_utility_code])
#------------------------------------------------------------------------------------
# Shared utility code for reporting tuple/sequence unpacking errors.
raise_too_many_values_to_unpack = UtilityCode.load_cached("RaiseTooManyValuesToUnpack", "ObjectHandling.c")
raise_need_more_values_to_unpack = UtilityCode.load_cached("RaiseNeedMoreValuesToUnpack", "ObjectHandling.c")
tuple_unpacking_error_code = UtilityCode.load_cached("UnpackTupleError", "ObjectHandling.c")
|
mrGeen/cython
|
Cython/Compiler/ExprNodes.py
|
Python
|
apache-2.0
| 498,361
|
[
"VisIt"
] |
8b08754e359c0404f485012e8b0446e5a3258299623159b4e4f6018c4cfca368
|
#!/usr/bin/env python
"""
Easy Install
------------
A tool for doing automatic download/extract/build of distutils-based Python
packages. For detailed documentation, see the accompanying EasyInstall.txt
file, or visit the `EasyInstall home page`__.
__ https://pythonhosted.org/setuptools/easy_install.html
"""
from glob import glob
from distutils.util import get_platform
from distutils.util import convert_path, subst_vars
from distutils.errors import DistutilsArgError, DistutilsOptionError, \
DistutilsError, DistutilsPlatformError
from distutils.command.install import INSTALL_SCHEMES, SCHEME_KEYS
from distutils import log, dir_util
from distutils.command.build_scripts import first_line_re
import sys
import os
import zipimport
import shutil
import tempfile
import zipfile
import re
import stat
import random
import platform
import textwrap
import warnings
import site
import struct
import contextlib
import subprocess
import shlex
from setuptools import Command
from setuptools.sandbox import run_setup
from setuptools.py31compat import get_path, get_config_vars
from setuptools.command import setopt
from setuptools.archive_util import unpack_archive
from setuptools.package_index import PackageIndex
from setuptools.package_index import URL_SCHEME
from setuptools.command import bdist_egg, egg_info
from setuptools.compat import (iteritems, maxsize, basestring, unicode,
reraise, PY2, PY3)
from pkg_resources import (
yield_lines, normalize_path, resource_string, ensure_directory,
get_distribution, find_distributions, Environment, Requirement,
Distribution, PathMetadata, EggMetadata, WorkingSet, DistributionNotFound,
VersionConflict, DEVELOP_DIST,
)
import pkg_resources
# Turn on PEP440Warnings
warnings.filterwarnings("default", category=pkg_resources.PEP440Warning)
__all__ = [
'samefile', 'easy_install', 'PthDistributions', 'extract_wininst_cfg',
'main', 'get_exe_prefixes',
]
def is_64bit():
    """Return True when running on a 64-bit Python build."""
    # A C pointer ("P") occupies 8 bytes on 64-bit builds, 4 on 32-bit.
    return struct.calcsize("P") == 8
def samefile(p1, p2):
    """Return True if *p1* and *p2* refer to the same file.

    Uses ``os.path.samefile`` when the platform supports it and both
    paths exist; otherwise compares the normalized path strings.
    """
    if hasattr(os.path, 'samefile') and os.path.exists(p1) and os.path.exists(p2):
        return os.path.samefile(p1, p2)
    return (os.path.normpath(os.path.normcase(p1)) ==
            os.path.normpath(os.path.normcase(p2)))
# ASCII helpers with a single call signature across Python 2 and 3:
# _to_ascii() returns a byte string, isascii() reports whether the
# argument round-trips through ASCII.
if PY2:
    def _to_ascii(s):
        # Python 2: str is already bytes; nothing to convert.
        return s

    def isascii(s):
        # True if the byte string decodes cleanly as ASCII.
        try:
            unicode(s, 'ascii')
            return True
        except UnicodeError:
            return False
else:
    def _to_ascii(s):
        # Python 3: encode text to ASCII bytes.
        return s.encode('ascii')

    def isascii(s):
        # True if the text encodes cleanly as ASCII.
        try:
            s.encode('ascii')
            return True
        except UnicodeError:
            return False
class easy_install(Command):
    """Manage a download/build/install process"""
    description = "Find/get/install Python packages"
    command_consumes_arguments = True

    # distutils-style option table: (long name, short flag, help text).
    user_options = [
        ('prefix=', None, "installation prefix"),
        ("zip-ok", "z", "install package as a zipfile"),
        ("multi-version", "m", "make apps have to require() a version"),
        ("upgrade", "U", "force upgrade (searches PyPI for latest versions)"),
        ("install-dir=", "d", "install package to DIR"),
        ("script-dir=", "s", "install scripts to DIR"),
        ("exclude-scripts", "x", "Don't install scripts"),
        ("always-copy", "a", "Copy all needed packages to install dir"),
        ("index-url=", "i", "base URL of Python Package Index"),
        ("find-links=", "f", "additional URL(s) to search for packages"),
        ("build-directory=", "b",
         "download/extract/build in DIR; keep the results"),
        ('optimize=', 'O',
         "also compile with optimization: -O1 for \"python -O\", "
         "-O2 for \"python -OO\", and -O0 to disable [default: -O0]"),
        ('record=', None,
         "filename in which to record list of installed files"),
        ('always-unzip', 'Z', "don't install as a zipfile, no matter what"),
        ('site-dirs=', 'S', "list of directories where .pth files work"),
        ('editable', 'e', "Install specified packages in editable form"),
        ('no-deps', 'N', "don't install dependencies"),
        ('allow-hosts=', 'H', "pattern(s) that hostnames must match"),
        ('local-snapshots-ok', 'l',
         "allow building eggs from local checkouts"),
        ('version', None, "print version information and exit"),
        ('no-find-links', None,
         "Don't load find-links defined in packages being installed")
    ]
    boolean_options = [
        'zip-ok', 'multi-version', 'exclude-scripts', 'upgrade', 'always-copy',
        'editable',
        'no-deps', 'local-snapshots-ok', 'version'
    ]

    # Offer --user only when per-user site-packages are enabled.
    if site.ENABLE_USER_SITE:
        help_msg = "install in user site-package '%s'" % site.USER_SITE
        user_options.append(('user', None, help_msg))
        boolean_options.append('user')

    # --always-unzip is the negation of --zip-ok.
    negative_opt = {'always-unzip': 'zip-ok'}

    # Factory used to build the package index; subclasses may override.
    create_index = PackageIndex
    def initialize_options(self):
        """Set every supported option to its 'undecided' default.

        Values are resolved later in finalize_options(), mostly by
        deferring to the standard 'install' command family.
        """
        # Default --user to on when easy_install itself was installed
        # into the user site-packages.
        if site.ENABLE_USER_SITE:
            whereami = os.path.abspath(__file__)
            self.user = whereami.startswith(site.USER_SITE)
        else:
            self.user = 0

        self.zip_ok = self.local_snapshots_ok = None
        self.install_dir = self.script_dir = self.exclude_scripts = None
        self.index_url = None
        self.find_links = None
        self.build_directory = None
        self.args = None
        self.optimize = self.record = None
        self.upgrade = self.always_copy = self.multi_version = None
        self.editable = self.no_deps = self.allow_hosts = None
        self.root = self.prefix = self.no_report = None
        self.version = None
        self.install_purelib = None  # for pure module distributions
        self.install_platlib = None  # non-pure (dists w/ extensions)
        self.install_headers = None  # for C/C++ headers
        self.install_lib = None  # set to either purelib or platlib
        self.install_scripts = None
        self.install_data = None
        self.install_base = None
        self.install_platbase = None
        if site.ENABLE_USER_SITE:
            self.install_userbase = site.USER_BASE
            self.install_usersite = site.USER_SITE
        else:
            self.install_userbase = None
            self.install_usersite = None
        self.no_find_links = None

        # Options not specifiable via command line
        self.package_index = None
        self.pth_file = self.always_copy_from = None
        self.site_dirs = None
        self.installed_projects = {}
        self.sitepy_installed = False
        # Always read easy_install options, even if we are subclassed, or have
        # an independent instance created. This ensures that defaults will
        # always come from the standard configuration file(s)' "easy_install"
        # section, even if this is a "develop" or "install" command, or some
        # other embedding.
        self._dry_run = None
        self.verbose = self.distribution.verbose
        self.distribution._set_command_options(
            self, self.distribution.get_option_dict('easy_install')
        )
def delete_blockers(self, blockers):
for filename in blockers:
if os.path.exists(filename) or os.path.islink(filename):
log.info("Deleting %s", filename)
if not self.dry_run:
if (os.path.isdir(filename) and
not os.path.islink(filename)):
rmtree(filename)
else:
os.unlink(filename)
def finalize_options(self):
if self.version:
print('setuptools %s' % get_distribution('setuptools').version)
sys.exit()
py_version = sys.version.split()[0]
prefix, exec_prefix = get_config_vars('prefix', 'exec_prefix')
self.config_vars = {
'dist_name': self.distribution.get_name(),
'dist_version': self.distribution.get_version(),
'dist_fullname': self.distribution.get_fullname(),
'py_version': py_version,
'py_version_short': py_version[0:3],
'py_version_nodot': py_version[0] + py_version[2],
'sys_prefix': prefix,
'prefix': prefix,
'sys_exec_prefix': exec_prefix,
'exec_prefix': exec_prefix,
# Only python 3.2+ has abiflags
'abiflags': getattr(sys, 'abiflags', ''),
}
if site.ENABLE_USER_SITE:
self.config_vars['userbase'] = self.install_userbase
self.config_vars['usersite'] = self.install_usersite
# fix the install_dir if "--user" was used
# XXX: duplicate of the code in the setup command
if self.user and site.ENABLE_USER_SITE:
self.create_home_path()
if self.install_userbase is None:
raise DistutilsPlatformError(
"User base directory is not specified")
self.install_base = self.install_platbase = self.install_userbase
if os.name == 'posix':
self.select_scheme("unix_user")
else:
self.select_scheme(os.name + "_user")
self.expand_basedirs()
self.expand_dirs()
self._expand('install_dir', 'script_dir', 'build_directory',
'site_dirs')
# If a non-default installation directory was specified, default the
# script directory to match it.
if self.script_dir is None:
self.script_dir = self.install_dir
if self.no_find_links is None:
self.no_find_links = False
# Let install_dir get set by install_lib command, which in turn
# gets its info from the install command, and takes into account
# --prefix and --home and all that other crud.
self.set_undefined_options(
'install_lib', ('install_dir', 'install_dir')
)
# Likewise, set default script_dir from 'install_scripts.install_dir'
self.set_undefined_options(
'install_scripts', ('install_dir', 'script_dir')
)
if self.user and self.install_purelib:
self.install_dir = self.install_purelib
self.script_dir = self.install_scripts
# default --record from the install command
self.set_undefined_options('install', ('record', 'record'))
# Should this be moved to the if statement below? It's not used
# elsewhere
normpath = map(normalize_path, sys.path)
self.all_site_dirs = get_site_dirs()
if self.site_dirs is not None:
site_dirs = [
os.path.expanduser(s.strip()) for s in
self.site_dirs.split(',')
]
for d in site_dirs:
if not os.path.isdir(d):
log.warn("%s (in --site-dirs) does not exist", d)
elif normalize_path(d) not in normpath:
raise DistutilsOptionError(
d + " (in --site-dirs) is not on sys.path"
)
else:
self.all_site_dirs.append(normalize_path(d))
if not self.editable:
self.check_site_dir()
self.index_url = self.index_url or "https://pypi.python.org/simple"
self.shadow_path = self.all_site_dirs[:]
for path_item in self.install_dir, normalize_path(self.script_dir):
if path_item not in self.shadow_path:
self.shadow_path.insert(0, path_item)
if self.allow_hosts is not None:
hosts = [s.strip() for s in self.allow_hosts.split(',')]
else:
hosts = ['*']
if self.package_index is None:
self.package_index = self.create_index(
self.index_url, search_path=self.shadow_path, hosts=hosts,
)
self.local_index = Environment(self.shadow_path + sys.path)
if self.find_links is not None:
if isinstance(self.find_links, basestring):
self.find_links = self.find_links.split()
else:
self.find_links = []
if self.local_snapshots_ok:
self.package_index.scan_egg_links(self.shadow_path + sys.path)
if not self.no_find_links:
self.package_index.add_find_links(self.find_links)
self.set_undefined_options('install_lib', ('optimize', 'optimize'))
if not isinstance(self.optimize, int):
try:
self.optimize = int(self.optimize)
if not (0 <= self.optimize <= 2):
raise ValueError
except ValueError:
raise DistutilsOptionError("--optimize must be 0, 1, or 2")
if self.editable and not self.build_directory:
raise DistutilsArgError(
"Must specify a build directory (-b) when using --editable"
)
if not self.args:
raise DistutilsArgError(
"No urls, filenames, or requirements specified (see --help)")
self.outputs = []
def _expand_attrs(self, attrs):
for attr in attrs:
val = getattr(self, attr)
if val is not None:
if os.name == 'posix' or os.name == 'nt':
val = os.path.expanduser(val)
val = subst_vars(val, self.config_vars)
setattr(self, attr, val)
def expand_basedirs(self):
"""Calls `os.path.expanduser` on install_base, install_platbase and
root."""
self._expand_attrs(['install_base', 'install_platbase', 'root'])
def expand_dirs(self):
"""Calls `os.path.expanduser` on install dirs."""
self._expand_attrs(['install_purelib', 'install_platlib',
'install_lib', 'install_headers',
'install_scripts', 'install_data', ])
    def run(self):
        """Install each requested spec, then optionally write the --record
        file listing everything installed."""
        if self.verbose != self.distribution.verbose:
            log.set_verbosity(self.verbose)
        try:
            for spec in self.args:
                self.easy_install(spec, not self.no_deps)
            if self.record:
                outputs = self.outputs
                if self.root:  # strip any package prefix
                    root_len = len(self.root)
                    for counter in range(len(outputs)):
                        outputs[counter] = outputs[counter][root_len:]
                from distutils import file_util

                self.execute(
                    file_util.write_file, (self.record, outputs),
                    "writing list of installed files to '%s'" %
                    self.record
                )
            self.warn_deprecated_options()
        finally:
            # Restore the distribution-wide verbosity level.
            log.set_verbosity(self.distribution.verbose)
def pseudo_tempname(self):
"""Return a pseudo-tempname base in the install directory.
This code is intentionally naive; if a malicious party can write to
the target directory you're already in deep doodoo.
"""
try:
pid = os.getpid()
except:
pid = random.randint(0, maxsize)
return os.path.join(self.install_dir, "test-easy-install-%s" % pid)
    def warn_deprecated_options(self):
        # No options are currently deprecated; kept as a hook for
        # subclasses and future use.
        pass
    def check_site_dir(self):
        """Verify that self.install_dir is .pth-capable dir, if needed"""
        instdir = normalize_path(self.install_dir)
        pth_file = os.path.join(instdir, 'easy-install.pth')

        # Is it a configured, PYTHONPATH, implicit, or explicit site dir?
        is_site_dir = instdir in self.all_site_dirs

        if not is_site_dir and not self.multi_version:
            # No? Then directly test whether it does .pth file processing
            is_site_dir = self.check_pth_processing()
        else:
            # make sure we can write to target dir
            testfile = self.pseudo_tempname() + '.write-test'
            test_exists = os.path.exists(testfile)
            try:
                if test_exists:
                    os.unlink(testfile)
                open(testfile, 'w').close()
                os.unlink(testfile)
            except (OSError, IOError):
                self.cant_write_to_target()

        if not is_site_dir and not self.multi_version:
            # Can't install non-multi to non-site dir
            raise DistutilsError(self.no_default_version_msg())

        if is_site_dir:
            if self.pth_file is None:
                self.pth_file = PthDistributions(pth_file, self.all_site_dirs)
        else:
            self.pth_file = None

        PYTHONPATH = os.environ.get('PYTHONPATH', '').split(os.pathsep)
        if instdir not in map(normalize_path, [_f for _f in PYTHONPATH if _f]):
            # only PYTHONPATH dirs need a site.py, so pretend it's there
            self.sitepy_installed = True
        elif self.multi_version and not os.path.exists(pth_file):
            self.sitepy_installed = True  # don't need site.py in this case
            self.pth_file = None  # and don't create a .pth file
        self.install_dir = instdir
    def cant_write_to_target(self):
        """Raise DistutilsError explaining that the install directory is
        not writable, with a hint tailored to whether it exists.

        Must be called from inside an ``except`` block: the message embeds
        the current exception from sys.exc_info().
        """
        template = """can't create or remove files in install directory

The following error occurred while trying to add or remove files in the
installation directory:

    %s

The installation directory you specified (via --install-dir, --prefix, or
the distutils default setting) was:

    %s
"""
        msg = template % (sys.exc_info()[1], self.install_dir,)

        if not os.path.exists(self.install_dir):
            msg += """
This directory does not currently exist.  Please create it and try again, or
choose a different installation directory (using the -d or --install-dir
option).
"""
        else:
            msg += """
Perhaps your account does not have write access to this directory?  If the
installation directory is a system-owned directory, you may need to sign in
as the administrator or "root" account.  If you do not have administrative
access to this machine, you may wish to choose a different installation
directory, preferably one that is listed in your PYTHONPATH environment
variable.

For information on other options, you may wish to consult the
documentation at:

  https://pythonhosted.org/setuptools/easy_install.html

Please make the appropriate changes for your system and try again.
"""
        raise DistutilsError(msg)
def check_pth_processing(self):
    """Empirically verify whether .pth files are supported in inst. dir"""
    instdir = self.install_dir
    log.info("Checking .pth file support in %s", instdir)
    # probe files: a throwaway .pth plus the marker it is expected to create
    pth_file = self.pseudo_tempname() + ".pth"
    ok_file = pth_file + '.ok'
    ok_exists = os.path.exists(ok_file)
    try:
        if ok_exists:
            os.unlink(ok_file)
        dirname = os.path.dirname(ok_file)
        if not os.path.exists(dirname):
            os.makedirs(dirname)
        f = open(pth_file, 'w')
    except (OSError, IOError):
        # can't even create the probe file -> directory isn't writable
        self.cant_write_to_target()
    else:
        try:
            # the probe .pth runs this line at interpreter startup; the
            # marker file appears only if .pth processing actually ran
            f.write("import os; f = open(%r, 'w'); f.write('OK'); "
                    "f.close()\n" % (ok_file,))
            f.close()
            f = None
            executable = sys.executable
            if os.name == 'nt':
                dirname, basename = os.path.split(executable)
                alt = os.path.join(dirname, 'pythonw.exe')
                if (basename.lower() == 'python.exe' and
                        os.path.exists(alt)):
                    # use pythonw.exe to avoid opening a console window
                    executable = alt
            from distutils.spawn import spawn
            # launch a fresh interpreter (-E: ignore env vars) so site.py
            # processes .pth files from scratch
            spawn([executable, '-E', '-c', 'pass'], 0)
            if os.path.exists(ok_file):
                log.info(
                    "TEST PASSED: %s appears to support .pth files",
                    instdir
                )
                return True
        finally:
            # always clean up the probe files
            if f:
                f.close()
            if os.path.exists(ok_file):
                os.unlink(ok_file)
            if os.path.exists(pth_file):
                os.unlink(pth_file)
    if not self.multi_version:
        log.warn("TEST FAILED: %s does NOT support .pth files", instdir)
    return False
def install_egg_scripts(self, dist):
    """Write all the scripts for `dist`, unless scripts are excluded."""
    scripts_wanted = not self.exclude_scripts and dist.metadata_isdir('scripts')
    if scripts_wanted:
        for entry in dist.metadata_listdir('scripts'):
            member = 'scripts/' + entry
            if dist.metadata_isdir(member):
                # A directory (likely a Python 3 __pycache__) is not a
                # script -- skip it.
                continue
            self.install_script(dist, entry, dist.get_metadata(member))
    # wrapper (entry-point) scripts are written in either case
    self.install_wrapper_scripts(dist)
def add_output(self, path):
    """Record `path` -- or, for a directory, every file under it -- as output."""
    if not os.path.isdir(path):
        self.outputs.append(path)
        return
    for base, dirs, files in os.walk(path):
        self.outputs.extend(os.path.join(base, name) for name in files)
def not_editable(self, spec):
    """Reject `spec` when --editable is in effect (filenames/URLs unsupported)."""
    if not self.editable:
        return
    raise DistutilsArgError(
        "Invalid argument %r: you can't use filenames or URLs "
        "with --editable (except via the --find-links option)."
        % (spec,)
    )
def check_editable(self, spec):
    """In --editable mode, ensure the checkout target does not already exist."""
    if not self.editable:
        return
    target = os.path.join(self.build_directory, spec.key)
    if os.path.exists(target):
        raise DistutilsArgError(
            "%r already exists in %s; can't do a checkout there" %
            (spec.key, self.build_directory)
        )
def easy_install(self, spec, deps=False):
    """Install `spec` (requirement string, URL, or local path).

    Locates/downloads the distribution into a temp dir, installs it,
    and optionally its dependencies.  Returns the installed
    Distribution (or a list/None via install_item).
    """
    tmpdir = tempfile.mkdtemp(prefix="easy_install-")
    download = None
    if not self.editable:
        # the install dir may need a site.py to be importable
        self.install_site_py()
    try:
        if not isinstance(spec, Requirement):
            if URL_SCHEME(spec):
                # It's a url, download it to tmpdir and process
                self.not_editable(spec)
                download = self.package_index.download(spec, tmpdir)
                return self.install_item(None, download, tmpdir, deps,
                                         True)
            elif os.path.exists(spec):
                # Existing file or directory, just process it directly
                self.not_editable(spec)
                return self.install_item(None, spec, tmpdir, deps, True)
            else:
                spec = parse_requirement_arg(spec)
        self.check_editable(spec)
        dist = self.package_index.fetch_distribution(
            spec, tmpdir, self.upgrade, self.editable,
            not self.always_copy, self.local_index
        )
        if dist is None:
            msg = "Could not find suitable distribution for %r" % spec
            if self.always_copy:
                msg += " (--always-copy skips system and development eggs)"
            raise DistutilsError(msg)
        elif dist.precedence == DEVELOP_DIST:
            # .egg-info dists don't need installing, just process deps
            self.process_distribution(spec, dist, deps, "Using")
            return dist
        else:
            return self.install_item(spec, dist.location, tmpdir, deps)
    finally:
        if os.path.exists(tmpdir):
            rmtree(tmpdir)
def install_item(self, spec, download, tmpdir, deps, install_needed=False):
    """Install the artifact at `download`.

    When `install_needed` is false and `download` is a local .egg that is
    already in the local index, it is simply reused.  Returns the dist
    matching `spec`, or None when no installed dist satisfies it.
    """
    # Installation is also needed if file in tmpdir or is not an egg
    install_needed = install_needed or self.always_copy
    install_needed = install_needed or os.path.dirname(download) == tmpdir
    install_needed = install_needed or not download.endswith('.egg')
    install_needed = install_needed or (
        self.always_copy_from is not None and
        os.path.dirname(normalize_path(download)) ==
        normalize_path(self.always_copy_from)
    )
    if spec and not install_needed:
        # at this point, we know it's a local .egg, we just don't know if
        # it's already installed.
        for dist in self.local_index[spec.project_name]:
            if dist.location == download:
                break
        else:
            install_needed = True  # it's not in the local index
    log.info("Processing %s", os.path.basename(download))
    if install_needed:
        dists = self.install_eggs(spec, download, tmpdir)
        for dist in dists:
            self.process_distribution(spec, dist, deps)
    else:
        dists = [self.egg_distribution(download)]
        self.process_distribution(spec, dists[0], deps, "Using")
    if spec is not None:
        for dist in dists:
            if dist in spec:
                return dist
    # falls through returning None when nothing matched `spec`
def select_scheme(self, name):
    """Sets the install directories by applying the install schemes."""
    # a bad `name` is the caller's problem -- let the KeyError propagate
    scheme = INSTALL_SCHEMES[name]
    for key in SCHEME_KEYS:
        attr = 'install_' + key
        current = getattr(self, attr)
        if current is None:
            setattr(self, attr, scheme[key])
def process_distribution(self, requirement, dist, deps=True, *info):
    """Register the installed `dist` and (optionally) resolve its deps.

    `info` carries extra wording for the installation report
    (e.g. "Using" when an existing egg was reused).
    """
    self.update_pth(dist)
    self.package_index.add(dist)
    # replace any stale entry for the same dist in the local index
    if dist in self.local_index[dist.key]:
        self.local_index.remove(dist)
    self.local_index.add(dist)
    self.install_egg_scripts(dist)
    self.installed_projects[dist.key] = dist
    log.info(self.installation_report(requirement, dist, *info))
    if (dist.has_metadata('dependency_links.txt') and
            not self.no_find_links):
        self.package_index.add_find_links(
            dist.get_metadata_lines('dependency_links.txt')
        )
    if not deps and not self.always_copy:
        return
    elif requirement is not None and dist.key != requirement.key:
        log.warn("Skipping dependencies for %s", dist)
        return  # XXX this is not the distribution we were looking for
    elif requirement is None or dist not in requirement:
        # if we wound up with a different version, resolve what we've got
        distreq = dist.as_requirement()
        requirement = requirement or distreq
        requirement = Requirement(
            distreq.project_name, distreq.specs, requirement.extras
        )
    log.info("Processing dependencies for %s", requirement)
    try:
        distros = WorkingSet([]).resolve(
            [requirement], self.local_index, self.easy_install
        )
    except DistributionNotFound as e:
        raise DistutilsError(
            "Could not find required distribution %s" % e.args
        )
    except VersionConflict as e:
        raise DistutilsError(e.report())
    if self.always_copy or self.always_copy_from:
        # Force all the relevant distros to be copied or activated
        for dist in distros:
            if dist.key not in self.installed_projects:
                self.easy_install(dist.as_requirement())
    log.info("Finished processing dependencies for %s", requirement)
def should_unzip(self, dist):
    """Return True when `dist` must be installed as an unzipped egg."""
    if self.zip_ok is not None:
        # explicit --zip-ok / --no-zip-ok overrides any egg metadata
        return not self.zip_ok
    if dist.has_metadata('not-zip-safe'):
        return True
    # unzip unless the egg is positively marked zip-safe
    return not dist.has_metadata('zip-safe')
def maybe_move(self, spec, dist_filename, setup_base):
    """Move a downloaded source tree into the --build-directory.

    Returns the directory the build should proceed in: the new
    destination, or the original `setup_base` when the destination
    already exists and can't be kept.
    """
    dst = os.path.join(self.build_directory, spec.key)
    if os.path.exists(dst):
        msg = ("%r already exists in %s; build directory %s will not be "
               "kept")
        log.warn(msg, spec.key, self.build_directory, setup_base)
        return setup_base
    if os.path.isdir(dist_filename):
        setup_base = dist_filename
    else:
        if os.path.dirname(dist_filename) == setup_base:
            os.unlink(dist_filename)  # get it out of the tmp dir
        contents = os.listdir(setup_base)
        if len(contents) == 1:
            dist_filename = os.path.join(setup_base, contents[0])
            if os.path.isdir(dist_filename):
                # if the only thing there is a directory, move it instead
                setup_base = dist_filename
    ensure_directory(dst)
    shutil.move(setup_base, dst)
    return dst
def install_wrapper_scripts(self, dist):
    """Emit the auto-generated wrapper scripts for `dist`."""
    if self.exclude_scripts:
        return
    for args in ScriptWriter.best().get_args(dist):
        self.write_script(*args)
def install_script(self, dist, script_name, script_text, dev_path=None):
    """Generate a legacy script wrapper and install it"""
    # NOTE: the wrapper template is filled via `% locals()`, so the local
    # names `spec` and `dev_path` are part of its contract -- do not rename.
    spec = str(dist.as_requirement())
    is_script = is_python_script(script_text, script_name)
    if is_script:
        script_text = (ScriptWriter.get_header(script_text) +
                       self._load_template(dev_path) % locals())
    # always written in binary mode after ASCII-encoding the text
    self.write_script(script_name, _to_ascii(script_text), 'b')
@staticmethod
def _load_template(dev_path):
    """Load the script-wrapper template shipped with setuptools.

    Two variants exist; the "(dev)" one is used when `dev_path` is given.
    """
    # See https://bitbucket.org/pypa/setuptools/issue/134 for info
    # on script file naming and downstream issues with SVR4
    suffix = ' (dev).tmpl' if dev_path else '.tmpl'
    template_name = 'script' + suffix
    return resource_string('setuptools', template_name).decode('utf-8')
def write_script(self, script_name, contents, mode="t", blockers=()):
    """Write an executable file to the scripts directory.

    `mode` is appended to "w" (e.g. "b" for binary); `blockers` are stale
    sibling files (old .py/.pyw wrappers) to delete first.
    """
    self.delete_blockers(  # clean up old .py/.pyw w/o a script
        [os.path.join(self.script_dir, x) for x in blockers]
    )
    log.info("Installing %s script to %s", script_name, self.script_dir)
    target = os.path.join(self.script_dir, script_name)
    self.add_output(target)
    mask = current_umask()
    if not self.dry_run:
        ensure_directory(target)
        if os.path.exists(target):
            os.unlink(target)
        # context manager ensures the handle is closed even if write fails
        # (the old open/write/close sequence leaked it on error)
        with open(target, "w" + mode) as f:
            f.write(contents)
        chmod(target, 0o777 - mask)
def install_eggs(self, spec, dist_filename, tmpdir):
    """Install `dist_filename`, building it first if necessary.

    .egg files/dirs are installed directly; bdist_wininst .exe files are
    converted; anything else is unpacked and built via its setup.py.
    Returns a list of installed Distribution objects.
    """
    # .egg dirs or files are already built, so just return them
    if dist_filename.lower().endswith('.egg'):
        return [self.install_egg(dist_filename, tmpdir)]
    elif dist_filename.lower().endswith('.exe'):
        return [self.install_exe(dist_filename, tmpdir)]
    # Anything else, try to extract and build
    setup_base = tmpdir
    if os.path.isfile(dist_filename) and not dist_filename.endswith('.py'):
        unpack_archive(dist_filename, tmpdir, self.unpack_progress)
    elif os.path.isdir(dist_filename):
        setup_base = os.path.abspath(dist_filename)
    if (setup_base.startswith(tmpdir)  # something we downloaded
            and self.build_directory and spec is not None):
        setup_base = self.maybe_move(spec, dist_filename, setup_base)
    # Find the setup.py file
    setup_script = os.path.join(setup_base, 'setup.py')
    if not os.path.exists(setup_script):
        setups = glob(os.path.join(setup_base, '*', 'setup.py'))
        if not setups:
            raise DistutilsError(
                "Couldn't find a setup script in %s" %
                os.path.abspath(dist_filename)
            )
        if len(setups) > 1:
            raise DistutilsError(
                "Multiple setup scripts in %s" %
                os.path.abspath(dist_filename)
            )
        setup_script = setups[0]
    # Now run it, and return the result
    if self.editable:
        log.info(self.report_editable(spec, setup_script))
        return []
    else:
        return self.build_and_install(setup_script, setup_base)
def egg_distribution(self, egg_path):
    """Build a Distribution for the egg at `egg_path` (dir or zipfile)."""
    if os.path.isdir(egg_path):
        # unpacked egg: metadata lives in the EGG-INFO subdirectory
        meta = PathMetadata(egg_path, os.path.join(egg_path, 'EGG-INFO'))
    else:
        # zipped egg: read metadata through a zipimporter
        meta = EggMetadata(zipimport.zipimporter(egg_path))
    return Distribution.from_filename(egg_path, metadata=meta)
def install_egg(self, egg_path, tmpdir):
    """Copy, move, or extract the egg at `egg_path` into the install dir.

    Eggs originating in `tmpdir` are moved; others are copied.  Returns
    the Distribution for the installed copy.
    """
    destination = os.path.join(self.install_dir,
                               os.path.basename(egg_path))
    destination = os.path.abspath(destination)
    if not self.dry_run:
        ensure_directory(destination)
    dist = self.egg_distribution(egg_path)
    if not samefile(egg_path, destination):
        # clear whatever currently occupies the destination
        if os.path.isdir(destination) and not os.path.islink(destination):
            dir_util.remove_tree(destination, dry_run=self.dry_run)
        elif os.path.exists(destination):
            self.execute(os.unlink, (destination,), "Removing " +
                         destination)
        try:
            new_dist_is_zipped = False
            if os.path.isdir(egg_path):
                if egg_path.startswith(tmpdir):
                    f, m = shutil.move, "Moving"
                else:
                    f, m = shutil.copytree, "Copying"
            elif self.should_unzip(dist):
                self.mkpath(destination)
                f, m = self.unpack_and_compile, "Extracting"
            else:
                new_dist_is_zipped = True
                if egg_path.startswith(tmpdir):
                    f, m = shutil.move, "Moving"
                else:
                    f, m = shutil.copy2, "Copying"
            self.execute(f, (egg_path, destination),
                         (m + " %s to %s") %
                         (os.path.basename(egg_path),
                          os.path.dirname(destination)))
            update_dist_caches(destination,
                               fix_zipimporter_caches=new_dist_is_zipped)
        except:
            # invalidate import caches even on failure before re-raising
            update_dist_caches(destination, fix_zipimporter_caches=False)
            raise
    self.add_output(destination)
    return self.egg_distribution(destination)
def install_exe(self, dist_filename, tmpdir):
    """Convert a bdist_wininst .exe to an egg and install it."""
    # See if it's valid, get data
    cfg = extract_wininst_cfg(dist_filename)
    if cfg is None:
        raise DistutilsError(
            "%s is not a valid distutils Windows .exe" % dist_filename
        )
    # Create a dummy distribution object until we build the real distro
    dist = Distribution(
        None,
        project_name=cfg.get('metadata', 'name'),
        version=cfg.get('metadata', 'version'), platform=get_platform(),
    )
    # Convert the .exe to an unpacked egg
    egg_path = dist.location = os.path.join(tmpdir, dist.egg_name() +
                                            '.egg')
    egg_tmp = egg_path + '.tmp'
    _egg_info = os.path.join(egg_tmp, 'EGG-INFO')
    pkg_inf = os.path.join(_egg_info, 'PKG-INFO')
    ensure_directory(pkg_inf)  # make sure EGG-INFO dir exists
    dist._provider = PathMetadata(egg_tmp, _egg_info)  # XXX
    self.exe_to_egg(dist_filename, egg_tmp)
    # Write EGG-INFO/PKG-INFO
    if not os.path.exists(pkg_inf):
        f = open(pkg_inf, 'w')
        f.write('Metadata-Version: 1.0\n')
        for k, v in cfg.items('metadata'):
            if k != 'target_version':
                f.write('%s: %s\n' % (k.replace('_', '-').title(), v))
        f.close()
    script_dir = os.path.join(_egg_info, 'scripts')
    # delete entry-point scripts to avoid duping
    self.delete_blockers(
        [os.path.join(script_dir, args[0]) for args in
         ScriptWriter.get_args(dist)]
    )
    # Build .egg file from tmpdir
    bdist_egg.make_zipfile(
        egg_path, egg_tmp, verbose=self.verbose, dry_run=self.dry_run
    )
    # install the .egg
    return self.install_egg(egg_path, tmpdir)
def exe_to_egg(self, dist_filename, egg_tmp):
    """Extract a bdist_wininst to the directories an egg would use"""
    # Check for .pth file and set up prefix translations
    prefixes = get_exe_prefixes(dist_filename)
    to_compile = []
    native_libs = []
    top_level = {}

    def process(src, dst):
        # archive filter: map installer paths onto the egg layout and
        # record modules/extensions for post-processing
        s = src.lower()
        for old, new in prefixes:
            if s.startswith(old):
                src = new + src[len(old):]
                parts = src.split('/')
                dst = os.path.join(egg_tmp, *parts)
                dl = dst.lower()
                if dl.endswith('.pyd') or dl.endswith('.dll'):
                    parts[-1] = bdist_egg.strip_module(parts[-1])
                    top_level[os.path.splitext(parts[0])[0]] = 1
                    native_libs.append(src)
                elif dl.endswith('.py') and old != 'SCRIPTS/':
                    top_level[os.path.splitext(parts[0])[0]] = 1
                    to_compile.append(dst)
                return dst
        if not src.endswith('.pth'):
            log.warn("WARNING: can't process %s", src)
        return None

    # extract, tracking .pyd/.dll->native_libs and .py -> to_compile
    unpack_archive(dist_filename, egg_tmp, process)
    stubs = []
    for res in native_libs:
        if res.lower().endswith('.pyd'):  # create stubs for .pyd's
            parts = res.split('/')
            resource = parts[-1]
            parts[-1] = bdist_egg.strip_module(parts[-1]) + '.py'
            pyfile = os.path.join(egg_tmp, *parts)
            to_compile.append(pyfile)
            stubs.append(pyfile)
            bdist_egg.write_stub(resource, pyfile)
    self.byte_compile(to_compile)  # compile .py's
    bdist_egg.write_safety_flag(
        os.path.join(egg_tmp, 'EGG-INFO'),
        bdist_egg.analyze_egg(egg_tmp, stubs))  # write zip-safety flag
    # NOTE: `locals()[name]` deliberately picks up the top_level /
    # native_libs lists defined above -- do not rename them.
    for name in 'top_level', 'native_libs':
        if locals()[name]:
            txt = os.path.join(egg_tmp, 'EGG-INFO', name + '.txt')
            if not os.path.exists(txt):
                f = open(txt, 'w')
                f.write('\n'.join(locals()[name]) + '\n')
                f.close()
def installation_report(self, req, dist, what="Installed"):
    """Helpful installation message for display to package users"""
    # NOTE: the message is filled with `% locals()` at the end, so the
    # local names what/eggloc/name/version/extras are part of the
    # template -- do not rename them.
    msg = "\n%(what)s %(eggloc)s%(extras)s"
    if self.multi_version and not self.no_report:
        msg += """
Because this distribution was installed --multi-version, before you can
import modules from this package in an application, you will need to
'import pkg_resources' and then use a 'require()' call similar to one of
these examples, in order to select the desired version:
pkg_resources.require("%(name)s") # latest installed version
pkg_resources.require("%(name)s==%(version)s") # this exact version
pkg_resources.require("%(name)s>=%(version)s") # this version or higher
"""
        if self.install_dir not in map(normalize_path, sys.path):
            msg += """
Note also that the installation directory must be on sys.path at runtime for
this to work. (e.g. by being the application's script directory, by being on
PYTHONPATH, or by being added to sys.path by your code.)
"""
    eggloc = dist.location
    name = dist.project_name
    version = dist.version
    extras = ''  # TODO: self.report_extras(req, dist)
    return msg % locals()
def report_editable(self, spec, setup_script):
    """Return the message shown after extracting an editable checkout."""
    # filled via `% locals()`: spec/dirname/python are template fields
    dirname = os.path.dirname(setup_script)
    python = sys.executable
    return """\nExtracted editable version of %(spec)s to %(dirname)s
If it uses setuptools in its setup script, you can activate it in
"development" mode by going to that directory and running::
%(python)s setup.py develop
See the setuptools documentation for the "develop" command for more info.
""" % locals()
def run_setup(self, setup_script, setup_base, args):
    """Run `setup_script` with `args`, mapping SystemExit to DistutilsError."""
    # make our bdist_egg/egg_info implementations visible to the script
    sys.modules.setdefault('distutils.command.bdist_egg', bdist_egg)
    sys.modules.setdefault('distutils.command.egg_info', egg_info)
    cmd_args = list(args)
    # translate our verbosity settings into distutils flags
    if self.verbose > 2:
        cmd_args.insert(0, '-' + 'v' * (self.verbose - 1))
    elif self.verbose < 2:
        cmd_args.insert(0, '-q')
    if self.dry_run:
        cmd_args.insert(0, '-n')
    rel_script = setup_script[len(setup_base) + 1:]
    log.info("Running %s %s", rel_script, ' '.join(cmd_args))
    try:
        # the module-level run_setup (sandboxed), not this method
        run_setup(setup_script, cmd_args)
    except SystemExit as exc:
        raise DistutilsError("Setup script exited with %s" % (exc.args[0],))
def build_and_install(self, setup_script, setup_base):
    """Run bdist_egg via `setup_script` and install the resulting egg(s).

    Returns the list of installed Distribution objects.
    """
    args = ['bdist_egg', '--dist-dir']
    # build into a throwaway dir next to the setup script
    dist_dir = tempfile.mkdtemp(
        prefix='egg-dist-tmp-', dir=os.path.dirname(setup_script)
    )
    try:
        self._set_fetcher_options(os.path.dirname(setup_script))
        args.append(dist_dir)
        self.run_setup(setup_script, setup_base, args)
        all_eggs = Environment([dist_dir])
        eggs = []
        for key in all_eggs:
            for dist in all_eggs[key]:
                eggs.append(self.install_egg(dist.location, setup_base))
        if not eggs and not self.dry_run:
            log.warn("No eggs found in %s (setup script problem?)",
                     dist_dir)
        return eggs
    finally:
        rmtree(dist_dir)
        log.set_verbosity(self.verbose)  # restore our log verbosity
def _set_fetcher_options(self, base):
    """
    When easy_install is about to run bdist_egg on a source dist, that
    source dist might have 'setup_requires' directives, requiring
    additional fetching. Ensure the fetcher options given to easy_install
    are available to that command as well.
    """
    # find the fetch options from easy_install and write them out
    # to the setup.cfg file.
    ei_opts = self.distribution.get_option_dict('easy_install').copy()
    # option names that influence how packages are located/fetched
    # ('site_dirs' was listed twice before; membership is unchanged)
    fetch_directives = (
        'find_links', 'site_dirs', 'index_url', 'optimize', 'allow_hosts',
    )
    fetch_options = {}
    for key, val in ei_opts.items():
        if key not in fetch_directives:
            continue
        # val is a (source, value) pair; keep just the value
        fetch_options[key.replace('_', '-')] = val[1]
    # create a settings dictionary suitable for `edit_config`
    settings = dict(easy_install=fetch_options)
    cfg_filename = os.path.join(base, 'setup.cfg')
    setopt.edit_config(cfg_filename, settings)
def update_pth(self, dist):
    """Record `dist` in easy-install.pth, dropping superseded entries."""
    if self.pth_file is None:
        return
    for d in self.pth_file[dist.key]:  # drop old entries
        if self.multi_version or d.location != dist.location:
            log.info("Removing %s from easy-install.pth file", d)
            self.pth_file.remove(d)
            if d.location in self.shadow_path:
                self.shadow_path.remove(d.location)
    if not self.multi_version:
        if dist.location in self.pth_file.paths:
            log.info(
                "%s is already the active version in easy-install.pth",
                dist
            )
        else:
            log.info("Adding %s to easy-install.pth file", dist)
            self.pth_file.add(dist)  # add new entry
            if dist.location not in self.shadow_path:
                self.shadow_path.append(dist.location)
    if not self.dry_run:
        self.pth_file.save()
        if dist.key == 'setuptools':
            # Ensure that setuptools itself never becomes unavailable!
            # XXX should this check for latest version?
            filename = os.path.join(self.install_dir, 'setuptools.pth')
            if os.path.islink(filename):
                os.unlink(filename)
            f = open(filename, 'wt')
            f.write(self.pth_file.make_relative(dist.location) + '\n')
            f.close()
def unpack_progress(self, src, dst):
    """Progress filter for unpacking: log each extracted member."""
    log.debug("Unpacking %s to %s", src, dst)
    # only unpack-and-compile skips files for dry run
    return dst
def unpack_and_compile(self, egg_path, destination):
    """Extract an egg to `destination`, byte-compile .py's, fix perms."""
    to_compile = []
    to_chmod = []

    def pf(src, dst):
        # progress filter: collect files needing post-processing
        if dst.endswith('.py') and not src.startswith('EGG-INFO/'):
            to_compile.append(dst)
        elif dst.endswith('.dll') or dst.endswith('.so'):
            to_chmod.append(dst)
        self.unpack_progress(src, dst)
        # returning None makes unpack_archive skip the file (dry run)
        return not self.dry_run and dst or None

    unpack_archive(egg_path, destination, pf)
    self.byte_compile(to_compile)
    if not self.dry_run:
        for f in to_chmod:
            # ensure read/execute bits; mask off unsafe write bits
            mode = ((os.stat(f)[stat.ST_MODE]) | 0o555) & 0o7755
            chmod(f, mode)
def byte_compile(self, to_compile):
    """Byte-compile the given .py files (honoring --optimize)."""
    if sys.dont_write_bytecode:
        # mirrors the interpreter's -B flag / PYTHONDONTWRITEBYTECODE
        self.warn('byte-compiling is disabled, skipping.')
        return
    from distutils.util import byte_compile
    try:
        # try to make the byte compile messages quieter
        log.set_verbosity(self.verbose - 1)
        byte_compile(to_compile, optimize=0, force=1, dry_run=self.dry_run)
        if self.optimize:
            byte_compile(
                to_compile, optimize=self.optimize, force=1,
                dry_run=self.dry_run
            )
    finally:
        log.set_verbosity(self.verbose)  # restore original verbosity
def no_default_version_msg(self):
    """Explain that the install dir is neither on PYTHONPATH nor .pth-enabled."""
    template = """bad install directory or PYTHONPATH
You are attempting to install a package to a directory that is not
on PYTHONPATH and which Python does not read ".pth" files from. The
installation directory you specified (via --install-dir, --prefix, or
the distutils default setting) was:
%s
and your PYTHONPATH environment variable currently contains:
%r
Here are some of your options for correcting the problem:
* You can choose a different installation directory, i.e., one that is
on PYTHONPATH or supports .pth files
* You can add the installation directory to the PYTHONPATH environment
variable. (It must then also be on PYTHONPATH whenever you run
Python and want to use the package(s) you are installing.)
* You can set up the installation directory to support ".pth" files by
using one of the approaches described here:
https://pythonhosted.org/setuptools/easy_install.html#custom-installation-locations
Please make the appropriate changes for your system and try again."""
    return template % (self.install_dir, os.environ.get('PYTHONPATH', ''))
def install_site_py(self):
    """Make sure there's a site.py in the target dir, if needed"""
    if self.sitepy_installed:
        return  # already did it, or don't need to
    sitepy = os.path.join(self.install_dir, "site.py")
    source = resource_string("setuptools", "site-patch.py")  # bytes
    current = ""
    if os.path.exists(sitepy):
        log.debug("Checking existing site.py in %s", self.install_dir)
        with open(sitepy, 'rb') as f:
            current = f.read()
        # we want str, not bytes
        if PY3:
            current = current.decode()
        if not current.startswith('def __boot():'):
            raise DistutilsError(
                "%s is not a setuptools-generated site.py; please"
                " remove it." % sitepy
            )
    # Compare like types: on Python 3 `current` is text while `source` is
    # bytes, so the old `current != source` was always true and site.py
    # was rewritten on every run.
    reference = source.decode() if PY3 else source
    if current != reference:
        log.info("Creating %s", sitepy)
        if not self.dry_run:
            ensure_directory(sitepy)
            with open(sitepy, 'wb') as f:
                f.write(source)
            self.byte_compile([sitepy])
    self.sitepy_installed = True
def create_home_path(self):
    """Create directories under ~."""
    if not self.user:
        return
    home = convert_path(os.path.expanduser("~"))
    for name, path in iteritems(self.config_vars):
        if not path.startswith(home):
            continue
        if os.path.isdir(path):
            continue
        self.debug_print("os.makedirs('%s', 0o700)" % path)
        os.makedirs(path, 0o700)
# Per-OS install-scheme templates used by _expand() via self.INSTALL_SCHEMES;
# '$base' and '$py_version_short' are later substituted by distutils'
# subst_vars().
INSTALL_SCHEMES = dict(
    posix=dict(
        install_dir='$base/lib/python$py_version_short/site-packages',
        script_dir='$base/bin',
    ),
)

# Fallback scheme (Windows-style layout) used when os.name has no entry above.
DEFAULT_SCHEME = dict(
    install_dir='$base/Lib/site-packages',
    script_dir='$base/Scripts',
)
def _expand(self, *attrs):
    """Expand $-placeholders in the named attributes using install vars."""
    config_vars = self.get_finalized_command('install').config_vars
    if self.prefix:
        # Set default install_dir/scripts from --prefix
        config_vars = config_vars.copy()
        config_vars['base'] = self.prefix
        scheme = self.INSTALL_SCHEMES.get(os.name, self.DEFAULT_SCHEME)
        for attr, val in scheme.items():
            if getattr(self, attr, None) is None:
                setattr(self, attr, val)
    from distutils.util import subst_vars
    for attr in attrs:
        val = getattr(self, attr)
        if val is not None:
            val = subst_vars(val, config_vars)
            if os.name == 'posix':
                # allow ~ in expanded values on POSIX
                val = os.path.expanduser(val)
            setattr(self, attr, val)
def get_site_dirs():
    """Return all normalized 'site' directories for this interpreter."""
    # return a list of 'site' dirs
    sitedirs = [_f for _f in os.environ.get('PYTHONPATH',
                                            '').split(os.pathsep) if _f]
    prefixes = [sys.prefix]
    if sys.exec_prefix != sys.prefix:
        prefixes.append(sys.exec_prefix)
    for prefix in prefixes:
        if prefix:
            if sys.platform in ('os2emx', 'riscos'):
                sitedirs.append(os.path.join(prefix, "Lib", "site-packages"))
            elif os.sep == '/':
                # POSIX layout
                sitedirs.extend([os.path.join(prefix,
                                              "lib",
                                              "python" + sys.version[:3],
                                              "site-packages"),
                                 os.path.join(prefix, "lib", "site-python")])
            else:
                # Windows layout
                sitedirs.extend(
                    [prefix, os.path.join(prefix, "lib", "site-packages")]
                )
            if sys.platform == 'darwin':
                # for framework builds *only* we add the standard Apple
                # locations. Currently only per-user, but /Library and
                # /Network/Library could be added too
                if 'Python.framework' in prefix:
                    home = os.environ.get('HOME')
                    if home:
                        sitedirs.append(
                            os.path.join(home,
                                         'Library',
                                         'Python',
                                         sys.version[:3],
                                         'site-packages'))
    lib_paths = get_path('purelib'), get_path('platlib')
    for site_lib in lib_paths:
        if site_lib not in sitedirs:
            sitedirs.append(site_lib)
    if site.ENABLE_USER_SITE:
        sitedirs.append(site.USER_SITE)
    sitedirs = list(map(normalize_path, sitedirs))
    return sitedirs
def expand_paths(inputs):
    """Yield sys.path directories that might contain "old-style" packages."""
    seen = {}
    for raw in inputs:
        dirname = normalize_path(raw)
        if dirname in seen:
            continue
        seen[dirname] = 1
        if not os.path.isdir(dirname):
            continue
        files = os.listdir(dirname)
        yield dirname, files
        for name in files:
            if not name.endswith('.pth'):
                # We only care about the .pth files
                continue
            if name in ('easy-install.pth', 'setuptools.pth'):
                # Ignore .pth files that we control
                continue
            # Read the .pth file
            f = open(os.path.join(dirname, name))
            lines = list(yield_lines(f))
            f.close()
            # Yield existing non-dupe, non-import directory lines from it
            for line in lines:
                if line.startswith("import"):
                    continue
                entry = normalize_path(line.rstrip())
                if entry in seen:
                    continue
                seen[entry] = 1
                if os.path.isdir(entry):
                    yield entry, os.listdir(entry)
def extract_wininst_cfg(dist_filename):
    """Extract configuration data from a bdist_wininst .exe

    Returns a ConfigParser.RawConfigParser, or None
    """
    f = open(dist_filename, 'rb')
    try:
        endrec = zipfile._EndRecData(f)
        if endrec is None:
            return None
        # data the installer prepended before the embedded zip archive
        prepended = (endrec[9] - endrec[5]) - endrec[6]
        if prepended < 12:  # no wininst data here
            return None
        f.seek(prepended - 12)
        from setuptools.compat import StringIO, ConfigParser
        import struct
        tag, cfglen, bmlen = struct.unpack("<iii", f.read(12))
        if tag not in (0x1234567A, 0x1234567B):
            return None  # not a valid tag
        f.seek(prepended - (12 + cfglen))
        cfg = ConfigParser.RawConfigParser(
            {'version': '', 'target_version': ''})
        try:
            part = f.read(cfglen)
            # part is bytes; keep only the data up to the first NUL.
            # Use a literal b'\x00': the old version-checked code used
            # bytes([0]), which on Python 2 is str([0]) == '[0]' and
            # split on the wrong delimiter.
            config = part.split(b'\x00', 1)[0]
            # Now the config is in bytes, but for RawConfigParser, it should
            # be text, so decode it.
            config = config.decode(sys.getfilesystemencoding())
            cfg.readfp(StringIO(config))
        except ConfigParser.Error:
            return None
        if not cfg.has_section('metadata') or not cfg.has_section('Setup'):
            return None
        return cfg
    finally:
        f.close()
def get_exe_prefixes(exe_filename):
    """Get exe->egg path translations for a given .exe file."""
    # default translations for the standard bdist_wininst layout
    prefixes = [
        ('PURELIB/', ''), ('PLATLIB/pywin32_system32', ''),
        ('PLATLIB/', ''),
        ('SCRIPTS/', 'EGG-INFO/scripts/'),
        ('DATA/lib/site-packages', ''),
    ]
    archive = zipfile.ZipFile(exe_filename)
    try:
        for member in archive.infolist():
            path = member.filename
            pieces = path.split('/')
            if len(pieces) == 3 and pieces[2] == 'PKG-INFO':
                if pieces[1].endswith('.egg-info'):
                    # egg metadata directory wins over the defaults
                    prefixes.insert(0, ('/'.join(pieces[:2]), 'EGG-INFO/'))
                    break
            if len(pieces) != 2 or not path.endswith('.pth'):
                continue
            if path.endswith('-nspkg.pth'):
                continue
            if pieces[0].upper() in ('PURELIB', 'PLATLIB'):
                contents = archive.read(path)
                if PY3:
                    contents = contents.decode()
                for pth in yield_lines(contents):
                    pth = pth.strip().replace('\\', '/')
                    if not pth.startswith('import'):
                        prefixes.append((('%s/%s/' % (pieces[0], pth)), ''))
    finally:
        archive.close()
    # longest/most specific prefixes first, all lowercased
    return sorted(((old.lower(), new) for old, new in prefixes),
                  reverse=True)
def parse_requirement_arg(spec):
    """Convert a command-line `spec` into a Requirement object.

    Raises DistutilsError with a user-oriented message when `spec`
    is not a valid requirement string.
    """
    try:
        requirement = Requirement.parse(spec)
    except ValueError:
        raise DistutilsError(
            "Not a URL, existing file, or requirement spec: %r" % (spec,)
        )
    return requirement
class PthDistributions(Environment):
    """A .pth file with Distribution paths in it"""

    # True when the in-memory paths differ from what's on disk
    dirty = False

    def __init__(self, filename, sitedirs=()):
        # filename: the .pth file backing this environment
        # sitedirs: site directories whose entries should not be re-added
        self.filename = filename
        self.sitedirs = list(map(normalize_path, sitedirs))
        self.basedir = normalize_path(os.path.dirname(self.filename))
        self._load()
        Environment.__init__(self, [], None, None)
        # register every distribution found along the loaded paths
        for path in yield_lines(self.paths):
            list(map(self.add, find_distributions(path, True)))

    def _load(self):
        """Read the .pth file into self.paths, pruning stale entries."""
        self.paths = []
        saw_import = False
        seen = dict.fromkeys(self.sitedirs)
        if os.path.isfile(self.filename):
            f = open(self.filename, 'rt')
            for line in f:
                if line.startswith('import'):
                    # wrapper lines (sys.path bookkeeping) get regenerated
                    saw_import = True
                    continue
                path = line.rstrip()
                self.paths.append(path)
                if not path.strip() or path.strip().startswith('#'):
                    # keep blank/comment lines verbatim
                    continue
                # skip non-existent paths, in case somebody deleted a package
                # manually, and duplicate paths as well
                path = self.paths[-1] = normalize_path(
                    os.path.join(self.basedir, path)
                )
                if not os.path.exists(path) or path in seen:
                    self.paths.pop()  # skip it
                    self.dirty = True  # we cleaned up, so we're dirty now :)
                    continue
                seen[path] = 1
            f.close()
        if self.paths and not saw_import:
            self.dirty = True  # ensure anything we touch has import wrappers
        # drop trailing blank lines
        while self.paths and not self.paths[-1].strip():
            self.paths.pop()

    def save(self):
        """Write changed .pth file back to disk"""
        if not self.dirty:
            return
        data = '\n'.join(map(self.make_relative, self.paths))
        if data:
            log.debug("Saving %s", self.filename)
            # wrap the paths with import lines that splice new entries
            # into sys.path at the position recorded in sys.__egginsert
            data = (
                "import sys; sys.__plen = len(sys.path)\n"
                "%s\n"
                "import sys; new=sys.path[sys.__plen:];"
                " del sys.path[sys.__plen:];"
                " p=getattr(sys,'__egginsert',0); sys.path[p:p]=new;"
                " sys.__egginsert = p+len(new)\n"
            ) % data
            if os.path.islink(self.filename):
                os.unlink(self.filename)
            f = open(self.filename, 'wt')
            f.write(data)
            f.close()
        elif os.path.exists(self.filename):
            log.debug("Deleting empty %s", self.filename)
            os.unlink(self.filename)
        self.dirty = False

    def add(self, dist):
        """Add `dist` to the distribution map"""
        new_path = (
            dist.location not in self.paths and (
                dist.location not in self.sitedirs or
                # account for '.' being in PYTHONPATH
                dist.location == os.getcwd()
            )
        )
        if new_path:
            self.paths.append(dist.location)
            self.dirty = True
        Environment.add(self, dist)

    def remove(self, dist):
        """Remove `dist` from the distribution map"""
        while dist.location in self.paths:
            self.paths.remove(dist.location)
            self.dirty = True
        Environment.remove(self, dist)

    def make_relative(self, path):
        """Return `path` relative to the .pth file's dir, when possible."""
        npath, last = os.path.split(normalize_path(path))
        baselen = len(self.basedir)
        parts = [last]
        sep = os.altsep == '/' and '/' or os.sep
        while len(npath) >= baselen:
            if npath == self.basedir:
                parts.append(os.curdir)
                parts.reverse()
                return sep.join(parts)
            npath, last = os.path.split(npath)
            parts.append(last)
        else:
            # walked above basedir without matching -- keep it absolute
            return path
def _first_line_re():
    """Return first_line_re as a str-pattern regex.

    Some Python versions (>=3.1.4, >=3.2.1) compile first_line_re from a
    bytes pattern; decode and recompile it in that case so it can match
    text strings.
    """
    pattern = first_line_re.pattern
    if isinstance(pattern, str):
        return first_line_re
    return re.compile(pattern.decode())
def auto_chmod(func, arg, exc):
    """shutil.rmtree onerror handler.

    On Windows, os.remove fails on read-only files: grant write
    permission and retry once.  Otherwise re-raise the active exception
    with the offending function and argument appended to its message.
    """
    if func is os.remove and os.name == 'nt':
        chmod(arg, stat.S_IWRITE)
        return func(arg)
    et, ev, _ = sys.exc_info()
    # Use ev.args rather than ev[...]: exception instances are not
    # subscriptable on Python 3, so the old ev[0]/ev[1] form raised
    # TypeError there instead of re-raising the original error.
    reraise(et, (ev.args[0], ev.args[1] + (" %s %s" % (func, arg))))
def update_dist_caches(dist_path, fix_zipimporter_caches):
    """Fix globally cached data related to a replaced egg at `dist_path`.

    `dist_path` is the path of a newly installed egg distribution (zipped or
    unzipped) that replaced an older one.  Stale finders in
    sys.path_importer_cache, and stale zip archive directory information in
    zipimport._zip_directory_cache, can otherwise cause imports from the
    replacement distribution to load from the wrong location, fail with
    errors such as 'bad local file header', or even silently return invalid
    data.

    When `fix_zipimporter_caches` is true (and the Python implementation
    allows it) the shared cached zip directory information is updated in
    place, which also repairs any live zipimport.zipimporter instances --
    assuming the replacement is itself a zipped egg.  Otherwise the stale
    cached data is cleared and removed so that new zipimporter instances do
    not pick it up; existing stale instances are left to fail fast.

    Note: other known holders of stale zipimporter references (the master
    working_set, locals up the call stack, modules' __loader__ attributes)
    are deliberately not touched here.
    """
    normalized = normalize_path(dist_path)
    # Always drop stale finders for the path and its sub-paths.
    _uncache(normalized, sys.path_importer_cache)
    if fix_zipimporter_caches:
        _replace_zip_directory_cache_data(normalized)
    else:
        # Clear each stale entry (so stale zipimporters fail fast) and then
        # remove it (so new zipimporters do not reuse it).
        _remove_and_clear_zip_directory_cache_data(normalized)
def _collect_zipimporter_cache_entries(normalized_path, cache):
    """
    Return zipimporter cache entry keys related to a given normalized path.

    Alternative path spellings (different character case, alternative path
    separators) of the same path are included, as are sub-path entries, i.e.
    zip archives embedded in other zip archives.
    """
    prefix_len = len(normalized_path)

    def _related(key):
        np = normalize_path(key)
        # Either the path itself ('' after the prefix) or a child (os.sep).
        return (np.startswith(normalized_path) and
                np[prefix_len:prefix_len + 1] in (os.sep, ''))

    return [key for key in cache if _related(key)]
def _update_zipimporter_cache(normalized_path, cache, updater=None):
    """
    Update zipimporter cache data for a given normalized path, including any
    sub-path entries (zip archives embedded in other zip archives).

    `updater`, if given, is called with (key, removed_entry) after the entry
    has been removed from the cache; a non-None return value is re-inserted
    under the same key.  With no updater, matching entries are simply
    removed.
    """
    for key in _collect_zipimporter_cache_entries(normalized_path, cache):
        # pypy's custom zipimport._zip_directory_cache is not a full dict:
        # it supports neither item assignment nor dict.pop(), which forces
        # the lookup/del/conditional-reinsert dance below.  See:
        # https://bitbucket.org/pypa/setuptools/issue/202/more-robust-zipimporter-cache-invalidation#comment-10495960
        # https://bitbucket.org/pypy/pypy/src/dd07756a34a41f674c0cacfbc8ae1d4cc9ea2ae4/pypy/module/zipimport/interp_zipimport.py#cl-99
        removed = cache[key]
        del cache[key]
        replacement = updater(key, removed) if updater else None
        if replacement is not None:
            cache[key] = replacement
def _uncache(normalized_path, cache):
    """Remove every cache entry related to `normalized_path`."""
    _update_zipimporter_cache(normalized_path, cache, updater=None)
def _remove_and_clear_zip_directory_cache_data(normalized_path):
    """Clear, then remove, cached zip directory data for `normalized_path`."""
    def _clear(path, old_entry):
        # Empty the shared dict first so any stale zipimporter holding a
        # reference to it fails fast; returning None (implicitly) makes the
        # caller drop the cache entry itself.
        old_entry.clear()

    _update_zipimporter_cache(
        normalized_path, zipimport._zip_directory_cache,
        updater=_clear)
# PyPy Python implementation does not allow directly writing to the
# zipimport._zip_directory_cache and so prevents us from attempting to correct
# its content. The best we can do there is clear the problematic cache content
# and have PyPy repopulate it as needed. The downside is that if there are any
# stale zipimport.zipimporter instances laying around, attempting to use them
# will fail due to not having its zip archive directory information available
# instead of being automatically corrected to use the new correct zip archive
# directory information.
if '__pypy__' in sys.builtin_module_names:
    _replace_zip_directory_cache_data = \
        _remove_and_clear_zip_directory_cache_data
else:
    def _replace_zip_directory_cache_data(normalized_path):
        def replace_cached_zip_archive_directory_data(path, old_entry):
            # N.B. In theory, we could load the zip directory information just
            # once for all updated path spellings, and then copy it locally and
            # update its contained path strings to contain the correct
            # spelling, but that seems like a way too invasive move (this cache
            # structure is not officially documented anywhere and could in
            # theory change with new Python releases) for no significant
            # benefit.
            old_entry.clear()
            # Constructing a fresh zipimporter repopulates the module-level
            # cache entry for `path`; copying that data into the original
            # (shared) dict updates every live zipimporter using it.
            zipimport.zipimporter(path)
            old_entry.update(zipimport._zip_directory_cache[path])
            return old_entry
        _update_zipimporter_cache(
            normalized_path, zipimport._zip_directory_cache,
            updater=replace_cached_zip_archive_directory_data)
def is_python(text, filename='<string>'):
    """Return True when `text` compiles as Python source, False otherwise."""
    try:
        compile(text, filename, 'exec')
    except (SyntaxError, TypeError):
        return False
    return True
def is_sh(executable):
    """Determine if the specified executable is a .sh (contains a #! line)"""
    try:
        with open(executable) as fp:
            magic = fp.read(2)
    except (OSError, IOError):
        # Unreadable file: historical behavior is to return the argument
        # itself (a truthy value), not False.
        return executable
    return magic == '#!'
def nt_quote_arg(arg):
    """Quote a single command-line argument per Windows parsing rules."""
    # list2cmdline implements the MS C runtime quoting/escaping rules.
    quoted = subprocess.list2cmdline([arg])
    return quoted
def is_python_script(script_text, filename):
    """Is this text, as a whole, a Python script? (as opposed to shell/bat/etc.

    Decided by file extension, then syntactic validity, then shebang line.
    """
    if filename.endswith(('.py', '.pyw')):
        # extension says it's Python
        return True
    if is_python(script_text, filename):
        # it's syntactically valid Python
        return True
    if script_text.startswith('#!'):
        # It begins with a '#!' line; accept if the shebang mentions python.
        shebang = script_text.splitlines()[0]
        return 'python' in shebang.lower()
    return False  # Not any Python I can recognize
try:
    from os import chmod as _chmod
except ImportError:
    # Jython compatibility: os.chmod may be unavailable there, so fall back
    # to a no-op stand-in with the same call signature.
    def _chmod(*args):
        pass
def chmod(path, mode):
    """Best-effort chmod: log the attempt and swallow OS-level failures."""
    log.debug("changing mode of %s to %o", path, mode)
    try:
        _chmod(path, mode)
    except os.error as e:
        # Non-fatal (e.g. filesystems without permission bits); just log it.
        log.debug("chmod failed: %s", e)
def fix_jython_executable(executable, options):
    """Work around Jython's sys.executable being a .sh script, which is an
    invalid shebang-line interpreter.

    Returns either the original `executable` or an '/usr/bin/env'-prefixed
    replacement.  When interpreter `options` are present the workaround
    cannot be applied and a warning is logged instead.
    """
    if sys.platform.startswith('java') and is_sh(executable):
        # Workaround for Jython is not needed on Linux systems.
        import java

        if java.lang.System.getProperty("os.name") == "Linux":
            return executable

        # Workaround Jython's sys.executable being a .sh (an invalid
        # shebang line interpreter)
        if options:
            # Can't apply the workaround, leave it broken
            log.warn(
                "WARNING: Unable to adapt shebang line for Jython,"
                " the following script is NOT executable\n"
                " see http://bugs.jython.org/issue1112 for"
                " more information.")
        else:
            return '/usr/bin/env %s' % executable
    return executable
class CommandSpec(list):
    """
    A command spec for a #! header, specified as a list of arguments akin to
    those passed to Popen.
    """

    options = []

    @classmethod
    def _sys_executable(cls):
        # Honor the OS X framework-build launcher override when present.
        default = os.path.normpath(sys.executable)
        return os.environ.get('__PYVENV_LAUNCHER__', default)

    @classmethod
    def from_param(cls, param):
        """
        Construct a CommandSpec from a parameter to build_scripts, which may
        be None, an existing spec, a list of arguments, or a command string.
        """
        if isinstance(param, cls):
            return param
        if isinstance(param, list):
            return cls(param)
        if param is None:
            return cls.from_environment()
        # otherwise, assume it's a string.
        return cls.from_string(param)

    @classmethod
    def from_environment(cls):
        quoted_executable = '"' + cls._sys_executable() + '"'
        return cls.from_string(quoted_executable)

    @classmethod
    def from_string(cls, string):
        """
        Construct a command spec from a simple string representing a command
        line parseable by shlex.split.
        """
        args = shlex.split(string)
        # Jython needs a special spec; everywhere else a plain one is built.
        return JythonCommandSpec.from_string(string) or cls(args)

    def install_options(self, script_text):
        self.options = shlex.split(self._extract_options(script_text))
        cmdline = subprocess.list2cmdline(self)
        if not isascii(cmdline):
            # Non-ASCII interpreter path: prepend -x ("skip first line") so
            # the shebang line survives.
            self.options[:0] = ['-x']

    @staticmethod
    def _extract_options(orig_script):
        """
        Extract any interpreter options from the first line of the script.
        """
        first_line = (orig_script + '\n').splitlines()[0]
        match = _first_line_re().match(first_line)
        if match:
            options = match.group(1) or ''
        else:
            options = ''
        return options.strip()

    def as_header(self):
        return self._render(self + list(self.options))

    @staticmethod
    def _render(items):
        return '#!' + subprocess.list2cmdline(items) + '\n'
# For pbr compat; will be removed in a future version.
# Module-level snapshot of the interpreter path (kept only because pbr
# imports it by name).
sys_executable = CommandSpec._sys_executable()
class JythonCommandSpec(CommandSpec):
    # Variant of CommandSpec used only on non-Linux Jython, where
    # sys.executable is a .sh wrapper and cannot serve as a shebang
    # interpreter.

    @classmethod
    def from_string(cls, string):
        """
        On Jython, construct an instance of this class.
        On platforms other than Jython, return None.
        """
        needs_jython_spec = (
            sys.platform.startswith('java')
            and
            __import__('java').lang.System.getProperty('os.name') != 'Linux'
        )
        # The whole command string is kept as a single argument on Jython.
        return cls([string]) if needs_jython_spec else None

    def as_header(self):
        """
        Workaround Jython's sys.executable being a .sh (an invalid
        shebang line interpreter)
        """
        if not is_sh(self[0]):
            return super(JythonCommandSpec, self).as_header()
        if self.options:
            # Can't apply the workaround, leave it broken
            log.warn(
                "WARNING: Unable to adapt shebang line for Jython,"
                " the following script is NOT executable\n"
                " see http://bugs.jython.org/issue1112 for"
                " more information.")
            return super(JythonCommandSpec, self).as_header()
        # Route the .sh wrapper through /usr/bin/env so the kernel accepts it.
        items = ['/usr/bin/env'] + self + list(self.options)
        return self._render(items)
class ScriptWriter(object):
    """
    Encapsulates behavior around writing entry point scripts for console and
    gui apps.
    """

    # Stub script body; %-formatted with spec/group/name in get_args().
    template = textwrap.dedent("""
        # EASY-INSTALL-ENTRY-SCRIPT: %(spec)r,%(group)r,%(name)r
        __requires__ = %(spec)r
        import sys
        from pkg_resources import load_entry_point
        if __name__ == '__main__':
            sys.exit(
                load_entry_point(%(spec)r, %(group)r, %(name)r)()
            )
    """).lstrip()

    @classmethod
    def get_script_args(cls, dist, executable=None, wininst=False):
        # for backward compatibility
        warnings.warn("Use get_args", DeprecationWarning)
        writer = (WindowsScriptWriter if wininst else ScriptWriter).best()
        header = cls.get_script_header("", executable, wininst)
        return writer.get_args(dist, header)

    @classmethod
    def get_script_header(cls, script_text, executable=None, wininst=False):
        # for backward compatibility
        warnings.warn("Use get_header", DeprecationWarning)
        if wininst:
            executable = "python.exe"
        cmd = CommandSpec.from_param(executable)
        cmd.install_options(script_text)
        return cmd.as_header()

    @classmethod
    def get_args(cls, dist, header=None):
        """
        Yield write_script() argument tuples for a distribution's entrypoints
        """
        if header is None:
            header = cls.get_header()
        spec = str(dist.as_requirement())
        # One stub per console/gui entry point declared by the dist.
        for type_ in 'console', 'gui':
            group = type_ + '_scripts'
            for name, ep in dist.get_entry_map(group).items():
                script_text = cls.template % locals()
                for res in cls._get_script_args(type_, name, header,
                                                script_text):
                    yield res

    @classmethod
    def get_writer(cls, force_windows):
        # for backward compatibility
        warnings.warn("Use best", DeprecationWarning)
        return WindowsScriptWriter.best() if force_windows else cls.best()

    @classmethod
    def best(cls):
        """
        Select the best ScriptWriter for this environment.
        """
        return WindowsScriptWriter.best() if sys.platform == 'win32' else cls

    @classmethod
    def _get_script_args(cls, type_, name, header, script_text):
        # Simply write the stub with no extension.
        yield (name, header + script_text)

    @classmethod
    def get_header(cls, script_text="", executable=None):
        """Create a #! line, getting options (if any) from script_text"""
        cmd = CommandSpec.from_param(executable)
        cmd.install_options(script_text)
        return cmd.as_header()
class WindowsScriptWriter(ScriptWriter):
    # ScriptWriter variant that emits Windows-friendly script files.

    @classmethod
    def get_writer(cls):
        # for backward compatibility
        warnings.warn("Use best", DeprecationWarning)
        return cls.best()

    @classmethod
    def best(cls):
        """
        Select the best ScriptWriter suitable for Windows
        """
        writer_lookup = dict(
            executable=WindowsExecutableLauncherWriter,
            natural=cls,
        )
        # for compatibility, use the executable launcher by default
        # NOTE(review): an unrecognized SETUPTOOLS_LAUNCHER value raises
        # KeyError here -- preserved behavior.
        launcher = os.environ.get('SETUPTOOLS_LAUNCHER', 'executable')
        return writer_lookup[launcher]

    @classmethod
    def _get_script_args(cls, type_, name, header, script_text):
        "For Windows, add a .py extension"
        ext = dict(console='.pya', gui='.pyw')[type_]
        if ext not in os.environ['PATHEXT'].lower().split(';'):
            warnings.warn("%s not listed in PATHEXT; scripts will not be "
                          "recognized as executables." % ext, UserWarning)
        # Blockers: other-extension siblings that must be removed so the
        # new script is the one that gets executed.
        old = ['.pya', '.py', '-script.py', '.pyc', '.pyo', '.pyw', '.exe']
        old.remove(ext)
        header = cls._adjust_header(type_, header)
        blockers = [name + x for x in old]
        yield name + ext, header + script_text, 't', blockers

    @staticmethod
    def _adjust_header(type_, orig_header):
        """
        Make sure 'pythonw' is used for gui and and 'python' is used for
        console (regardless of what sys.executable is).
        """
        pattern = 'pythonw.exe'
        repl = 'python.exe'
        if type_ == 'gui':
            pattern, repl = repl, pattern
        pattern_ob = re.compile(re.escape(pattern), re.IGNORECASE)
        new_header = pattern_ob.sub(string=orig_header, repl=repl)
        # Strip the '#!' prefix and trailing newline/quotes to get the bare
        # interpreter path for the existence check below.
        clean_header = new_header[2:-1].strip('"')
        if sys.platform == 'win32' and not os.path.exists(clean_header):
            # the adjusted version doesn't exist, so return the original
            return orig_header
        return new_header
class WindowsExecutableLauncherWriter(WindowsScriptWriter):
    # Emits a '<name>-script.py[w]' stub plus a binary '<name>.exe' launcher.

    @classmethod
    def _get_script_args(cls, type_, name, header, script_text):
        """
        For Windows, add a .py extension and an .exe launcher
        """
        if type_ == 'gui':
            launcher_type = 'gui'
            ext = '-script.pyw'
            old = ['.pyw']
        else:
            launcher_type = 'cli'
            ext = '-script.py'
            old = ['.py', '.pyc', '.pyo']
        hdr = cls._adjust_header(type_, header)
        blockers = [name + x for x in old]
        yield (name + ext, hdr + script_text, 't', blockers)
        yield (
            name + '.exe', get_win_launcher(launcher_type),
            'b'  # write in binary mode
        )
        if not is_64bit():
            # install a manifest for the launcher to prevent Windows
            # from detecting it as an installer (which it will for
            # launchers like easy_install.exe). Consider only
            # adding a manifest for launchers detected as installers.
            # See Distribute #143 for details.
            m_name = name + '.exe.manifest'
            yield (m_name, load_launcher_manifest(name), 't')
# for backward-compatibility: module-level aliases kept for callers that
# predate the ScriptWriter class API.
get_script_args = ScriptWriter.get_script_args
get_script_header = ScriptWriter.get_script_header
def get_win_launcher(type):
    """
    Load the Windows launcher (executable) suitable for launching a script.
    `type` should be either 'cli' or 'gui'
    Returns the executable as a byte string.
    """
    launcher_name = '%s.exe' % type
    if platform.machine().lower() == 'arm':
        launcher_name = launcher_name.replace(".", "-arm.")
    # Pick the bitness-specific build of the launcher.
    bitness_suffix = "-64." if is_64bit() else "-32."
    launcher_name = launcher_name.replace(".", bitness_suffix)
    return resource_string('setuptools', launcher_name)
def load_launcher_manifest(name):
    """Return the launcher manifest XML with `name` interpolated, as text."""
    manifest = pkg_resources.resource_string(__name__, 'launcher manifest.xml')
    if PY2:
        # Python 2: resource_string returns str, format directly.
        return manifest % vars()
    else:
        # Python 3: resource_string returns bytes; decode before formatting.
        return manifest.decode('utf-8') % vars()
def rmtree(path, ignore_errors=False, onerror=auto_chmod):
    """Recursively delete a directory tree.

    This code is taken from the Python 2.4 version of 'shutil', because
    the 2.3 version doesn't really work right.
    """
    if ignore_errors:
        def onerror(*args):
            pass
    elif onerror is None:
        def onerror(*args):
            raise
    try:
        entries = os.listdir(path)
    except os.error:
        entries = []
        onerror(os.listdir, path, sys.exc_info())
    for entry in entries:
        child = os.path.join(path, entry)
        try:
            child_mode = os.lstat(child).st_mode
        except os.error:
            child_mode = 0
        if stat.S_ISDIR(child_mode):
            rmtree(child, ignore_errors, onerror)
            continue
        try:
            os.remove(child)
        except os.error:
            onerror(os.remove, child, sys.exc_info())
    try:
        os.rmdir(path)
    except os.error:
        onerror(os.rmdir, path, sys.exc_info())
def current_umask():
    """Return the process umask without permanently changing it."""
    # The only way to read the umask is to set it: set a throwaway value,
    # capture the previous one, and immediately restore it.
    previous = os.umask(0o022)
    os.umask(previous)
    return previous
def bootstrap():
    # This function is called when setuptools*.egg is run using /bin/sh
    import setuptools

    # Treat the egg's containing directory as both the program name and the
    # requirement to install, then hand off to main().
    argv0 = os.path.dirname(setuptools.__path__[0])
    sys.argv[0] = argv0
    sys.argv.append(argv0)
    main()
def main(argv=None, **kw):
    """Run the easy_install command via a minimal distutils setup().

    `argv` defaults to sys.argv[1:]; `kw` is forwarded to setup().
    """
    from setuptools import setup
    from setuptools.dist import Distribution

    class DistributionWithoutHelpCommands(Distribution):
        # Suppress the standard distutils usage banner; _patch_usage
        # substitutes an easy_install-specific one.
        common_usage = ""

        def _show_help(self, *args, **kw):
            with _patch_usage():
                Distribution._show_help(self, *args, **kw)

    if argv is None:
        argv = sys.argv[1:]

    with _patch_usage():
        setup(
            script_args=['-q', 'easy_install', '-v'] + argv,
            script_name=sys.argv[0] or 'easy_install',
            distclass=DistributionWithoutHelpCommands, **kw
        )
@contextlib.contextmanager
def _patch_usage():
    """Temporarily replace distutils' usage banner with easy_install's own."""
    import distutils.core
    USAGE = textwrap.dedent("""
        usage: %(script)s [options] requirement_or_url ...
           or: %(script)s --help
    """).lstrip()

    def gen_usage(script_name):
        return USAGE % dict(
            script=os.path.basename(script_name),
        )

    saved = distutils.core.gen_usage
    distutils.core.gen_usage = gen_usage
    try:
        yield
    finally:
        # Always restore the original generator, even if setup() raised.
        distutils.core.gen_usage = saved
|
gagoncal/Selenium
|
Selenium with python/Selenium_with_python_config_files/setuptools-12.0.1/setuptools/command/easy_install.py
|
Python
|
lgpl-2.1
| 85,584
|
[
"VisIt"
] |
b6fe3f4927ea9298217bcf1ab1f1861f3b91db92835a3705792f4f0869ee7f6b
|
#! /usr/bin/env python
# Python 2 script (uses `print` statements and star-imports that provide
# numpy names like `append`/`arange` at module scope).
#adam-does# matches the redshifts from our pipeline/bpz to external reference redshifts
#adam-example# ipython -i -- ./match_specz_and_bpz_cats.py nedcat bpzcat =astropy.io.ascii.read("/u/ki/awright/bonnpipeline/adam_ned_MACS1226+21_galaxies.tsv")
#adam-example# ipython -i -- ./match_specz_and_bpz_cats.py /u/ki/awright/bonnpipeline/adam_ned_MACS1226+21_galaxies.tsv /nfs/slac/g/ki/ki18/anja/SUBARU/MACS1226+21/PHOTOMETRY_W-C-RC_aper/MACS1226+21.calibrated.bpztab.cat
import sys,os,inspect ; sys.path.append('/u/ki/awright/InstallingSoftware/pythons')
from import_tools import *
curfile=os.path.abspath(inspect.getfile(inspect.currentframe()))
FileString=os.path.basename(curfile)
args=imagetools.ArgCleaner(sys.argv,FileString)
import numpy as np
import matplotlib.pyplot as plt
import astropy
from astropy.coordinates import SkyCoord
from astropy import units as u
## Input catalogs: NED spectroscopic redshifts, SDSS spec-z additions, and
## the pipeline's BPZ photometric-redshift FITS table.
ned_file= "/u/ki/awright/bonnpipeline/adam-nedgals2mygals/sdssj1226_redshifts_converted.tsv"
bpz_file= "/nfs/slac/g/ki/ki18/anja/SUBARU/MACS1226+21/PHOTOMETRY_W-C-RC_aper/MACS1226+21.calibrated.bpztab.cat"
sdss_file="/u/ki/awright/bonnpipeline/adam-nedgals2mygals/cat_additions_from_sdss_zspecs.tsv" #'Survey', 'RA', 'Dec', 'Redshift', 'SN', 'Class'
sdsscat=astropy.io.ascii.read(sdss_file)
nedcat=astropy.io.ascii.read(ned_file)
bpzcat=astropy.io.fits.open(bpz_file)
#adam-example# bpzcat=astropy.io.fits.open("/nfs/slac/g/ki/ki18/anja/SUBARU/MACS1226+21/PHOTOMETRY_W-C-RC_aper/MACS1226+21.calibrated.bpztab.cat")
# os.system("ldacdesc -i /nfs/slac/g/ki/ki18/anja/SUBARU/MACS1226+21/PHOTOMETRY_W-C-RC_aper/MACS1226+21.calibrated.bpztab.cat")
# primary OBJECTS PHOTINFO FIELDS BPZTAB
## Pull the BPZ redshift columns and the object astrometry/photometry.
bpztab=bpzcat[-1]
objtab=bpzcat[1]
bpzdat=bpztab.data
objdat=objtab.data
raw_bpz_z=bpzdat.field("BPZ_Z_B")
raw_bpz_z_min=bpzdat.field("BPZ_Z_B_MIN")
raw_bpz_z_max=bpzdat.field("BPZ_Z_B_MAX")
raw_bpz_odds=bpzdat.field("BPZ_ODDS")
raw_bpz_SeqNr=objdat.field('SeqNr')
raw_bpz_ra=objdat.field('ALPHA_J2000')
raw_bpz_dec=objdat.field('DELTA_J2000')
#raw_bpz_rmag=objdat.field("MAG_AUTO1-SUBARU-10_3-1-W-C-RC")
raw_bpz_rmag=objdat.field("MAG_APER1-SUBARU-10_3-1-W-C-RC")
raw_bpz_imag=objdat.field("MAG_APER1-SUBARU-10_3-1-W-C-IC")
raw_bpz_rmagerr=objdat.field("MAGERR_APER1-SUBARU-10_3-1-W-C-RC")
raw_bpz_imagerr=objdat.field("MAGERR_APER1-SUBARU-10_3-1-W-C-IC")
raw_r_ok=raw_bpz_rmag>0
raw_i_ok=raw_bpz_imag>0
## now clip down to the bpz catalog that actually has a similar magnitude range:
ok_bpz=(raw_bpz_rmag<23)*(raw_bpz_imag<23)
if ok_bpz.any():
    bpz_z=raw_bpz_z[ok_bpz]
    bpz_z_min=raw_bpz_z_min[ok_bpz]
    bpz_z_max=raw_bpz_z_max[ok_bpz]
    bpz_odds=raw_bpz_odds[ok_bpz]
    bpz_ra=raw_bpz_ra[ok_bpz]
    bpz_dec=raw_bpz_dec[ok_bpz]
    bpz_rmag=raw_bpz_rmag[ok_bpz]
    bpz_imag=raw_bpz_imag[ok_bpz]
    bpz_rmagerr=raw_bpz_rmagerr[ok_bpz]
    bpz_imagerr=raw_bpz_imagerr[ok_bpz]
    r_ok=raw_r_ok[ok_bpz]
    i_ok=raw_i_ok[ok_bpz]
    bpz_SeqNr=raw_bpz_SeqNr[ok_bpz]
#fk5 is J2000 by default!
mycoords=SkyCoord(bpz_ra*u.degree,bpz_dec*u.degree,frame="fk5")
## now get ned/sdss info
sdsscoords=SkyCoord(sdsscat["RA"].data*u.degree,sdsscat["Dec"].data*u.degree,frame="fk5")
nedcoords=SkyCoord(nedcat["ra"],nedcat["dec"],frame="fk5")
# ned_gmag=nedcat['g'].data ned_imag=nedcat['i'].data ned_rmag=nedcat['r'].data
sdss_z=sdsscat["Redshift"].data
ned_z=nedcat['z'].data
##combine ned and sdss info
zspec_dec=append(nedcoords.dec.deg,sdsscoords.dec.deg)
zspec_ra=append(nedcoords.ra.deg,sdsscoords.ra.deg)
zspec_z=append(ned_z,sdss_z)
zspeccoords=SkyCoord(zspec_ra*u.degree,zspec_dec*u.degree,frame="fk5")
## For each spec-z object, find its nearest BPZ neighbor on the sky.
idx, d2d, d3d = zspeccoords.match_to_catalog_sky(mycoords)
d2d_arcsec=d2d.deg*3600
matches=mycoords[idx]
Mbpz_ra=matches.ra.deg
Mbpz_dec=matches.dec.deg
Mbpz_z=bpz_z[idx]
Mbpz_z_min=bpz_z_min[idx]
Mbpz_z_max=bpz_z_max[idx]
Mbpz_odds=bpz_odds[idx]
Mbpz_rmag=bpz_rmag[idx]
Mbpz_imag=bpz_imag[idx]
Mbpz_rmagerr=bpz_rmagerr[idx]
Mbpz_imagerr=bpz_imagerr[idx]
Mbpz_i_ok=i_ok[idx]
Mbpz_r_ok=r_ok[idx]
Mbpz_SeqNr=bpz_SeqNr[idx]
## save catalog with all relevent matched information
from astropy.io import ascii
#outtable=astropy.table.table.Table(data=[Mbpz_ra,Mbpz_dec,zspec_ra,zspec_dec,zspec_z,Mbpz_z, Mbpz_odds, d2d_arcsec],names=["Mbpz_ra","Mbpz_dec","zspec_ra","zspec_dec","zspec_z","Mbpz_z"," Mbpz_odds","d2d_arcsec"])
#outtable.write("match_specz_and_bpz_cats_uncut_final_zspecgals2mygals.tsv",format="ascii.commented_header")
## check and see how things look if I exclude detections that have a -99 anywhere
## matches are "good" if they are less than 6 pixels apart and z>.01
#adam-SHNT# the below:
## So, I run bpz on a catalog that's got both stars and galaxies in it, but I have ZMIN=.01, so it's safe to say that the BPZ results are going to be meaningless for stars, and I should remove them from the plots evaluating the effectiveness of BPZ.
#also, I've got some doubles, are these by any chance from cat_additions_from_sdss_zspecs.tsv overlapping with the zspecs?
#I think that's it, but I might need to make some of these changes in other adam_match_nedgals2mygals*.py files too!
lt6pix_apart=d2d_arcsec<1.2
gtpt01_z=zspec_z>.01
match_goodenough=lt6pix_apart*gtpt01_z
bpzra=Mbpz_ra[match_goodenough]
bpzdec=Mbpz_dec[match_goodenough]
inds=arange(len(zspec_z))
goodinds=inds[match_goodenough]
goodzspec= zspec_z[match_goodenough]
## Find "doubles": two spec-z objects matched to the same BPZ object
## (same matched dec); record the index pairs.
doubles=[]
doubles_ra=[]
doubles_dec=[]
for i in range(len(bpzra)):
    ra=bpzra[i]
    ra_matches=bpzra==ra
    dec=bpzdec[i]
    dec_matches=bpzdec==dec
    if dec_matches.sum()>1:
        if not i==dec_matches.argmax():
            doubles.append((i,dec_matches.argmax()))
            doubles_ra.append(ra)
            doubles_dec.append(dec)
## For each double, keep only the closer of the two matches.
doubles_zspec_inds=[]
match_goodsingles=match_goodenough.copy()
for (i1,i2) in doubles:
    print goodzspec[i1],goodzspec[i2],goodzspec[i1]-goodzspec[i2]
    doubles_zspec_inds.append((goodinds[i1],goodinds[i2]))
    gi1,gi2=(goodinds[i1],goodinds[i2])
    d2d1=d2d_arcsec[gi1]
    d2d2=d2d_arcsec[gi2]
    if d2d1<d2d2:
        print match_goodenough[gi2]
        match_goodsingles[gi2]=False
    else:
        print match_goodenough[gi1]
        match_goodsingles[gi1]=False
# turns out the doubles (where two zspec objects are matched to the same bpz object) are from zspec having two z's listed for a single object (one side of galaxy and the other side of the galaxy)
Mbpz_ra=matches.ra.deg
Mbpz_dec=matches.dec.deg
Mbpz_z=bpz_z[idx]
Mbpz_z_min=bpz_z_min[idx]
Mbpz_z_max=bpz_z_max[idx]
Mbpz_odds=bpz_odds[idx]
Mbpz_rmag=bpz_rmag[idx]
Mbpz_imag=bpz_imag[idx]
Mbpz_rmagerr=bpz_rmagerr[idx]
Mbpz_imagerr=bpz_imagerr[idx]
Mbpz_i_ok=i_ok[idx]
Mbpz_r_ok=r_ok[idx]
Mbpz_SeqNr=bpz_SeqNr[idx]
## Collect the de-duplicated good matches into one dict of arrays.
good={}
good['dist']= d2d_arcsec[match_goodsingles]
good['Mbpz_ra']= Mbpz_ra[match_goodsingles]
good['Mbpz_dec']= Mbpz_dec[match_goodsingles]
good['Mbpz_z']= Mbpz_z[match_goodsingles]
good['Mbpz_z_min']= Mbpz_z_min[match_goodsingles]
good['Mbpz_z_max']= Mbpz_z_max[match_goodsingles]
good['Mbpz_odds']= Mbpz_odds[match_goodsingles]
good['Mbpz_imag']= Mbpz_imag[match_goodsingles]
good['Mbpz_rmag']= Mbpz_rmag[match_goodsingles]
good['Mbpz_imagerr']= Mbpz_imagerr[match_goodsingles]
good['Mbpz_rmagerr']= Mbpz_rmagerr[match_goodsingles]
good['Mbpz_i_ok']= Mbpz_i_ok[match_goodsingles]
good['Mbpz_r_ok']= Mbpz_r_ok[match_goodsingles]
good['Mbpz_SeqNr']= Mbpz_SeqNr[match_goodsingles]
good['zspec_z']= zspec_z[match_goodsingles]
good['zspec_ra']= zspec_ra[match_goodsingles]
good['zspec_dec']= zspec_dec[match_goodsingles]
allok=Mbpz_i_ok*Mbpz_r_ok
good['Mbpz_allok']= allok[match_goodsingles]
## save catalog of the good matches
outtable=astropy.table.table.Table( data=[good['zspec_ra'],good['zspec_dec'],good['zspec_z'],good['Mbpz_z'],good['Mbpz_ra'],good['Mbpz_dec'],good['Mbpz_SeqNr'],good['Mbpz_z_min'],good['Mbpz_z_max'],good['Mbpz_odds'],good['Mbpz_imag'],good['Mbpz_rmag'],good['Mbpz_imagerr'],good['Mbpz_rmagerr'],good['Mbpz_i_ok'],good['Mbpz_r_ok'],good['Mbpz_allok'],good['dist']], \
    names=['ref_ra','ref_dec','ref_z','bpz_z','bpz_ra','bpz_dec','bpz_SeqNr','bpz_z_min','bpz_z_max','bpz_odds','bpz_I','bpz_R','bpz_Ierr','bpz_Rerr','bpz_I_ok','bpz_R_ok','bpz_allok','dist'])
outtable.write("match_specz_and_bpz_cats.tsv",format="ascii.commented_header")
#outtable=astropy.table.table.Table( data=[Mbpz_ra[match_goodsingles],Mbpz_dec[match_goodsingles],zspec_ra[match_goodsingles],zspec_dec[match_goodsingles],zspec_z[match_goodsingles],Mbpz_z[match_goodsingles], Mbpz_odds[match_goodsingles], d2d_arcsec[match_goodsingles]], \
#    names=["Mbpz_ra","Mbpz_dec","zspec_ra","zspec_dec","zspec_z","Mbpz_z","Mbpz_odds","d2d_arcsec"])
#outtable.write("match_specz_and_bpz_cats.tsv",format="ascii.commented_header")
#adam-SHNT# OK, i've got to save the Z_S to the catalog now, and that's the catalog adam_do_photometry.py has to use!
## Write SeqNr -> spec-z table next to the input BPZ catalog.
Z_S_SeqNr=Mbpz_SeqNr[match_goodsingles]
Z_S_z=zspec_z[match_goodsingles]
## save catalog of the good matches
bpz_path=os.path.split(bpz_file)
outtable=astropy.table.table.Table( data=[ Z_S_SeqNr, Z_S_z ], names=['SeqNr','z'])
outtable.write(bpz_path[0]+"/match_specz_and_bpz_cats.txt",format="ascii.commented_header")
|
deapplegate/wtgpipeline
|
adam-nedgals2mygals/match_specz_and_bpz_cats.py
|
Python
|
mit
| 8,999
|
[
"Galaxy"
] |
6a85b028524859f831411bd44998780744521a0f99fe07838922834cc48b8b16
|
#!/usr/bin/env python
import sys
# Refuse to run under Python 2 -- the rest of the script is Python 3 only.
if sys.version_info.major != 3:
    sys.exit('ERROR: This script must be run using Python v3.')
from datetime import datetime, timedelta
import multiprocessing
from numpy import arcsin, array, cos, isnan, pi, radians, sin, sqrt, tan
import numpy as np
from netCDF4 import Dataset
import os
from scipy.spatial import cKDTree
import time
import calendar
# USER CONFIGURABLES
# Module-level defaults; each can be overridden from the command line via
# a matching -FLAG argument (see main()).
## RUN INFO
DATES = ['2018-01-01', '...', '2018-12-31']
DATE_FORMAT = '%Y-%m-%d'
THREE_DAY_MONTH = False
BASE_YEAR = 2018
# Number of days modeled in the base year (leap-year aware).
if calendar.isleap(BASE_YEAR):
    total_days = 366
else:
    total_days = 365
REGIONS = range(1, 89)
#REGIONS = range(31, 32)
NUM_PROCS = 16
## GRID INFO
GRID_DOT_FILE = 'input/grid/GRIDDOT2D.Cali_4km_321x291'
MET_ZF_FILE = 'input/grid/METCRO3D.Cali_4km_321x291_2012_01_ZF_AVG'
NCOLS = 321
NROWS = 291
NLAYERS = 18
NUM_NONZERO_LAYERS = 12
ABL_METERS = 1000
REGION_BOX_FILE = 'input/default/region_boxes.csv'
## FLIGHT PATH INFO
# Climb-out / glide-slope angles, in radians.
TAKEOFF_ANGLES = [radians(10), radians(20), radians(30)]
LAND_ANGLES = [radians(2.5), radians(3), radians(3.5)]
RUNWAY_FILE = 'input/default/runway_info_cali_20170912.csv'
FLIGHT_FRACTS_FILE = 'input/default/flight_stage_fractions_20161004.csv'
## EMISSIONS INFO
CATEGORIES_FILE = 'input/default/aircraft_categories.py'
AREA_FILES = ['input/emis/st.ar.v0001.810.2018.2018.rf3064.SMOKEv4p0..ff10']
POINT_FILES = ['input/emis/st.ps.v0001.810.2018.2018.rf3064.SMOKEv4p0..ff10']
REGION_STRINGS_FILE = 'input/default/region_strings_FIPS_GAI.csv'
FACILITY_ID_FILE = 'input/default/facility_ids.csv'
## TEMPORAL INFO
TEMPORAL_FILE = 'input/temporal/bts_2015_2019_airport_temporal_frac_statewide_rev1.csv'
## OUTPUT INFO
VERSION = 'v0304'
GSPRO_FILE = 'input/ncf/gspro.all.S07T.11521'
GSREF_FILE = 'input/ncf/gsref_26jan2021_2018s.txt'
WEIGHT_FILE = 'input/ncf/molecular.weights.S07TMS.txt'
OUT_DIR = 'output/'
SHOULD_ZIP = False
PRINT_TOTALS = True
def main():
    """Parse the command-line options and execute the program.

    Every USER CONFIGURABLE defined at the top of this file may be
    overridden with a ``-FLAG value`` pair; the value is coerced to the
    type of the configurable's default.  Unknown flags print the help
    menu and exit.
    """
    import ast  # local import: only needed for dict-valued flags

    # start from the script-level defaults
    config = {'DATES': DATES, 'DATE_FORMAT': DATE_FORMAT, 'THREE_DAY_MONTH': THREE_DAY_MONTH,
              'BASE_YEAR': BASE_YEAR, 'NUM_PROCS': NUM_PROCS, 'REGIONS': REGIONS,
              'GRID_DOT_FILE': GRID_DOT_FILE, 'MET_ZF_FILE': MET_ZF_FILE, 'NROWS': NROWS,
              'NCOLS': NCOLS, 'NLAYERS': NLAYERS, 'NUM_NONZERO_LAYERS': NUM_NONZERO_LAYERS,
              'ABL_METERS': ABL_METERS, 'REGION_BOX_FILE': REGION_BOX_FILE,
              'TAKEOFF_ANGLES': TAKEOFF_ANGLES, 'LAND_ANGLES': LAND_ANGLES,
              'RUNWAY_FILE': RUNWAY_FILE, 'FLIGHT_FRACTS_FILE': FLIGHT_FRACTS_FILE,
              'CATEGORIES_FILE': CATEGORIES_FILE, 'AREA_FILES': AREA_FILES,
              'POINT_FILES': POINT_FILES, 'FACILITY_ID_FILE': FACILITY_ID_FILE,
              'TEMPORAL_FILE': TEMPORAL_FILE, 'REGION_STRINGS_FILE': REGION_STRINGS_FILE,
              'VERSION': VERSION, 'GSPRO_FILE': GSPRO_FILE, 'GSREF_FILE': GSREF_FILE,
              'WEIGHT_FILE': WEIGHT_FILE, 'OUT_DIR': OUT_DIR, 'SHOULD_ZIP': SHOULD_ZIP,
              'PRINT_TOTALS': PRINT_TOTALS}
    # parse command-line overrides
    a = 1
    while a < len(sys.argv):
        flag = sys.argv[a]
        if flag.startswith('-'):
            flag = flag[1:].upper()
            if flag in config:
                a += 1
                value = sys.argv[a]
                typ = type(config[flag])
                # BUG FIX: range-typed defaults (e.g. REGIONS) previously fell
                # through to `typ(value)` and crashed; treat them like lists.
                if typ in (list, range):
                    if value == "[]":  # Allow the user to input an empty list.
                        config[flag] = []
                    else:
                        if typ == range:
                            sub_type = int
                        else:
                            # guard against an empty default list (was an IndexError)
                            sub_type = type(config[flag][0]) if config[flag] else str
                        config[flag] = [sub_type(v) for v in value.split(',')]
                elif typ == bool:
                    # BUG FIX: argv values are always strings, so the old
                    # comparisons against True and 1 could never match;
                    # accept any casing of 'true' plus '1'.
                    config[flag] = value.lower() in ('true', '1')
                elif typ == dict:
                    # literal_eval instead of eval: never execute user input
                    config[flag] = ast.literal_eval(value)
                else:
                    config[flag] = typ(value)
            else:
                usage()
        a += 1
    # run program
    gate = GATE(config)
    gate.run()
def usage():
    ''' Print the help menu and exit the process.
    Called when an unrecognized command-line flag is seen; raises SystemExit.
    '''
    # NOTE: this text is user-facing; keep it in sync with the flags
    # handled in main() and the USER CONFIGURABLES above.
    usage_txt = """
GATE
Usage: ./GATE.py [-FLAGS]
Create 3D CMAQ-ready NetCDF files for aircraft emissions.
Optional Arguments:
-DATES dates to model aircraft emissions
-DATE_FORMAT Python datetime format string for the above
-THREE_DAY_MONTH True if each month can be represented by 3 days
-BASE_YEAR base year
-REGIONS numerical region list
-NUM_PROCS number of parallel processes to run (1 per day)
-GRID_DOT_FILE path to CMAQ GRIDDOT2D file
-MET_ZF_FILE path to CMAQ METCRO3D file
-NCOLS number of columns in modeling domain
-NROWS number of rows in modeling domain
-NLAYERS total number of vertical layers
-NUM_NONZERO_LAYERS number of vertical layers with emissions
-ABL_METERS height of the ABL, in meters
-REGION_BOX_FILE path to CSV file with I/J box for each region
-TAKEOFF_ANGLES take-off angles to model
-LAND_ANGLES landing angles to model
-RUNWAY_FILE path to CSV with lat/lons for all runways
-FLIGHT_FRACTS_FILE path to CSV for species fractions by flight stage
-CATEGORIES_FILE path to Python file with aircraft EIC codes
-AREA_FILES path to FF10 file with area source emissions
-POINT_FILES path to FF10 file with point source emissions
-REGION_STRINGS_FILE path to CSV file with region code information
-FACILITY_ID_FILE path to CSV file with airport FAA codes
-TEMPORAL_FILE path to CSV-like file with airport temporal profiles
-VERSION string used to identify the run
-GSPRO_FILE path to SMOKE-style GSPRO file
-GSREF_FILE path to SMOKE-style GSREF file
-WEIGHT_FILE path to file with molecular weights
-OUT_DIR path to output directory
-SHOULD_ZIP True if you want to gzip outputs, False otherwise
-PRINT_TOTALS True if you want to print totals to stdout
This program can be run without commandline arguments by setting config
variables in the script.
Report bugs to <https://github.com/mmb-carb/GATE/issues>.
"""
    # strip the leading/trailing blank lines before printing
    print(usage_txt.strip())
    exit()
class GATE(object):
    """Top-level driver for the GATE model.

    Wires together the reader/builder/scaler/writer pipeline stages and
    fans the per-date scale-and-write work out across worker processes.
    """
    # version string stamped into the config and printed at startup
    GATE_VERSION = '0.3.2'
    def __init__(self, config):
        ''' build each step of the model '''
        config['GATE_VERSION'] = self.GATE_VERSION
        # expand/validate the configured date list in place (handles '...')
        self._parse_dates(config)
        self.dates = config['DATES']
        self.num_procs = config['NUM_PROCS']
        # one object per pipeline stage
        self.emis_readr = EmissionsReader(config)
        self.temp_build = TemporalSurrogateBuilder(config)
        self.spat_build = SpatialSurrogateBuilder(config)
        self.emis_scale = EmissionsScaler(config)
        self.ncdf_write = DictToNcfWriter(config)
    def run(self):
        ''' run each step of the model
            break the final scaling and output steps into multiple processes
        '''
        print('\nRunning GATE Model v' + self.GATE_VERSION)
        emis = self.emis_readr.read()
        temp_surrs = self.temp_build.build()
        spat_surrs = self.spat_build.build(emis.keys())
        print('\tScaling Emissions & Writing Outputs')
        jobs = []
        # one worker process per chunk of dates
        # NOTE(review): the jobs are started but never join()ed here; the
        # interpreter waits for non-daemon children at exit, but run() does
        # NOT block until outputs exist -- confirm callers don't expect that.
        for date_group in self.chunk_list(self.dates, self.num_procs):
            j = multiprocessing.Process(target=self._scale_and_write_dates,
                                        args=(date_group, emis, spat_surrs, temp_surrs))
            jobs.append(j)
            j.start()
    def _scale_and_write_dates(self, dates, emis, spat_surrs, temp_surrs):
        ''' This is a single-process helper function for the multi-process program.
        Scale emissions for a single date and write them to a CMAQ-ready NetCDF file.
        '''
        for date in dates:
            scaled_emis, daily_emis = self.emis_scale.scale(emis, spat_surrs, temp_surrs, date)
            self.ncdf_write.write(scaled_emis, daily_emis, emis, date)
    def _parse_dates(self, config):
        ''' Allow for implicit data ranges by using ellipsis:
            DATES = ['2000-01-01', '...', '2000-12-31']
        Optional: If THREE_DAY_MONTH == True, the user wants to only run for 3 days in each
        month. And we pick the second Wed, Sat, and Sunday.
        '''
        fmt = config['DATE_FORMAT']
        # determine if the dates are explicitly listed, or listed by range
        if len(config['DATES']) == 3 and config['DATES'][1].strip() == '...':
            start = datetime.strptime(config['DATES'][0], fmt)
            end = datetime.strptime(config['DATES'][2], fmt)
            dates = [datetime.strftime(start, fmt)]
            while start < end:
                start += timedelta(days=1)
                dates.append(datetime.strftime(start, fmt))
        else:
            dates = config['DATES']
        # check if the model year is a leap year, but the base year is not a leap year
        #if calendar.isleap(start.year) == True and calendar.isleap(BASE_YEAR) == False:
        #    invalid_date = str(start.year) + '-02-29'
        #    dates.remove(invalid_date)
        # sort date strings
        config['DATES'] = sorted(dates)
        # validate dates: all dates must fall within a single calendar year
        years = set()
        for dt in config['DATES']:
            years.add(datetime.strptime(dt, fmt).year)
        if len(years) > 1:
            raise ValueError('You may only run this model for one year at a time.')
        # handle the case where the user wants 3 representative days/month
        if not config['THREE_DAY_MONTH']:
            return
        # find start and end dates of period
        start = datetime.strptime(config['DATES'][0], fmt)
        end = datetime.strptime(config['DATES'][-1], fmt)
        # generate new dates for the 3-representative days-per-month case
        # (weekday numbers use Python's convention: Mon=0 .. Sun=6)
        dates = []
        yr = start.year
        months = sorted(range(start.month, end.month + 1))
        for month in months:
            current = datetime(start.year, month, 1)
            dates.append(datetime.strftime(self._nth_weekday(current, 2, 2), fmt)) # Wednesday
            dates.append(datetime.strftime(self._nth_weekday(current, 2, 5), fmt)) # Saturday
            dates.append(datetime.strftime(self._nth_weekday(current, 2, 6), fmt)) # Sunday
        # sort date strings
        config['DATES'] = sorted(dates)
    @staticmethod
    def _nth_weekday(the_date, nth, week_day):
        ''' Find the Nth "blank" of the given month.
            Where "blank" is Monday, Tuesday, etc...
        (week_day uses datetime.weekday() numbering: Mon=0 .. Sun=6)
        '''
        temp = the_date.replace(day=1)
        # days from the 1st to the first occurrence of week_day
        adj = (week_day - temp.weekday()) % 7
        temp += timedelta(days=adj)
        temp += timedelta(weeks=nth - 1)
        return temp
    @staticmethod
    def chunk_list(seq, num):
        # split seq into at most num contiguous, near-equal chunks;
        # zero-length slices (when num > len(seq)) are dropped
        avg = len(seq) / float(num)
        out = []
        last = 0.0
        while last < len(seq):
            new_out = seq[int(last):int(last + avg)]
            last += avg
            if not len(new_out):
                continue
            out.append(new_out)
        return out
class EmissionsReader(object):
    """Read FF10 area- and point-source aircraft emissions files and build a
    nested daily-emissions dict: region -> airport -> EIC -> pollutant -> tons/day.
    """
    def __init__(self, config):
        self.area_files = config['AREA_FILES']
        self.point_files = config['POINT_FILES']
        # NOTE(review): the categories file is Python-literal text that is
        # eval'd; it must come from a trusted source.
        cats = eval(open(config['CATEGORIES_FILE']).read())
        self.eics = cats['eics']
        self.comjets = cats['commercial_jets']
        self.scc2eic = cats['scc2eic']
        self.regions = config['REGIONS']
        rsf = config['REGION_STRINGS_FILE']
        # map "country+FIPS" string -> numeric region code (header row skipped)
        self.region_strings = dict((c[1], int(c[0])) for c in [l.rstrip().split(',') for l in open(rsf, 'r').readlines()[1:]])
        self.facility_ids = self.read_facility_file(config['FACILITY_ID_FILE'])
        self.airports = SpatialSurrogateBuilder.read_runways(config['RUNWAY_FILE'])
        # final output dict (filled by read())
        self.airport_emis = {}
    def read(self):
        ''' generates emissions dict by: airport (w/ region), EIC, and pollutant
        '''
        print('\tReading Emissions Files')
        self._read_area_files()
        self._read_point_files()
        return self.airport_emis
    def _read_area_files(self):
        ''' read all the SMOKE-ready FF10 area files for aircraft emissions,
        and split those emissions by airport
        '''
        # read regional emissions from FF10s
        area_emis = {}
        for file_path in self.area_files:
            self._read_area_file(file_path, area_emis)
        # split emissions by airport
        self._split_area_to_airports(area_emis)
    def _read_area_file(self, file_path, region_emis):
        ''' Read a SMOKE-ready FF10 area file to get yearly aircraft Emissions (in tons)
        File Format:
        #DESC FF10 Nonpoint format
        #DESC COUNTRY,FIPS_gai,TRIBAL_CODE,CENSUS,ID,EIC,EMIS_TYPE,POLL,ANN_VALUE,,,...
        US,06059,,,,81080011400000,,CO,4.80000019073,,,,,,,81080011400000,,,...
        US,06059,,,,81080011400000,,CO,13.3999996185,,,,,,,81080011400000,,,...
        Accumulates daily emissions into region_emis[region][eic][pollutant].
        '''
        f = open(file_path, 'r')
        for line in f.readlines():
            if line.startswith('#'): continue
            ln = line.split(',')
            if len(ln) < 10: continue
            # region key is country code + FIPS string
            region = self.region_strings[ln[0] + ln[1]]
            if region not in self.regions: continue
            eic = int(ln[5])
            if eic not in self.eics:
                # allow SCC codes, translated through the scc2eic map
                if eic not in self.scc2eic:
                    continue
                eic = self.scc2eic[eic]
            pollutant = ln[7].upper()
            emis = float(ln[8]) / total_days # convert from annual to daily
            if region not in region_emis:
                region_emis[region] = {}
            if eic not in region_emis[region]:
                region_emis[region][eic] = {}
            if pollutant not in region_emis[region][eic]:
                region_emis[region][eic][pollutant] = 0.0
            region_emis[region][eic][pollutant] += emis
        f.close()
    def _split_area_to_airports(self, area_emis):
        ''' split area source aircraft emissions from regional to airport-specific
        using the number of flights at each airport
        '''
        for region, eic_emis in area_emis.items():
            if region not in self.airports: continue
            if region not in self.airport_emis:
                self.airport_emis[region] = {}
            # total flights for this county
            total_comjet = float(sum([a['flights_comjet'] for a in self.airports[region].values()]))
            total_other = float(sum([a['flights_other'] for a in self.airports[region].values()]))
            for airport, airport_data in self.airports[region].items():
                # build flight fractions for this airport
                fraction_comjet = self.airports[region][airport]['flights_comjet'] / total_comjet
                fraction_other = self.airports[region][airport]['flights_other'] / total_other
                # split off emissions for just this airport
                if airport not in self.airport_emis[region]:
                    self.airport_emis[region][airport] = {}
                for eic, poll_emis in eic_emis.items():
                    # commercial-jet EICs are split by comjet flight counts
                    fraction = fraction_comjet if eic in self.comjets else fraction_other
                    for poll, emis in poll_emis.items():
                        if eic not in self.airport_emis[region][airport]:
                            self.airport_emis[region][airport][eic] = {}
                        if poll not in self.airport_emis[region][airport][eic]:
                            self.airport_emis[region][airport][eic][poll] = 0.0
                        self.airport_emis[region][airport][eic][poll] += emis * fraction
    @staticmethod
    def read_facility_file(file_path):
        ''' read the airport information CSV into a custom dictionary:
        {facility_id: {'faa_lid': str, 'region': int, 'name': str}}
        '''
        ids = [l.rstrip().split(',') for l in open(file_path, 'r').readlines()[1:]]
        return dict((int(c[0]), {'faa_lid': c[1], 'region': int(c[2]), 'name': c[3]}) for c in ids)
    def _read_point_files(self):
        ''' read all the SMOKE-ready FF10 point files for aircraft emissions,
        and split those emissions by airport
        '''
        for file_path in self.point_files:
            self._read_point_file(file_path)
    def _read_point_file(self, file_path):
        ''' Read a SMOKE-ready FF10 point file to get yearly aircraft Emissions (in tons)
        File Format (abbreviated; ~78 comma-separated columns):
        #DESC FF10 Point format
        #DESC COUNTRY,FIPS_gai,TRIBAL_CODE,FACILITY_ID,POINT_ID,...,EIC,POLL,ANN_TOTAL,...
        US,006059,,180002,3,0,1,,,,,81080411400000,CO,0.24699999392,,"Brackett Field",...
        Facility IDs not present in the facility file are reported and dropped.
        '''
        f = open(file_path, 'r')
        facs_not_found = set()
        for line in f.readlines():
            if line.startswith('#'): continue
            ln = line.split(',')
            if len(ln) < 14: continue
            fac_id = int(ln[3])
            if fac_id not in self.facility_ids:
                facs_not_found.add(str(fac_id))
                continue
            facility = self.facility_ids[fac_id]
            region = facility['region']
            if region not in self.regions: continue
            eic = int(ln[11])
            if eic not in self.eics:
                # allow SCC codes, translated through the scc2eic map
                if eic not in self.scc2eic:
                    continue
                eic = self.scc2eic[eic]
            poll = ln[12].upper()
            emis = float(ln[13]) / total_days # convert from annual to daily
            location = facility['faa_lid']
            if region not in self.airport_emis:
                self.airport_emis[region] = {}
            if location not in self.airport_emis[region]:
                self.airport_emis[region][location] = {}
            if eic not in self.airport_emis[region][location]:
                self.airport_emis[region][location][eic] = {}
            if poll not in self.airport_emis[region][location][eic]:
                self.airport_emis[region][location][eic][poll] = 0.0
            self.airport_emis[region][location][eic][poll] += emis
        if facs_not_found:
            print('\t\tThese facility IDs were not found. Their emissions will be dropped:\n\t\t' +
                  ' '.join(sorted(facs_not_found)))
        f.close()
class TemporalSurrogateBuilder(object):
    """Build 24-hour temporal scaling profiles for each date, airport, and EIC.

    Profiles (monthly, weekly, weekday/weekend diurnal) are read from a
    CSV-like file and combined into a single 24-value cycle per
    date/airport/EIC.
    """
    # sentinel dict key standing in for "default" airports / EICs
    DEFAULT = -1
    def __init__(self, config):
        self.base_year = int(config['BASE_YEAR'])
        self.date_format = config['DATE_FORMAT']
        self.dates = [datetime.strptime(d, self.date_format) for d in sorted(set(config['DATES']))]
        # NOTE(review): the categories file is Python-literal text that is
        # eval'd; it must come from a trusted source.
        cats = eval(open(config['CATEGORIES_FILE']).read())
        self.eics = cats['eics']
        self.temp_file = config['TEMPORAL_FILE']
        self.file_profs = self._read_temp_file()
        self.temp_profs = {}
    def build(self):
        ''' read temporal profile dict by: airport, EIC
            create diurnal profiles for: monthly, weekly, and diurnal
        Returns: {date_str: {airport: {eic: [24 hourly factors]}}}
        '''
        self.temp_profs = {}
        # create a full set of scaling factors for each date
        for d in self.dates:
            d_str = datetime.strftime(d, self.date_format)
            # day-of-week is taken from the base year so weekday patterns
            # match the year the profiles were derived from
            dow = datetime(self.base_year, d.month, d.day).weekday()
            month = d.month - 1
            self.temp_profs[d_str] = {}
            # and each airport, individually
            for airport in self.file_profs:
                if airport not in self.temp_profs[d_str]:
                    self.temp_profs[d_str][airport] = {}
                for eic in [-1] + self.eics:
                    def_eic = eic if eic in self.file_profs[airport] else self.DEFAULT
                    # check if default has already been included
                    if def_eic not in self.temp_profs[d_str][airport]:
                        self.temp_profs[d_str][airport][def_eic] = {}
                    else:
                        continue
                    # monthly scaling factor (falling back to defaults as needed)
                    try:
                        factor_month = self.file_profs[airport][def_eic]['monthly'][month]
                    except (KeyError, IndexError):
                        try:
                            factor_month = self.file_profs[self.DEFAULT][def_eic]['monthly'][month]
                        except (KeyError, IndexError):
                            factor_month = self.file_profs[self.DEFAULT][self.DEFAULT]['monthly'][month]
                    # day-of-week scaling factor
                    try:
                        factor_dow = self.file_profs[airport][def_eic]['weekly'][dow]
                    except (KeyError, IndexError):
                        try:
                            factor_dow = self.file_profs[self.DEFAULT][def_eic]['weekly'][dow]
                        except (KeyError, IndexError):
                            factor_dow = self.file_profs[self.DEFAULT][self.DEFAULT]['weekly'][dow]
                    # 24-hr diurnal scaling factors
                    diurn = 'diurnal_weekday' if dow < 5 else 'diurnal_weekend'
                    try:
                        factors_diurnal = self.file_profs[airport][def_eic][diurn]
                    except (KeyError, IndexError):
                        try:
                            factors_diurnal = self.file_profs[self.DEFAULT][def_eic][diurn]
                        except (KeyError, IndexError):
                            factors_diurnal = self.file_profs[self.DEFAULT][self.DEFAULT][diurn]
                    # combine scaling factors to a resultant 24-hr cycle
                    self.temp_profs[d_str][airport][def_eic] = [f * factor_month * factor_dow for f in factors_diurnal]
        return self.temp_profs
    def _read_temp_file(self):
        ''' Read the custom GATE temporal profile file.
        NOTE: It is up to the creator of this file to ensure either:
            a) a full set of data, or
            b) a full set of data defaults
        File format (last header column names the value separator):
        airport,eic,type,fractions|
        default,default,monthly,0.962509|0.974175|0.989383|0.994767|...
        default,default,weekly,1.03601|1.017904|1.025875|1.015781|...
        Returns: {airport_or_DEFAULT: {eic_or_DEFAULT: {type: [floats]}}}
        '''
        # rearrange file above into usable data, taking care of defaults appropriately
        profiles = {}
        default = 'default'
        sep = '|'
        # read file header to get column separator
        f = open(self.temp_file, 'r')
        last_col = f.readline().rstrip().split(',')[-1]
        if last_col.startswith('fractions') and len(last_col) == (len('fractions') + 1):
            sep = last_col[-1]
        # parse all lines in file into dict: (airport, eic, type) -> [floats]
        lines = [line.rstrip().split(',') for line in f.readlines()]
        f.close()
        lines = filter(lambda l: len(l) > 3, lines)
        data = dict((tuple(line[:3]), [float(v) for v in line[-1].split(sep)]) for line in lines)
        # ignore the case: default type
        for key in list(data.keys()):
            if key[2] == default:
                print('\t\tERROR: Temporal profiles can not have a default type')
                del data[key]
        # handle the case: full-default profiles (all four types are required)
        for typ in ['monthly', 'weekly', 'diurnal_weekday', 'diurnal_weekend']:
            default_key = (default, default, typ)
            if default_key in data:
                if self.DEFAULT not in profiles:  # default airport
                    profiles[self.DEFAULT] = {}
                if self.DEFAULT not in profiles[self.DEFAULT]:  # default EIC
                    profiles[self.DEFAULT][self.DEFAULT] = {}
                profiles[self.DEFAULT][self.DEFAULT][typ] = data[default_key]
                del data[default_key]
            else:
                raise ValueError("Default temporal profile missing: " + str(default_key))
        # handle the case: default airport, but not EIC
        for key in list(data.keys()):
            if key[0] == default and key[1] != default:
                eic = int(key[1])
                if self.DEFAULT not in profiles:
                    profiles[self.DEFAULT] = {}
                # BUG FIX: membership was tested with the raw string EIC, which
                # never matched the int keys stored below, so every later type
                # for the same EIC wiped the earlier ones
                if eic not in profiles[self.DEFAULT]:
                    profiles[self.DEFAULT][eic] = {}
                profiles[self.DEFAULT][eic][key[2]] = data[key]
                del data[key]
        # handle the case: default EIC, but not airport
        for key in list(data.keys()):
            if key[0] != default and key[1] == default:
                if key[0] not in profiles:
                    profiles[key[0]] = {}
                if self.DEFAULT not in profiles[key[0]]:
                    profiles[key[0]][self.DEFAULT] = {}
                profiles[key[0]][self.DEFAULT][key[2]] = data[key]
                del data[key]
        # handle the case: specific airport and EIC (all that is left)
        for key in data:
            airport = key[0]
            eic = int(key[1])
            if airport not in profiles:
                profiles[airport] = {}
            if eic not in profiles[airport]:
                profiles[airport][eic] = {}
            profiles[airport][eic][key[2]] = data[key]
        return profiles
class SpatialSurrogateBuilder(object):
RAD_FACTOR = np.float32(pi / 180.0) # need angles in radians
    def __init__(self, config):
        # grid dimensions of the modeling domain
        self.nrows = config['NROWS']
        self.ncols = config['NCOLS']
        self.nlayers = config['NLAYERS']
        # top of the atmospheric boundary layer, in meters
        self.abl_meters = config['ABL_METERS']
        self.zf_file = config['MET_ZF_FILE']
        self.corners_file = config['GRID_DOT_FILE']
        # layer-top heights, filled by _read_grid_heights()
        self.zf = None
        self._read_grid_heights()
        # grid-corner lat/lon arrays, filled by _read_grid_corners_file()
        self.lat_dot = None
        self.lon_dot = None
        self._read_grid_corners_file()
        # final output: region -> airport -> EIC -> pollutant -> {cell: fraction}
        self.surrogates = dict((r, {}) for r in config['REGIONS'])
        self.regions = config['REGIONS']
        self.region_boxes = self.read_region_box_file(config['REGION_BOX_FILE'])
        # one KD-tree of grid corners per region, for nearest-cell lookups
        self.kdtree = self.create_kdtrees()
        self.takoff_angles = config['TAKEOFF_ANGLES']
        self.landing_angles = config['LAND_ANGLES']
        self.airports = self.read_runways(config['RUNWAY_FILE'])
        self.flight_fracts_file = config['FLIGHT_FRACTS_FILE']
        self.flight_fracts = self.read_flight_fracts()
def build(self, regions=None):
''' build spatial surrogate by: region, airport, pollutant, and grid cell
'''
print('\tBuilding Spatial Surrogates')
# pull regions from emissions files, if you can
if regions:
self.regions = regions
# build spatial surrogates
for region in self.regions:
if region not in self.airports:
print('\t\tNo airports given for region #' + str(region) + '. Skipping.')
continue
for location, location_data in self.airports[region].items():
if location not in self.surrogates[region]:
self.surrogates[region][location] = {}
# build a spatial surrogate for a single location
if location_data['runways']:
self._build_airport(location, location_data['runways'], region)
return self.surrogates
    def _build_airport(self, airport, runway_data, region):
        ''' Build a spatial surrogate for airplanes taking off from an airport
        with, potentially, multiple runways.
        Fills self.surrogates[region][airport][eic][poll] in place.
        '''
        # each candidate angle gets an equal share of the activity
        land_scalar = 1.0 / float(len(self.landing_angles))
        toff_scalar = 1.0 / float(len(self.takoff_angles))
        runway_count = 0
        land_new = {}
        toff_new = {}
        taxi_new = {}
        for lat0, lon0, lat1, lon1 in runway_data:
            runway_count += 1
            # memoize the grid cell of both ends of the runway
            cell0 = tuple(self.find_grid_cell((0.0, lon0, lat0), region))
            cell1 = tuple(self.find_grid_cell((0.0, lon1, lat1), region))
            # landing: average the surrogates over all landing angles
            land = {}
            for angle in self.landing_angles:
                self.add_dict(land, self._gen_surrogate_1runway(region, lat0, lon0, lat1, lon1, angle, cell0))
            self.scale_dict(land, land_scalar)
            # take-off: same, in the opposite direction along the runway
            toff = {}
            for angle in self.takoff_angles:
                self.add_dict(toff, self._gen_surrogate_1runway(region, lat1, lon1, lat0, lon0, angle, cell1))
            self.scale_dict(toff, toff_scalar)
            # taxi-ing: split evenly between the runway's two end cells
            if cell0 != cell1:
                taxi = {cell0: 0.5, cell1: 0.5}
            else:
                taxi = {cell0: 1.0}
            if runway_count not in land_new:
                land_new[runway_count] = land
            if runway_count not in toff_new:
                toff_new[runway_count] = toff
            if runway_count not in taxi_new:
                taxi_new[runway_count] = taxi
        # Add runways activity: merge the per-runway surrogates and re-normalize
        land = {}
        toff = {}
        taxi = {}
        for num in range(runway_count):
            self.add_dict(land, land_new[num + 1])
            self.add_dict(toff, toff_new[num + 1])
            self.add_dict(taxi, taxi_new[num + 1])
        self._normalize_surrogate(land)
        self._normalize_surrogate(toff)
        self._normalize_surrogate(taxi)
        # fill surrogates by eic and pollutant
        for eic, poll_fracts in self.flight_fracts.items():
            if eic not in self.surrogates[region][airport]:
                self.surrogates[region][airport][eic] = {}
            for poll, fractions in poll_fracts.items():
                # copy surrogates, so they can be reused
                surr = land.copy()
                taxi1 = taxi.copy()
                toff1 = toff.copy()
                # scale surrogate by flight phase fractions
                self.scale_dict(surr, fractions['landing'])
                self.scale_dict(taxi1, fractions['taxiing'])
                self.scale_dict(toff1, fractions['takeoff'])
                # sum three flight phase surrogates together
                self.add_dict(surr, taxi1)
                self.add_dict(surr, toff1)
                # add to final spatial surrogate collection
                self.surrogates[region][airport][eic][poll] = surr
@staticmethod
def add_dict(orig, new):
''' sum the element in two flat dictionaries
'''
for key in new:
if key not in orig:
orig[key] = new[key]
else:
orig[key] += new[key]
@staticmethod
def scale_dict(d, factor):
''' scaled the float elements in a flat dictionary
'''
for key in d:
d[key] *= factor
@staticmethod
def read_region_box_file(file_path):
''' Read the region box CSV file into a custom dictionary
'''
boxes = [c.rstrip().split(',') for c in open(file_path, 'r').readlines()[1:]]
return dict((int(b[0]), {'lat': (int(b[1]), int(b[2])), 'lon': (int(b[3]), int(b[4]))}) for b in boxes)
def _read_grid_heights(self):
'''Read the heights of all the grid layers in the modeling domain.
NOTE: Layer heights are presumed to be in units of Meters.
NOTE: This function produces only time-independent grid layer heights.
'''
# read in grid cell heights
data = Dataset(self.zf_file, 'r')
# units must be Meters
if data.variables[u'ZF'].units.strip() != 'M':
raise ValueError('Grid file is not in units of meters: ' + file_path)
self.zf = data.variables[u'ZF'][0]
data.close()
# validate dimensions
if self.zf.shape != (self.nlayers, self.nrows, self.ncols):
raise ValueError('Grid file has wrong number of vertical dimensions: ' + self.zf_file)
def _read_grid_corners_file(self):
'''Read the NetCDF-formatted, CMAQ-ready grid definition file "DOT file" to read
the corners of each grid cell.
The results should be of dimensions one more than the grid dimensions.
'''
# read in gridded lat/lon
data = Dataset(self.corners_file, 'r')
self.lat_dot = data.variables['LATD'][0][0]
self.lon_dot = data.variables['LOND'][0][0]
data.close()
# validate dimensions
if (self.lat_dot.shape[0] != self.nrows + 1) or (self.lon_dot.shape[0] != self.nrows + 1):
raise ValueError('The grid file has the wrong number of columns: ' + self.corners_file)
elif (self.lat_dot.shape[1] != self.ncols + 1) or (self.lon_dot.shape[1] != self.ncols + 1):
raise ValueError('The grid file has the wrong number of rows: ' + self.corners_file)
    def _gen_surrogate_1runway(self, region, lat0, lon0, lat1, lon1, angle, cell0=None):
        ''' generate a sparse-matrix 3D spatial surrogate
            for a single runway, going one direction
        (runway end points in degrees, climb/descent angle in radians;
        cell0 optionally memoizes the grid cell of the (lat0, lon0) end)
        '''
        surrogate = {}
        # how long is the runway?
        runway_length = self.haversine(lon0, lat0, lon1, lat1)
        # build trajectory line (layers/vertical, cols/lon, rows/lat))
        # height gained over one runway length at the given angle
        h = runway_length * tan(angle)
        p1 = array([0.0, lon0, lat0], dtype=float)
        p2 = array([h, lon1, lat1], dtype=float)
        # extend the climb line until it reaches the top of the boundary layer
        p_end = self._find_end_point(p1, p2, self.abl_meters)
        # subset grid
        if cell0:
            start_bottom = np.array(cell0)
        else:
            start_bottom = self.find_grid_cell(p1, region)
        start_bottom[0] = 0
        end_top = self.find_grid_cell(p_end, region)
        # NOTE(review): end_top[0] holds a height in meters while the other
        # coordinates are grid indices; bresenham then steps per meter
        # vertically and _convert_vertical_to_grid re-bins into layers
        end_top[0] = self.abl_meters
        # intersect the trajectory with the grid
        cells_by_meter = self.bresenham_line_3d(start_bottom, end_top)
        surrogate = self._convert_vertical_to_grid(cells_by_meter)
        self._normalize_surrogate(surrogate)
        return surrogate
def _convert_vertical_to_grid(self, cells_by_meter):
''' The vertical grid cells are given in feet, but need to
be coverted to grid cell number. This will create a lot of
duplication, so the cell list is converted to a dict.
'''
cells = {}
for cell in cells_by_meter:
z_meters, x, y = cell
z = self._find_vertical_grid_cell(z_meters, x, y)
p = (z, x, y)
if p not in cells:
cells[p] = 0
cells[p] += 1
return cells
@staticmethod
def _normalize_surrogate(surrogate):
''' ensure that all the grid cells in the surrogate add to 1.0.
'''
total = float(sum(surrogate.values()))
for cell in surrogate:
surrogate[cell] /= total
def _find_vertical_grid_cell(self, z_meters, x, y):
'''Given the x and y grid cell, and the height (z)
in meters, calculate what vertical grid cell the point
lay in.
'''
z = 0
layers = [self.zf[i][y][x] for i in range(len(self.zf))]
for i, layer in enumerate(layers):
if layer > z_meters:
z = i
break
z = i
return i
    def find_grid_cell(self, p, region):
        ''' Find the grid cell location of a single point in our 3D grid.
        (Point given as a tuple (height in meters, lon in degrees, lat in degrees)
        Returns array([z, x, y]) of layer / column / row indices.
        '''
        lat_min, lat_max = self.region_boxes[region]['lat']
        lon_min, lon_max = self.region_boxes[region]['lon']
        # define parameters
        lon0 = p[1] * self.RAD_FACTOR
        lat0 = p[2] * self.RAD_FACTOR
        # run KD Tree algorithm: query with the point's unit-sphere vector
        clat0,clon0 = cos(lat0),cos(lon0)
        slat0,slon0 = sin(lat0),sin(lon0)
        dist_sq_min, minindex_1d = self.kdtree[region].query([clat0*clon0, clat0*slon0, slat0])
        # unravel the flat KD-tree index back into the region's 2D box
        y, x = np.unravel_index(minindex_1d, (lat_max - lat_min, lon_max - lon_min))
        y = lat_min + y
        # NOTE(review): x gets a -1 offset but y does not -- presumably aligns
        # dot-file corners with cross-point cells; confirm before changing
        x = lon_min + x - 1
        # truncate values that have gone past the grid boundaries
        if y < 0:
            y = 0
        elif y >= self.nrows:
            y = self.nrows - 1
        if x < 0:
            x = 0
        elif x >= self.ncols:
            x = self.ncols - 1
        # find vertical grid cell: first layer whose top exceeds the height
        z = 0
        if p[0] > 0:
            layers = [self.zf[i][y][x] for i in range(len(self.zf))]
            for i, layer in enumerate(layers):
                if layer > p[0]:
                    z = i
                    break
            z = i
        return array([z, x, y], dtype=int)
def _is_point_in_2d_cell(self, p, x, y):
''' Test if the point "p" is inside the grid cell at x,y.
Return a tuple of the shift you will need to make to find the correct grid cell.
Returns (0, 0) when you are in the correct cell.
'''
x_shift = 0
y_shift = 0
# test the Y/lat coordinate
if y > 0 and p[2] < self.lat_dot[x][y]:
y_shift = -1
elif y < (self.nrows[2] - 1) and p[2] > self.lat_dot[x][y + 1]:
y_shift = 1
# test the X/lon coordinate
if x < (self.ncols[1] - 1) and p[1] < self.lon_dot[x + 1][y]:
x_shift = 1
elif x > 0 and p[1] > self.lon_dot[x][y]:
x_shift = -1
return (x_shift, y_shift)
def create_kdtrees(self):
""" Create a KD Tree for the entire state """
lat_vals = self.lat_dot[:] * self.RAD_FACTOR
lon_vals = self.lon_dot[:] * self.RAD_FACTOR
kdtrees = {}
for region in self.surrogates.keys():
# find the grid cell bounding box for the region in question
lat_min, lat_max = self.region_boxes[region]['lat']
lon_min, lon_max = self.region_boxes[region]['lon']
# slice grid down to this region
latvals = lat_vals[lat_min:lat_max, lon_min:lon_max]
lonvals = lon_vals[lat_min:lat_max, lon_min:lon_max]
# create tree
clat,clon = cos(latvals),cos(lonvals)
slat,slon = sin(latvals),sin(lonvals)
triples = list(zip(np.ravel(clat*clon), np.ravel(clat*slon), np.ravel(slat)))
kdtrees[region] = cKDTree(triples)
return kdtrees
    @staticmethod
    def bresenham_line_3d(p1, p2):
        """ Bresenham's line algorithm, extended to 3D.
        Walks integer steps from p1 to p2 (each given as (z, x, y)) along the
        dominant axis, carrying Bresenham error terms for the other two axes.
        Returns the list of (z, x, y) points visited; p2 itself is appended
        last, unmodified.
        """
        points = []
        z0, x0, y0 = tuple(p1)
        z1, x1, y1 = tuple(p2)
        dx = abs(x1 - x0)
        dy = abs(y1 - y0)
        dz = abs(z1 - z0)
        z, x, y = z0, x0, y0
        # step direction along each axis
        sx = -1 if x0 > x1 else 1
        sy = -1 if y0 > y1 else 1
        sz = -1 if z0 > z1 else 1
        if dz > dx and dz > dy:
            # z is the dominant axis
            err_x = dz / 2.0
            err_y = dz / 2.0
            while z != z1:
                points.append((z, x, y))
                err_x -= dx
                if err_x < 0:
                    x += sx
                    err_x += dz
                err_y -= dy
                if err_y < 0:
                    y += sy
                    err_y += dz
                z += sz
        elif dx > dy:
            # x is the dominant axis
            err_z = dx / 2.0
            err_y = dx / 2.0
            while x != x1:
                points.append((z, x, y))
                err_y -= dy
                if err_y < 0:
                    y += sy
                    err_y += dx
                err_z -= dz
                if err_z < 0:
                    z += sz
                    err_z += dx
                x += sx
        else:
            # y is the dominant axis
            err_x = dy / 2.0
            err_z = dy / 2.0
            while y != y1:
                points.append((z, x, y))
                err_x -= dx
                if err_x < 0:
                    x += sx
                    err_x += dy
                err_z -= dz
                if err_z < 0:
                    z += sz
                    err_z += dy
                y += sy
        # the end point itself is always included
        points.append(p2)
        return points
@staticmethod
def haversine(lon0, lat0, lon1, lat1):
""" Calculate the great circle distance between two points
on the earth (specified in decimal degrees).
Source: http://stackoverflow.com/questions/4913349/
haversine-formula-in-python-bearing-and-distance-between-two-gps-points
"""
# convert decimal degrees to radians
lon0, lat0, lon1, lat1 = map(radians, [lon0, lat0, lon1, lat1])
# haversine formula
dlon = lon1 - lon0
dlat = lat1 - lat0
a = sin(dlat / 2.0) ** 2 + cos(lat0) * cos(lat1) * sin(dlon / 2.0) ** 2
c = 2.0 * arcsin(sqrt(a))
r = 6.371e6 # radius of Earth in meters (use 2.088768e7 for feet).
return c * r
@staticmethod
def _find_end_point(p1, p2, z_end):
''' Given two points defining a 3D line, find the X and Y coordinates
for a given Z coordinate. Using the eqn of a 3D line:
P = p1 + t(p2 - p1)
where: P = (z_end, x_end, y_end)
thus: t = (z_end - p1[0]) / (p2[0] - p1[0])
x_end = p1[1] + t * (p2[1] - p1[1])
y_end = p1[2] + t * (p2[2] - p1[2])
'''
t = (z_end - p1[0]) / abs(p2[0] - p1[0])
x_end = p1[1] + t * (p2[1] - p1[1])
y_end = p1[2] + t * (p2[2] - p1[2])
return SpatialSurrogateBuilder._nan_to_zero(array([z_end, x_end, y_end], dtype=float))
@staticmethod
def _nan_to_zero(a):
''' Change all the NaN/nan values in a numpy array to 0. '''
a[isnan(a)] = 0
return a
def read_flight_fracts(self):
''' read the GATE-custom fractions file that divides emissions between
the 3 flight stages by pollutant and EIC
File format:
EIC,Pollutant,Landing,Taxiing,Takeoff
81080011400000,PM,0.213454075,0.420439845,0.36610608
81080211400000,PM,0.213454075,0.420439845,0.36610608
'''
# open file for reading
f = open(self.flight_fracts_file, 'r')
# parse header
header = f.readline().rstrip().lower().split(',')
eics_col = header.index('eic') if 'eic' in header else 0
poll_col = header.index('pollutant') if 'pollutant' else 1
land_col = header.index('landing') if 'landing' else 2
taxi_col = header.index('taxiing') if 'taxiing' else 3
take_col = header.index('takeoff') if 'takeoff' else 4
# read file line-by-line
fracts = {}
for line in f.readlines():
# parse line
ln = line.rstrip().split(',')
if len(ln) < 5: continue
eic = int(ln[eics_col])
poll = ln[poll_col].upper()
f_land = abs(float(ln[land_col]))
f_taxi = abs(float(ln[taxi_col]))
f_take = abs(float(ln[take_col]))
# normalize fractions, just in case
total = f_land + f_taxi + f_take
f_land /= total
f_taxi /= total
f_take /= total
# fill fraction dict
if eic not in fracts:
fracts[eic] = {}
fracts[eic][poll] = {'landing': f_land, 'taxiing': f_taxi, 'takeoff': f_take}
f.close()
return fracts
@staticmethod
def read_runways(file_path):
''' Read custom runways file,
to build a dictionary of all runways by region
File Format:
airport,region,runway,flights_comjet,flights_other,land_lat,land_lon,takeoff_lat,takeoff_lon
LAX,59,06R/24L,142069,16996,33.9467474722,-118.435327222,33.9501944444,-118.401668667
LAX,59,07L/25R,142069,16996,33.9358305833,-118.41934175,33.9398771944,-118.379776944
'''
airports = {}
f = open(file_path, 'r')
# parse header for column numbers
header = f.readline().rstrip().lower().split(',')
airport_col = header.index('airport') if 'airport' in header else 0
regions_col = header.index('region') if 'region' in header else 1
flights_comjet_col = header.index('flights_comjet') if 'flights_comjet' in header else 3
flights_other_col = header.index('flights_other') if 'flights_other' in header else 4
landlat_col = header.index('land_lat') if 'land_lat' in header else 5
landlon_col = header.index('land_lon') if 'land_lon' in header else 6
takelat_col = header.index('takeoff_lat') if 'takeoff_lat' in header else 7
takelon_col = header.index('takeoff_lon') if 'takeoff_lon' in header else 8
# read file, line by line
for line in f.readlines():
# parse line
ln = line.rstrip().split(',')
if len(ln) < 7: continue
airport = ln[airport_col]
region = int(ln[regions_col])
flights_comjet = int(float(ln[flights_comjet_col]))
flights_other = int(float(ln[flights_other_col]))
land_lat = float(ln[landlat_col])
land_lon = float(ln[landlon_col])
take_lat = float(ln[takelat_col])
take_lon = float(ln[takelon_col])
# simplify the logic matching flight numbers to emission inventories
if flights_comjet <= 0:
flights_comjet = 1
if flights_other <= 0:
flights_other = 1
# fill output dict
if region not in airports:
airports[region] = {}
if airport not in airports[region]:
airports[region][airport] = {'flights_comjet': 0, 'flights_other': 0, 'runways': []}
airports[region][airport]['flights_comjet'] += flights_comjet
airports[region][airport]['flights_other'] += flights_other
airports[region][airport]['runways'].append((land_lat, land_lon, take_lat, take_lon))
return airports
class EmissionsScaler(object):
    """Apply temporal (hour-of-day) and spatial (grid-cell) surrogates to
    daily airport emissions, producing hourly gridded emissions.
    """

    # Sentinel airport/EIC key used in the temporal surrogates when no
    # specific diurnal profile exists.
    DEFAULT = -1

    def __init__(self, config):
        # config accepted for interface symmetry with the other GATE steps;
        # nothing is currently configurable here.
        pass

    def scale(self, emis, spat_surrs, temp_surrs, date):
        ''' Create daily, gridded aircraft emissions
            Inputs:
            Emissions - multi-layer dictionary
                keys: region -> airport -> EIC -> pollutant -> tons/day
            Spatial Surrogates - multi-layer dictionary
                keys: region -> airport -> EIC -> pollutant -> grid cell -> fraction
            Temporal Surrogates - multi-layer dictionary (-1 is default airport code)
                keys: date_string -> region -> airport -> 24-hourly fractions
            Output:
            Gridded Emissions - multi-layer dictionary
                keys: date_string -> EIC -> hr -> poll -> grid cell -> tons/day
            Also returns daily totals keyed region -> EIC -> poll -> tons/day.
        '''
        print('\t\tScaling & Writing: ' + date)
        scaled_emis = {}
        daily_emis = {}
        # temporal surrogates for this specific day
        temporal = temp_surrs[date]
        for region, region_emis in emis.items():
            if region not in daily_emis:
                daily_emis[region] = {}
            for airport, airport_emis in region_emis.items():
                surrs = spat_surrs[region][airport]
                # fall back to the default diurnal profiles when this
                # airport (or EIC below) has no specific profile
                diurnal_by_eic = temporal[airport] if airport in temporal else temporal[self.DEFAULT]
                for eic, polls in airport_emis.items():
                    diurnal = diurnal_by_eic[eic] if eic in diurnal_by_eic else diurnal_by_eic[self.DEFAULT]
                    if eic not in scaled_emis:
                        scaled_emis[eic] = dict((hr, {}) for hr in range(24))
                    if eic not in daily_emis[region]:
                        daily_emis[region][eic] = {}
                    for hr in range(24):
                        fraction_hr = diurnal[hr]
                        if fraction_hr == 0.0:
                            continue
                        for poll, val in polls.items():
                            if poll not in scaled_emis[eic][hr]:
                                scaled_emis[eic][hr][poll] = {}
                            if poll not in daily_emis[region][eic]:
                                daily_emis[region][eic][poll] = 0.0
                            # hourly mass for this pollutant
                            val0 = val * fraction_hr
                            daily_emis[region][eic][poll] += val0
                            # distribute the hourly mass over grid cells
                            for cell, fraction_cell in surrs[eic][poll].items():
                                if cell not in scaled_emis[eic][hr][poll]:
                                    scaled_emis[eic][hr][poll][cell] = 0.0
                                scaled_emis[eic][hr][poll][cell] += val0 * fraction_cell
        return scaled_emis, daily_emis
class DictToNcfWriter(object):
    """Write scaled, gridded aircraft emissions into CMAQ-ready NetCDF files."""

    # Unit conversion, short tons/hour -> grams/second:
    # 2000 lb/ton * 453.59237 g/lb / 3600 s/hr = 251.99583...
    STONS_HR_2_G_SEC = 251.99583333333334
    # Criteria pollutant groups written to the output file.
    POLLS = ['CO', 'NH3', 'NOX', 'SOX', 'PM', 'TOG']
    def __init__(self, config):
        """Read configuration, load the speciation inputs, and pre-build the
        static NetCDF header used for every output day.
        """
        self.directory = config['OUT_DIR']
        # NOTE(review): eval() on the categories file executes arbitrary
        # Python; if the file is plain data, ast.literal_eval would be safer.
        cats = eval(open(config['CATEGORIES_FILE']).read())
        self.eics = cats['eics']
        self.nrows = config['NROWS']
        self.ncols = config['NCOLS']
        self.nlayers = config['NUM_NONZERO_LAYERS']
        self.version = config['VERSION']
        self.grid_file = config['GRID_DOT_FILE']
        self.gspro_file = config['GSPRO_FILE']
        self.gsref_file = config['GSREF_FILE']
        self.weight_file = config['WEIGHT_FILE']
        self.should_zip = config['SHOULD_ZIP']
        self.three_day_month = config['THREE_DAY_MONTH']
        self.print_totals = config['PRINT_TOTALS']
        self.gspro = {}
        self.gsref = {}
        self.groups = {}
        self.num_species = 0
        self.base_year = int(config['BASE_YEAR'])
        self.date_format = config['DATE_FORMAT']
        self.dates = config['DATES']
        # example inventory file name, used later to infer region/snapshot codes
        self.in_file = config['POINT_FILES'][0] if config['POINT_FILES'] else config['AREA_FILES'][0] if config['AREA_FILES'] else ''
        self.in_file = self.in_file.split('/')[-1]
        # build some custom text to put in the NetCDF header
        file_desc = "gspro: " + self.gspro_file.split('/')[-1] + " gsref: " + \
                    self.gsref_file.split('/')[-1] + " molecular weights: " + \
                    self.weight_file.split('/')[-1] + " FF10 point emis: " + \
                    ','.join([pf.split('/')[-1] for pf in config['POINT_FILES']]) + \
                    " FF10 area emis: " + \
                    ','.join([af.split('/')[-1] for af in config['AREA_FILES']])
        history = "3D-gridded aircraft emissions, created by the GATE model v" + \
                  config['GATE_VERSION'] + " on " + datetime.strftime(datetime.now(), '%Y-%m-%d')
        # hard-coded vertical layer heights (sigma levels, surface -> top)
        vglvls = np.float32([1.0, 0.9958, 0.9907, 0.9846, 0.9774, 0.9688, 0.9585, 0.9463, 0.9319,
                             0.9148, 0.8946, 0.8709, 0.8431, 0.8107, 0.7733, 0.6254, 0.293, 0.0788, 0.0])
        # validate number of non-zero layers
        total_layers = config['NLAYERS']
        if self.nlayers > total_layers:
            self.nlayers = total_layers
        # crop layer heights to non-zero layers (nlayers+1 level interfaces)
        if total_layers > self.nlayers:
            vglvls = vglvls[:self.nlayers + 1]
        # default NetCDF header for on-road emissions on California's 4km modeling domain
        self.header = {'IOAPI_VERSION': "$Id: @(#) ioapi library version 3.1 $" + " "*43,
                       'EXEC_ID': "?"*16 + " "*64,
                       'FTYPE': 1,            # file type ID
                       'STIME': 80000,        # start time e.g. 80000 (for GMT)
                       'TSTEP': 10000,        # time step e.g. 10000 (1 hour)
                       'NTHIK': 1,            # Domain: perimeter thickness (boundary files only)
                       'NCOLS': self.ncols,   # Domain: number of columns in modeling domain
                       'NROWS': self.nrows,   # Domain: number of rows in modeling domain
                       'NLAYS': self.nlayers, # Domain: number of vertical layers
                       'GDTYP': 2,            # Domain: grid type ID (lat-lon, UTM, RADM, etc...)
                       'P_ALP': 30.0,         # Projection: alpha
                       'P_BET': 60.0,         # Projection: betha
                       'P_GAM': -120.5,       # Projection: gamma
                       'XCENT': -120.5,       # Projection: x centroid longitude
                       'YCENT': 37.0,         # Projection: y centroid latitude
                       'XORIG': -684000.0,    # Domain: -684000 for CA_4k, -84000 for SC_4k
                       'YORIG': -564000.0,    # Domain: -564000 for CA_4k, -552000 for SC_4k
                       'XCELL': 4000.0,       # Domain: x cell width in meters
                       'YCELL': 4000.0,       # Domain: y cell width in meters
                       'VGTYP': 7,            # Domain: grid type ID (lat-lon, UTM, RADM, etc...)
                       'VGTOP': np.float32(10000.0),  # Domain: Top Vertical layer at 10km
                       'VGLVLS': vglvls,      # Domain: Vertical layer locations
                       'GDNAM': "CMAQ Emissions ",
                       'UPNAM': "GATE ",
                       'FILEDESC': file_desc,
                       'HISTORY': history}
        # Read speciation profiles & molecular weight files
        self._load_weight_file()
        self._load_gsref()
        self._load_gspro()
    def write(self, scaled_emis, daily_emis, in_emis, date):
        ''' Write a CMAQ-ready NetCDF file for a single day.
        '''
        # get Julian date
        dt = datetime.strptime(date, self.date_format)
        # NOTE: the day-of-year part is computed in the *base* year, so the
        # calendar (incl. leap days) follows the base year while the 4-digit
        # year comes from the modeling date itself.
        jdate = int(str(dt.year) + datetime(self.base_year, dt.month, dt.day).strftime('%j'))
        # create empty netcdf file (including file path)
        out_path = self._build_custom_file_path(dt)
        ncf, gmt_shift = self._create_netcdf(out_path, dt, jdate)
        # fill netcdf file with data
        self._fill_grid(in_emis, daily_emis, scaled_emis, date, ncf, gmt_shift, out_path)
        # compress output file
        if self.should_zip:
            # NOTE(review): shell-interpolates out_path; safe only because
            # the path is constructed internally by _build_custom_file_path.
            os.system('gzip -1 ' + out_path)
    def _fill_grid(self, in_emis, daily_emis, scaled_emissions, date, ncf, gmt_shift, out_path):
        ''' Fill the entire modeling domain with a 3D grid for each pollutant.
            Fill the emissions values in each grid cell, for each pollutant.
            Create a separate grid set for each date.
            Old Emis format: region -> date -> hr -> EIC -> poll_grid
            New Emis format: EIC -> hr -> poll -> grid cell -> tons/day
        '''
        # find species position by pollutant
        species = {}
        for group in self.groups:
            for i in range(len(np.atleast_1d(self.groups[group]['species']))):
                species[self.groups[group]['species'][i]] = {'group': group, 'index': i}
        # some mass fractions are not EIC dependent
        nox_fraction = self.gspro['DEFNOX']['NOX']
        sox_fraction = self.gspro['SOX']['SOX']
        # collect EICs silently dropped for missing GSREF entries, to warn later
        dropped_eics = set()
        for hour in range(24):
            # adjust hr for DST
            if gmt_shift == '19':
                hr = (hour - 1) % 24
            else:
                hr = hour
            for poll in self.POLLS:
                for spec in self.groups[poll]['species']:
                    # get species information
                    ind = species[spec]['index']
                    # build default emissions grid, for the sum of all EICs
                    grid = np.zeros((self.nlayers, self.nrows, self.ncols), dtype=np.float32)
                    for eic, eic_data in scaled_emissions.items():
                        if poll not in eic_data[hour]: continue
                        # species fractions (tons/hr -> g/s, then speciate)
                        fraction = self.STONS_HR_2_G_SEC / self.groups[poll]['weights'][ind]
                        if poll == 'TOG':
                            # TOG profiles are stored in moles, so no weight division
                            if int(eic) in self.gsref:
                                if self.gspro[self.gsref[int(eic)]['TOG']]['TOG'][ind] <= 0:
                                    continue
                                fraction = self.STONS_HR_2_G_SEC * self.gspro[self.gsref[int(eic)]['TOG']]['TOG'][ind]
                            else:
                                dropped_eics.add(eic)
                                continue
                        elif poll == 'PM':
                            if int(eic) in self.gsref:
                                fraction *= self.gspro[self.gsref[int(eic)]['PM']]['PM'][ind]
                            else:
                                dropped_eics.add(eic)
                                continue
                        elif poll == 'NOX':
                            fraction *= nox_fraction[ind]
                        elif poll == 'SOX':
                            fraction *= sox_fraction[ind]
                        self._add_grid_cells(grid, eic_data[hour][poll], fraction)
                    # write data block to file
                    ncf.variables[spec][hr,:,:,:] = grid
                    # last hour is the same as the first
                    if hr == 0:
                        ncf.variables[spec][24,:,:,:] = grid
        if dropped_eics:
            print('\t\tEmissions were lost, because these EICs were not found in the GSREF file:')
            for eic in sorted(dropped_eics):
                print('\t\t\t' + str(eic))
        if self.print_totals:
            self._print_daily_totals(daily_emis, out_path)
            self._print_input_output_comparison(ncf, in_emis, daily_emis, out_path)
        ncf.close()
def _print_daily_totals(self, daily_emis, out_path):
''' If requested, print a CSV of the scaled emissions totals.
Input data is in dictionaries of the form:
daily_emis[region][eic][poll] => tons/day
'''
# default criteria pollutants
polls = ['CO', 'NOX', 'SOX', 'TOG', 'PM', 'NH3']
# write output file
fout = open(out_path.replace('.nc7', '.daily_totals.csv'), 'w')
fout.write('Region,EIC,Pollutant,' + ','.join(polls) + ' (tons/day)\n')
# write totals totals
for region, region_data in daily_emis.items():
for eic, eic_data in region_data.items():
line = str(region) + ',' + str(eic)
for poll in polls:
line += ','
if poll in eic_data:
line += str(eic_data[poll])
fout.write(line + '\n')
fout.close()
    def _print_input_output_comparison(self, ncf, emis, daily_emis, out_path):
        ''' If requested, print a CSV of the output emissions totals.
            Input data is in dictionaries of the form:
            emis[region][airport][eic][poll] => tons/day
            daily_emis[region][eic][poll] => tons/day
            The three columns compare raw inputs, post-scaling totals, and
            what actually landed in the NetCDF file (converted back to tons).
        '''
        # find species position by pollutant
        species = {}
        for group in self.groups:
            for i in range(len(np.atleast_1d(self.groups[group]['species']))):
                species[self.groups[group]['species'][i]] = {'group': group, 'index': i}
        # representative species used to re-express NOx/SOx mass totals
        no2_ind = np.where(self.groups['NOX']['species']=='NO2')[0][0]
        so2_ind = np.where(self.groups['SOX']['species']=='SO2')[0][0]
        # create NCF species totals
        totals = {}
        for spec in ncf.variables:
            if spec == 'TFLAG': continue
            totals[spec] = np.sum(ncf.variables[spec][:24,:,:,:])
        # create input pollutant totals
        in_totals = {}
        for airport_emis in emis.values():
            for eic_emis in airport_emis.values():
                for poll_emis in eic_emis.values():
                    for poll, value in poll_emis.items():
                        if poll not in in_totals:
                            in_totals[poll] = 0.0
                        in_totals[poll] += value
        # create scaled emissions totals
        scaled_totals = {}
        for region_emis in daily_emis.values():
            for eic_emis in region_emis.values():
                for poll, value in eic_emis.items():
                    if poll not in scaled_totals:
                        scaled_totals[poll] = 0.0
                    scaled_totals[poll] += value
        # write output file
        fout = open(out_path.replace('.nc7', '.input_output_comparison.csv'), 'w')
        fout.write('Pollutant,Input Files(tons/day),After Scaling(tons/day),NCF Output(tons/day)\n')
        # write pollutant totals
        for poll in sorted(in_totals.keys()):
            ncf_total = 0.0
            for sp in self.groups[poll]['species']:
                ind = species[sp]['index']
                # invert the g/s conversion applied in _fill_grid
                fraction = (self.STONS_HR_2_G_SEC / self.groups[poll]['weights'][ind])
                # fix species weights for multi-species gases
                if poll == 'NOX':
                    fraction *= self.groups[poll]['weights'][ind] / self.groups[poll]['weights'][no2_ind]
                elif poll == 'SOX':
                    fraction *= self.groups[poll]['weights'][ind] / self.groups[poll]['weights'][so2_ind]
                ncf_total += totals[sp] / fraction
            in_total = str(in_totals[poll]) if poll in in_totals else '0.0'
            scaled_total = str(scaled_totals[poll]) if poll in scaled_totals else '0.0'
            fout.write(','.join([poll, in_total, scaled_total, str(ncf_total)]) + '\n')
        fout.close()
def _add_grid_cells(self, grid, grid_cells, fraction):
''' Given a dictionary of (layer, row, col) -> float,
create a 3D grid to store the emissions.
'''
for (z, x, y), value in grid_cells.items():
grid[(z, y, x)] += value * fraction
    def _create_netcdf(self, out_path, d, jdate):
        ''' Creates a blank CMAQ-ready NetCDF file, including all the important
            boilerplate and header information. But does not fill in any emissions data.
            Returns the open Dataset plus the GMT-shift string (hour of GMT at
            local noon), which _fill_grid uses for the DST adjustment.
        '''
        date = d.strftime(self.date_format)
        # define some header variables
        current_date = int(time.strftime("%Y%j"))
        current_time = int(time.strftime("%H%M%S"))
        # create and outline NetCDF file
        ncf = Dataset(out_path, 'w', format='NETCDF4_CLASSIC')
        TSTEP = ncf.createDimension('TSTEP', None)
        DATE_TIME = ncf.createDimension('DATE-TIME', 2)
        LAY = ncf.createDimension('LAY', self.nlayers)
        VAR = ncf.createDimension('VAR', self.num_species)  # number of variables/species
        ROW = ncf.createDimension('ROW', self.nrows)  # Domain: number of rows
        COL = ncf.createDimension('COL', self.ncols)  # Domain: number of columns
        # define TFLAG Variable
        TFLAG = ncf.createVariable('TFLAG', 'i4', ('TSTEP', 'VAR', 'DATE-TIME',), zlib=True)
        TFLAG.units = '<YYYYDDD,HHMMSS>'
        TFLAG.long_name = 'TFLAG'
        TFLAG.var_desc = 'Timestep-valid flags: (1) YYYYDDD or (2) HHMMSS'
        # define variables and attribute definitions
        varl = ''
        for group in self.groups:
            for species in self.groups[group]['species']:
                ncf.createVariable(species, 'f4', ('TSTEP', 'LAY', 'ROW', 'COL'), zlib=True)
                ncf.variables[species].long_name = species
                ncf.variables[species].units = self.groups[group]['units']
                ncf.variables[species].var_desc = 'emissions'
                # IOAPI VAR-LIST entries are 16-character padded
                varl += species.ljust(16)
        # global attributes
        ncf.IOAPI_VERSION = self.header['IOAPI_VERSION']
        ncf.EXEC_ID = self.header['EXEC_ID']
        ncf.FTYPE = self.header['FTYPE']    # file type ID
        ncf.CDATE = current_date            # current date e.g. 2013137
        ncf.CTIME = current_time            # current time e.g. 50126
        ncf.WDATE = current_date            # current date e.g. 2013137
        ncf.WTIME = current_time            # current time e.g. 50126
        ncf.SDATE = jdate                   # scenario date e.g. 2010091
        ncf.STIME = self.header['STIME']    # start time e.g. 80000 (for GMT)
        ncf.TSTEP = self.header['TSTEP']    # time step e.g. 10000 (1 hour)
        ncf.NTHIK = self.header['NTHIK']    # Domain: perimeter thickness (boundary files only)
        ncf.NCOLS = self.header['NCOLS']    # Domain: number of columns in modeling domain
        ncf.NROWS = self.header['NROWS']    # Domain: number of rows in modeling domain
        ncf.NLAYS = self.header['NLAYS']    # Domain: number of vertical layers
        ncf.NVARS = self.num_species        # number of variables/species
        ncf.GDTYP = self.header['GDTYP']    # Domain: grid type ID (lat-lon, UTM, RADM, etc...)
        ncf.P_ALP = self.header['P_ALP']    # Projection: alpha
        ncf.P_BET = self.header['P_BET']    # Projection: betha
        ncf.P_GAM = self.header['P_GAM']    # Projection: gamma
        ncf.XCENT = self.header['XCENT']    # Projection: x centroid longitude
        ncf.YCENT = self.header['YCENT']    # Projection: y centroid latitude
        ncf.XORIG = self.header['XORIG']    # Domain: -684000 for CA_4k, -84000 for SC_4k
        ncf.YORIG = self.header['YORIG']    # Domain: -564000 for CA_4k, -552000 for SC_4k
        ncf.XCELL = self.header['XCELL']    # Domain: x cell width in meters
        ncf.YCELL = self.header['YCELL']    # Domain: y cell width in meters
        ncf.VGTYP = self.header['VGTYP']    # Domain: grid type ID (lat-lon, UTM, RADM, etc...)
        ncf.VGTOP = self.header['VGTOP']    # Domain: Top Vertical layer at 10km
        ncf.VGLVLS = self.header['VGLVLS']  # Domain: Vertical layer locations
        ncf.GDNAM = self.header['GDNAM']
        ncf.UPNAM = self.header['UPNAM']
        ncf.FILEDESC = self.header['FILEDESC']
        ncf.HISTORY = self.header['HISTORY']
        ncf.setncattr('VAR-LIST', varl)  # use this command b/c of python not liking hyphen '-'
        # seconds since epoch at GMT noon of the scenario date
        secs = time.mktime(time.strptime("%s 12" % jdate, "%Y%j %H"))
        gmt_shift = time.strftime("%H", time.gmtime(secs))
        # shift so the TFLAG timestamps start at 08:00 GMT (midnight PST)
        secs -= (int(gmt_shift) - 8) * 60 * 60
        # build TFLAG variable
        tflag = np.ones((25, self.num_species, 2), dtype=np.int32)
        for hr in range(25):
            gdh = time.strftime("%Y%j %H0000", time.gmtime(secs + hr * 60 * 60))
            a_date,ghr = map(int, gdh.split())
            tflag[hr,:,0] = tflag[hr,:,0] * a_date
            tflag[hr,:,1] = tflag[hr,:,1] * ghr
        ncf.variables['TFLAG'][:] = tflag
        ncf.VGTYP = 7
        return ncf, gmt_shift
def _load_gsref(self):
''' load the gsref file
File Format: eic,profile,group
0,CO,CO
0,NH3,NH3
0,SOx,SOX
0,DEFNOx,NOX
0,900,PM
'''
self.gsref = {}
f = open(self.gsref_file, 'r')
for line in f.readlines():
ln = line.rstrip().split(',')
if len(ln) != 3:
continue
eic = int(ln[0])
if eic not in self.eics:
continue
profile = ln[1].upper()
group = ln[2].upper()
if eic not in self.gsref:
self.gsref[eic] = {}
self.gsref[eic][group] = profile
f.close()
def _load_weight_file(self):
""" load molecular weight file
File Format:
NO 30.006 NOX moles/s
NO2 46.006 NOX moles/s
HONO 47.013 NOX moles/s
"""
# read molecular weight text file
fin = open(self.weight_file,'r')
lines = fin.read()
fin.close()
# read in CSV or Fortran-formatted file
lines = lines.replace(',', ' ')
lines = lines.split('\n')
self.groups = {}
# loop through file lines and
for line in lines:
# parse line
columns = line.rstrip().split()
if not columns:
continue
species = columns[0].upper()
weight = np.float32(columns[1])
group = columns[2].upper()
# file output dict
if group not in self.groups:
units = columns[3]
self.groups[group] = {'species': [], 'weights': [], 'units': units}
self.groups[group]['species'].append(species)
self.groups[group]['weights'].append(weight)
# convert weight list to numpy.array
for grp in self.groups:
self.groups[grp]['species'] = np.array(self.groups[grp]['species'], dtype=np.dtype(str))
self.groups[grp]['weights'] = np.array(self.groups[grp]['weights'], dtype=np.float32)
# calculate the number of species total
self.num_species = 0
for group in self.groups:
self.num_species += len(self.groups[group]['species'])
def _load_gspro(self):
''' load the gspro file
File Format: group, pollutant, species, mole fraction, molecular weight=1, mass fraction
1,TOG,CH4,3.1168E-03,1,0.0500000
1,TOG,ALK3,9.4629E-03,1,0.5500000
1,TOG,ETOH,5.4268E-03,1,0.2500000
'''
self.gspro = {}
f = open(self.gspro_file, 'r')
for line in f.readlines():
# parse line
ln = line.rstrip().split(',')
profile = ln[0].upper()
group = ln[1].upper()
if float(ln[5]) <= 0:
continue
if group not in self.groups:
sys.exit('ERROR: Group ' + group + ' not found in molecular weights file.')
pollutant = ln[2].upper()
try:
poll_index = list(self.groups[group]['species']).index(pollutant)
except ValueError:
# we don't care about that pollutant
pass
# start filling output dict
if profile not in self.gspro:
self.gspro[profile] = {}
if group not in self.gspro[profile]:
self.gspro[profile][group] = np.zeros(len(self.groups[group]['species']),
dtype=np.float32)
self.gspro[profile][group][poll_index] = np.float32(ln[5])
# TOG is in moles, not grams
if group == 'TOG':
self.gspro[profile][group][poll_index] = np.float32(ln[3])
f.close()
def _build_custom_file_path(self, date):
""" Build output file directory and path for a daily, multi-region NetCDF file.
NOTE: This method uses an extremely detailed file naming convention,
designed specifically for the CARB. For example:
st_4k.ac.v0938..2012.203107d18..e14..ncf
[statewide]_[4km grid].[aircraft].[version 938]..[base year 2012].
[model year 2031][month 7]d[day 18]..[EIC 14 categories]..ncf
"""
# parse date info
yr, month, day = date.strftime(self.date_format).split('-')
# create output dir, if necessary
out_dir = os.path.join(self.directory, 'ncf')
if not os.path.exists(out_dir):
os.makedirs(out_dir)
# define the grid size string
grid_size = '4k'
grid_name = os.path.basename(self.grid_file)
if '12km' in grid_name:
grid_size = '12k'
elif '36km' in grid_name:
grid_size = '36k'
elif '1km' in grid_name:
grid_size = '1k'
elif '250m' in grid_name:
grid_size = '250m'
# find region from example inventory file
region = 'st_'
if self.in_file[3] == '_':
region = self.in_file[:4]
elif self.in_file[2] == '_':
region = self.in_file[:3]
# find the snapshot code, if any
snapshot = ''
file_bits = self.in_file.split('.')
if len(file_bits) > 8:
if 'snp' in file_bits[6] or 'rf' in file_bits[6]:
snapshot = file_bits[6]
# build the file path, in one of two different formats
if self.three_day_month:
weekday = 'sat' if date.weekday() == 5 else 'sun' if date.weekday() == 6 else 'wdy'
file_name = region + grid_size + '.ac.' + self.version + '..' + str(self.base_year) + \
'.' + yr + month + weekday + '.' + snapshot + '.e14..nc7'
else:
file_name = region + grid_size + '.ac.' + self.version + '..' + str(self.base_year) + \
'.' + yr + month + 'd' + day + '.' + snapshot + '.e14..nc7'
return os.path.join(out_dir, file_name)
# Script entry point (main() is defined earlier in this file).
if __name__ == '__main__':
    main()
|
mmb-carb/GATE
|
GATE.py
|
Python
|
gpl-3.0
| 73,403
|
[
"NetCDF"
] |
777945af8eaee4e8ed9cab7c19294a9b9c01605f90e51bba876ce7af6b4ffb77
|
#!/usr/bin/python
#
# Created on Aug 25, 2016
# @author: Gaurav Rastogi (grastogi@avinetworks.com)
# Eric Anderson (eanderson@avinetworks.com)
# module_check: supported
# Avi Version: 17.1.1
#
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_dnspolicy
author: Gaurav Rastogi (grastogi@avinetworks.com)
short_description: Module for setup of DnsPolicy Avi RESTful Object
description:
- This module is used to configure DnsPolicy object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.4"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent","present"]
created_by:
description:
- Creator name.
- Field introduced in 17.1.1.
description:
description:
- Field introduced in 17.1.1.
name:
description:
- Name of the dns policy.
- Field introduced in 17.1.1.
rule:
description:
- Dns rules.
- Field introduced in 17.1.1.
tenant_ref:
description:
- It is a reference to an object of type tenant.
- Field introduced in 17.1.1.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Uuid of the dns policy.
- Field introduced in 17.1.1.
extends_documentation_fragment:
- avi
'''
EXAMPLES = """
- name: Example to create DnsPolicy object
avi_dnspolicy:
controller: 10.10.25.42
username: admin
password: something
state: present
name: sample_dnspolicy
"""
RETURN = '''
obj:
description: DnsPolicy (api/dnspolicy) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
    """Ansible entry point: build the argument spec and drive the Avi API."""
    # Module-specific options; the shared Avi connection options are merged in
    # below via avi_common_argument_spec().
    field_specs = {
        'state': dict(default='present', choices=['absent', 'present']),
        'created_by': dict(type='str'),
        'description': dict(type='str'),
        'name': dict(type='str'),
        'rule': dict(type='list'),
        'tenant_ref': dict(type='str'),
        'url': dict(type='str'),
        'uuid': dict(type='str'),
    }
    field_specs.update(avi_common_argument_spec())
    module = AnsibleModule(
        argument_spec=field_specs, supports_check_mode=True)
    if not HAS_AVI:
        return module.fail_json(msg=(
            'Avi python API SDK (avisdk>=17.1) is not installed. '
            'For more details visit https://github.com/avinetworks/sdk.'))
    # No fields require special update handling for the dnspolicy object.
    return avi_ansible_api(module, 'dnspolicy',
                           set([]))
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
|
RackSec/ansible
|
lib/ansible/modules/network/avi/avi_dnspolicy.py
|
Python
|
gpl-3.0
| 3,647
|
[
"VisIt"
] |
4af71086237de5db99014bf4d68fb12e777b2ddb24ada7fc182057f9484c5bdc
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2022 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
import os
import re
import copy
import math
import collections
import numpy as np
import qcelemental as qcel
from .vecutil import *
from .exceptions import *
from .libmintscoordentry import NumberValue, VariableValue, CartesianEntry, ZMatrixEntry
from .libmintspointgrp import SymmOps, similar, SymmetryOperation, PointGroup
# Tolerances used throughout the geometry/symmetry machinery below.
LINEAR_A_TOL = 1.0E-2  # When sin(a) is below this, we consider the angle to be linear
DEFAULT_SYM_TOL = 1.0E-8  # default tolerance for symmetry detection
FULL_PG_TOL = 1.0e-8  # tolerance used when assigning the full point group
ZERO = 1.0E-14  # hard floating-point zero
NOISY_ZERO = 1.0E-8  # looser zero for numerically noisy quantities
class LibmintsMolecule():
"""Class to store the elements, coordinates, fragmentation pattern,
charge, multiplicity of a molecule. Largely replicates psi4's libmints
Molecule class, developed by Justin M. Turney and Andy M. Simmonett
with incremental improvements by other psi4 developers.
Roughly, this class mirrors `psi4.core.Molecule`; however, it's never
used directly and instead serves as a base class for `qcdb.Molecule`
in alongside-file molecule.py.
>>> H2OH2O = qcdb.Molecule(\"\"\"
0 1
O1 -1.551007 -0.114520 0.000000
H1 -1.934259 0.762503 0.000000
H2 -0.599677 0.040712 0.000000
--
0 1
X 0.000000 0.000000 0.000000
O2 1.350625 0.111469 0.000000
H3 1.680398 -0.373741 -0.758561
H4 1.680398 -0.373741 0.758561
no_com
no_reorient
units angstrom
\"\"\")
>>> H2O = qcdb.Molecule.init_with_xyz('h2o.xyz')
"""
FullPointGroupList = ["ATOM", "C_inf_v", "D_inf_h", "C1", "Cs", "Ci",
"Cn", "Cnv", "Cnh", "Sn", "Dn", "Dnd", "Dnh", "Td", "Oh", "Ih"]
    def __init__(self):
        """Initialize an empty Molecule; all fields start at their defaults
        and are filled in later by the parsing/builder machinery."""

        # <<< Basic Molecule Information >>>

        # Molecule (or fragment) name
        self.PYname = 'default'
        # Molecule comment
        self.PYcomment = ''
        # Molecule origin
        self.PYprovenance = []
        # Molecule connectivity
        self.PYconnectivity = []
        # The molecular charge
        self.PYmolecular_charge = 0
        # The multiplicity (defined as 2Ms + 1)
        self.PYmultiplicity = 1
        # The units used to define the geometry
        self.PYunits = 'Angstrom'
        # The conversion factor to take input units to Bohr
        self.PYinput_units_to_au = 1.0 / qcel.constants.bohr2angstroms
        # Whether this molecule has at least one zmatrix entry
        self.zmat = False  # TODO None?
        # Whether this molecule has at least one Cartesian entry
        self.cart = False  # TODO None?

        # <<< Coordinates >>>

        # Atom info vector (no knowledge of dummy atoms)
        self.atoms = []
        # Atom info vector (includes dummy atoms)
        self.full_atoms = []
        # A list of all variables known, whether they have been set or not.
        self.all_variables = []
        # A listing of the variables used to define the geometries
        self.geometry_variables = {}
        # Limited lifetime efficiency boost
        self.wholegeom = None

        # <<< Fragmentation >>>

        # The list of atom ranges defining each fragment from parent molecule
        self.fragments = []
        # A list describing how to handle each fragment
        self.fragment_types = []
        # The charge of each fragment
        self.fragment_charges = []
        # The multiplicity of each fragment
        self.fragment_multiplicities = []

        # <<< Frame >>>

        # Move to center of mass or not?
        self.PYmove_to_com = True
        # Reorient or not? UNUSED
        self.PYfix_orientation = False
        # Reinterpret the coord entries or not (Default is true, except for findif)
        self.PYreinterpret_coordentries = True
        # Nilpotence boolean (flagged upon first determination of symmetry frame,
        # reset each time a substantiative change is made)
        self.lock_frame = False

        # <<< Symmetry >>>

        # Point group to use with this molecule
        self.pg = None
        # Full point group
        self.full_pg = 'C1'
        # n of the highest rotational axis Cn
        self.PYfull_pg_n = 1
        # Symmetry string from geometry specification
        self.PYsymmetry_from_input = None
        # Number of unique atoms
        self.PYnunique = 0
        # Number of equivalent atoms per unique atom (length nunique)
        self.nequiv = 0
        # Equivalent atom mapping array (length 1st dim nunique)
        self.equiv = 0
        # Atom to unique atom mapping array (length natom)
        self.PYatom_to_unique = 0
    # <<< Simple Methods for Basic Molecule Information >>>

    def name(self) -> str:
        """Get molecule name
        >>> print(H2OH2O.name())
        water_dimer
        """
        return self.PYname

    def set_name(self, name: str):
        """Set molecule name
        >>> H2OH2O.set_name('water_dimer')
        """
        self.PYname = name

    def comment(self) -> str:
        """Get molecule comment
        >>> print(H2OH2O.comment())
        I am S22-2
        """
        return self.PYcomment

    def set_comment(self, comment: str):
        """Set molecule comment
        >>> H2OH2O.set_comment('I am S22-2')
        """
        self.PYcomment = comment
    def provenance(self):
        """Get molecule provenance (returns a deep copy)
        >>> print(H2OH2O.provenance())
        {'creator': 'QCElemental',
         'routine': 'qcelemental.molparse.from_arrays',
         'version': 'v0.1.0a+8.g465f4e3'}
        """
        return copy.deepcopy(self.PYprovenance)

    def set_provenance(self, provenance):
        """Set molecule provenance
        >>> H2OH2O.set_provenance('water_dimer')
        """
        self.PYprovenance = provenance

    def connectivity(self):
        """Get molecule connectivity (returns a deep copy)
        >>> print(H2OH2O.connectivity())
        [(0, 1, 1.0), (0, 2, 1.0)]
        """
        return copy.deepcopy(self.PYconnectivity)

    def set_connectivity(self, connectivity):
        """Set molecule connectivity as (atom_a, atom_b, bond_order) triples
        >>> H2OH2O.set_connectivity([(0, 1, 1.0), (0, 2, 1.0)])
        """
        self.PYconnectivity = connectivity
    def natom(self) -> int:
        """Number of atoms (dummy atoms excluded)
        >>> print(H2OH2O.natom())
        6
        """
        return len(self.atoms)

    def nallatom(self) -> int:
        """Number of all atoms (includes dummies)
        >>> print(H2OH2O.nallatom())
        7
        """
        return len(self.full_atoms)
    def molecular_charge(self) -> int:
        """Gets the molecular charge
        >>> print(H2OH2O.molecular_charge())
        -2
        """
        return self.PYmolecular_charge

    def set_molecular_charge(self, charge):
        """Sets the molecular charge

        Accepts any integral numeric (e.g. -2 or -2.0); raises
        ValidationError for fractional values.
        >>> H2OH2O.set_molecular_charge(-2)
        """
        if not float(charge).is_integer():
            raise ValidationError('System charge must be integer: {}'.format(charge))
        self.PYcharge_specified = True
        self.PYmolecular_charge = int(charge)
    def multiplicity(self) -> int:
        """Get the multiplicity (defined as 2Ms + 1)
        >>> print(H2OH2O.multiplicity())
        """
        return self.PYmultiplicity

    def set_multiplicity(self, mult):
        """Sets the multiplicity (defined as 2Ms + 1)

        Raises ValidationError for fractional or negative values.
        NOTE(review): the check uses `< 0.0`, so a multiplicity of 0 passes
        even though 2Ms + 1 >= 1 physically — confirm whether 0 is ever a
        deliberate sentinel before tightening.
        >>> H2OH2O.set_multiplicity(3)
        """
        if not float(mult).is_integer() or float(mult) < 0.0:
            raise ValidationError('System multiplicity must be positive integer: {}'.format(mult))
        self.PYmultiplicity_specified = True
        self.PYmultiplicity = int(mult)
def units(self):
"""Gets the geometry units
>>> print(H2OH2O.units())
Angstrom
"""
return self.PYunits
def set_units(self, units):
"""Sets the geometry units (constructor use).
Parameters
----------
units : {'Angstrom', 'Bohr'}
Units of input geometry.
Returns
-------
None
Examples
--------
# [1]
>>> H2OH2O.set_units('Angstrom')
"""
if units == 'Angstrom':
self.PYunits = units
self.PYinput_units_to_au = 1.0 / qcel.constants.bohr2angstroms
elif units == 'Bohr':
self.PYunits = units
self.PYinput_units_to_au = 1.0
else:
raise ValidationError("""Molecule::set_units: argument must be 'Angstrom' or 'Bohr'.""")
def input_units_to_au(self):
"""Gets the geometry unit conversion."""
return self.PYinput_units_to_au
def set_input_units_to_au(self, conv):
"""Sets the geometry unit conversion. May be used to override internal a2b physconst"""
if abs(conv - self.PYinput_units_to_au) < 0.05:
self.PYinput_units_to_au = conv
else:
raise ValidationError("""No big perturbations to physical constants!""")
def has_zmatrix(self):
"""Gets the presence of any zmatrix entry
>>> print(H2OH2O.has_zmatrix())
False
"""
return self.zmat
def set_has_zmatrix(self, tf):
"""Sets the presence of any zmatrix entry
>>> H2OH2O.set_has_zmatrix(True)
"""
self.zmat = tf
def has_cartesian(self):
"""Gets the presence of any Cartesian entry
>>> print(H2OH2O.has_cartesian())
False
"""
return self.cart
def set_has_cartesian(self, tf):
"""Sets the presence of any Cartesian entry
>>> H2OH2O.set_has_cartesian(True)
"""
self.cart = tf
# <<< Simple Methods for Coordinates >>>
def Z(self, atom):
"""Nuclear charge of atom (0-indexed)
>>> print(H2OH2O.Z(4))
1
"""
return self.atoms[atom].Z()
def x(self, atom):
"""x position of atom (0-indexed) in Bohr
>>> print(H2OH2O.x(4))
3.17549201425
"""
return self.input_units_to_au() * self.atoms[atom].compute()[0]
def y(self, atom):
"""y position of atom (0-indexed) in Bohr
>>> print(H2OH2O.y(4))
-0.706268134631
"""
return self.input_units_to_au() * self.atoms[atom].compute()[1]
def z(self, atom):
"""z position of atom (0-indexed) in Bohr
>>> print(H2OH2O.z(4))
-1.43347254509
"""
return self.input_units_to_au() * self.atoms[atom].compute()[2]
def xyz(self, atom, np_out=False):
"""Returns a Vector3 with x, y, z position of atom (0-indexed)
in Bohr or coordinate at *posn*
>>> print(H2OH2O.xyz(4))
[3.175492014248769, -0.7062681346308132, -1.4334725450878665]
"""
xyz = self.input_units_to_au() * np.asarray(self.atoms[atom].compute())
if np_out:
return xyz
else:
return xyz.tolist()
def mass(self, atom):
"""Returns mass of atom (0-indexed)
>>> print(H2OH2O.mass(4))
1.00782503207
"""
if self.atoms[atom].mass() != 0.0:
return self.atoms[atom].mass()
if math.fabs(self.atoms[atom].Z() - int(self.atoms[atom].Z())) > 0.0:
print("""WARNING: Obtaining masses from atom with fractional charge...may be incorrect!!!\n""")
# TODO outfile
return qcel.periodictable.to_mass(int(self.atoms[atom].Z()))
def set_mass(self, atom, mass):
"""Set the mass of a particular atom (good for isotopic substitutions).
Parameters
----------
atom : int
0-indexed, dummy-inclusive atom index to set.
mass : float
Non-negative mass in [u] for `atom`.
Returns
-------
None
"""
mass = float(mass)
if mass < 0.0:
raise ValidationError('Mass must be positive: {}'.format(mass))
self.lock_frame = False
self.full_atoms[atom].set_mass(mass)
self.full_atoms[atom].set_A(-1)
def symbol(self, atom):
"""Returns the cleaned up label of the atom (C2 => C, H4 = H) (0-indexed)
>>> print(H2OH2O.symbol(4))
H
"""
return self.atoms[atom].symbol()
def label(self, atom):
"""Returns the original label of the atom (0-indexed) as given in the input file (C2, H4). (0-indexed)
>>> print(H2OH2O.label(4))
H3
"""
return self.atoms[atom].label()
def charge(self, atom):
"""Returns charge of atom (0-indexed).
Related to SAD guess in libmints version.
>>> print(H2OH2O.charge(4))
1.0
"""
return self.atoms[atom].charge()
def mass_number(self, atom):
"""Mass number (A) of atom (0-indexed)
>>> print(H2OH2O.mass_number(4))
1
"""
return self.atoms[atom].A()
def fZ(self, atom):
"""Nuclear charge of atom (includes dummies)
>>> print(H2OH2O.fZ(4))
8
"""
return self.full_atoms[atom].Z()
def fx(self, atom):
"""x position of atom (0-indexed, includes dummies) in Bohr
>>> print(H2OH2O.fx(4))
2.55231135823
"""
return self.input_units_to_au() * self.full_atoms[atom].compute()[0]
def fy(self, atom):
"""y position of atom (0-indexed, includes dummies) in Bohr
>>> print(H2OH2O.fy(4))
0.210645882307
"""
return self.input_units_to_au() * self.full_atoms[atom].compute()[1]
def fz(self, atom):
"""z position of atom (0-indexed, includes dummies) in Bohr
>>> print(H2OH2O.fz(4))
0.0
"""
return self.input_units_to_au() * self.full_atoms[atom].compute()[2]
def fxyz(self, atom):
"""Returns a Vector3 with x, y, z position of atom
(0-indexed) in Bohr (includes dummies)
>>> print(H2OH2O.fxyz(4))
[2.5523113582286716, 0.21064588230662976, 0.0]
"""
return scale(self.full_atoms[atom].compute(), self.input_units_to_au())
def fmass(self, atom):
"""Returns mass of atom (0-indexed, includes dummies)
>>> print(H2OH2O.fmass(4))
15.9949146196
"""
return self.full_atoms[atom].mass()
def fsymbol(self, atom):
"""Returns the cleaned up label of the atom (C2 => C, H4 = H) (includes dummies) (0-indexed)
>>> print(H2OH2O.fsymbol(4))
O
"""
return self.full_atoms[atom].symbol()
def flabel(self, atom):
"""Returns the original label of the atom (0-indexed) as given in
the input file (C2, H4) (includes dummies)
>>> print(H2OH2O.flabel(4))
O2
"""
return self.full_atoms[atom].label()
def fcharge(self, atom):
"""Returns charge of atom (0-indexed, includes dummies).
Related to SAD guess in libmints version.
>>> print(H2OH2O.fcharge(4))
8.0
"""
return self.full_atoms[atom].charge()
def fmass_number(self, atom):
"""Mass number of atom (0-indexed)
>>> print(H2OH2O.fmass_number(4))
1
"""
return self.full_atoms[atom].A()
# <<< Simple Methods for Fragmentation >>>
def nfragments(self):
"""The number of fragments in the molecule.
>>> print(H2OH2O.nfragments())
2
"""
return len(self.fragments)
def nactive_fragments(self):
"""The number of active fragments in the molecule.
>>> print(H2OH2O.nactive_fragments())
2
"""
n = 0
for fr in range(self.nfragments()):
if self.fragment_types[fr] == 'Real':
n += 1
return n
def activate_all_fragments(self):
"""Sets all fragments in the molecule to be active."""
self.lock_frame = False
for fr in range(self.nfragments()):
self.fragment_types[fr] = 'Real'
def set_active_fragment(self, fr):
"""Tags fragment index *fr* as composed of real atoms."""
self.lock_frame = False
self.fragment_types[fr - 1] = 'Real'
def set_active_fragments(self, reals):
"""Tags the fragments in array *reals* as composed of real atoms."""
self.lock_frame = False
for fr in reals:
self.fragment_types[fr - 1] = 'Real'
def set_ghost_fragment(self, fr):
"""Tags fragment index *fr* as composed of ghost atoms."""
self.lock_frame = False
self.fragment_types[fr - 1] = 'Ghost'
def set_ghost_fragments(self, ghosts):
"""Tags the fragments in array *ghosts* as composed of ghost atoms."""
self.lock_frame = False
for fr in ghosts:
self.fragment_types[fr - 1] = 'Ghost'
def deactivate_all_fragments(self):
"""Sets all fragments in the molecule to be inactive."""
self.lock_frame = False
for fr in range(self.nfragments()):
self.fragment_types[fr] = 'Absent'
def extract_subsets(self, reals, ghosts=[]):
"""Wrapper for :py:func:`~qcdb.molecule.extract_fragments`.
See note there. This function can be used as long as not
in psi4 input file. Use extract_fragments directly, then.
>>> H2OH2O.extract_subsets(2) # monomer B, unCP-corrected
>>> H2OH2O.extract_subsets(2,1) # monomer B, CP-corrected
>>> obj.extract_subsets(1,[2,3]) # monomer A, CP-corrected if obj is tri-molecular complex
"""
return self.extract_fragments(reals, ghosts=ghosts)
def extract_fragments(self, reals, ghosts=[]):
"""Makes a copy of the molecule, returning a new molecule with
only certain fragment atoms present as either ghost or real atoms
*reals*: The list or int of fragments (1-indexed) that should be present in the molecule as real atoms.
*ghosts*: The list or int of fragments (1-indexed) that should be present in the molecule as ghosts.
(method name in libmints is extract_subsets. This is different
in qcdb because the psi4 input parser tries to process lines with
that term, giving rise to Boost:Python type conlicts.) See usage
at :py:func:`~qcdb.molecule.extract_fragments`.
"""
lreals = []
try:
for idx in reals:
lreals.append(idx - 1)
except TypeError:
lreals = [reals - 1]
lghosts = []
try:
for idx in ghosts:
lghosts.append(idx - 1)
except TypeError:
lghosts = [ghosts - 1]
if len(lreals) + len(lghosts) > self.nfragments():
raise ValidationError('Molecule::extract_fragments: sum of real- and ghost-atom subsets is greater than the number of subsets')
subset = self.clone()
subset.deactivate_all_fragments()
for fr in lreals:
subset.set_active_fragment(fr + 1) # the active fragment code subtracts 1
for fr in lghosts:
subset.set_ghost_fragment(fr + 1) # the ghost fragment code subtracts 1
subset.update_geometry()
return subset
def get_fragments(self):
"""The list of atom ranges defining each fragment from parent molecule.
Returns
-------
list of lists
(nfr, 2) actual member data, for constructor use only.
"""
return self.fragments
def get_fragment_types(self):
"""A list describing how to handle each fragment.
Returns
-------
list
(nfr, ) actual member data, for constructor use only.
"""
return self.fragment_types
def get_fragment_charges(self):
"""The charge of each fragment.
Returns
-------
list
(nfr, ) actual member data, for constructor use only.
"""
return self.fragment_charges
def get_fragment_multiplicities(self):
"""The multiplicity of each fragment.
Returns
-------
list
(nfr, ) actual member data, for constructor use only.
"""
return self.fragment_multiplicities
# <<< Methods for Construction >>>
def create_molecule_from_string(self, text):
"""Given a string *text* of psi4-style geometry specification
(including newlines to separate lines), builds a new molecule.
Called from constructor.
"""
raise FeatureDeprecated("qcdb.Molecule.create_molecule_from_string. Replace with qcdb.Molecule.from_string(..., dtype='psi4+')")
def init_with_checkpoint(self, chkpt):
""" **NYI** Pull information from the *chkpt* object passed
(method name in libmints is init_with_chkpt)
"""
raise FeatureNotImplemented('Molecule::init_with_checkpoint') # FINAL
def init_with_io(self, psio):
""" **NYI** Pull information from a chkpt object created from psio
(method name in libmints is init_with_psio)
"""
raise FeatureNotImplemented('Molecule::init_with_io') # FINAL
def clone(self):
"""Returns new, independent Molecule object.
>>> dimer = H2OH2O.clone()
"""
return copy.deepcopy(self)
# <<< Methods for Printing >>>
    def print_out(self):
        """Print the molecule geometry, in its stored units, to stdout.

        Emits point-group headers (when symmetry has been determined), a
        charge/multiplicity banner, one row per real atom (ghost atoms are
        wrapped as ``Gh(symbol)``), then a per-atom listing of any assigned
        basis sets.
        (method name in libmints is print)

        >>> H2OH2O.print_out()
        Geometry (in Angstrom), charge = -2, multiplicity = 3:
           Center              X                  Y                   Z
        ------------   -----------------  -----------------  -----------------
               O         -1.551007000000    -0.114520000000     0.000000000000
               H         -1.934259000000     0.762503000000     0.000000000000
               H         -0.599677000000     0.040712000000     0.000000000000
               O          1.350625000000     0.111469000000     0.000000000000
               H          1.680398000000    -0.373741000000    -0.758561000000
               H          1.680398000000    -0.373741000000     0.758561000000
        """
        text = ""
        if self.natom():
            # symmetry headers are only available once point groups are set
            if self.pg:
                text += """    Molecular point group: %s\n""" % (self.pg.symbol())
            if self.full_pg:
                text += """    Full point group: %s\n\n""" % (self.get_full_point_group())
            text += """    Geometry (in %s), charge = %d, multiplicity = %d:\n\n""" % \
                ('Angstrom' if self.units() == 'Angstrom' else 'Bohr', self.molecular_charge(), self.multiplicity())
            text += """       Center              X                  Y                   Z       \n"""
            text += """    ------------   -----------------  -----------------  -----------------\n"""
            for i in range(self.natom()):
                geom = self.atoms[i].compute()
                # a Z of 0 marks a ghost atom; render it as Gh(symbol)
                text += """    %3s%-7s """ % ("" if self.Z(i) else "Gh(", self.symbol(i) + ("" if self.Z(i) else ")"))
                for j in range(3):
                    text += """  %17.12f""" % (geom[j])
                text += "\n"
            # TODO if (Process::environment.options.get_int("PRINT") > 2) {
            text += "\n"
            for i in range(self.natom()):
                # mass number -1 means "unset"; omit it from the label
                Astr = '' if self.mass_number(i) == -1 else str(self.mass_number(i))
                text += """    %8s\n""" % (Astr + self.label(i))
                for bas in self.atoms[i].basissets().keys():
                    text += """              %-15s %-20s""" % (bas,
                        self.atoms[i].basissets()[bas])
                    if bas in self.atoms[i].shells():
                        text += """%s""" % (self.atoms[i].shells()[bas])
                    text += '\n'
            text += "\n"
        else:
            text += "  No atoms in this molecule.\n"
        print(text)
        # TODO outfile
def print_out_in_bohr(self):
"""Print the molecule in Bohr. Same as :py:func:`print_out` only in Bohr.
(method name in libmints is print_in_bohr)
"""
text = ""
if self.natom():
if self.pg:
text += """ Molecular point group: %s\n""" % (self.pg.symbol())
if self.full_pg:
text += """ Full point group: %s\n\n""" % (self.get_full_point_group())
text += """ Geometry (in %s), charge = %d, multiplicity = %d:\n\n""" % \
('Bohr', self.molecular_charge(), self.multiplicity())
text += """ Center X Y Z \n"""
text += """ ------------ ----------------- ----------------- -----------------\n"""
for i in range(self.natom()):
text += """ %3s%-7s """ % ("" if self.Z(i) else "Gh(", self.symbol(i) + ("" if self.Z(i) else ")"))
text += (""" %17.12f""" * 3).format(*(self.xyz(i)))
text += "\n"
text += "\n"
else:
text += " No atoms in this molecule.\n"
print(text)
# TODO outfile
def print_out_in_angstrom(self):
"""Print the molecule in Angstroms. Same as :py:func:`print_out` only always in Angstroms.
(method name in libmints is print_in_angstrom)
"""
text = ""
if self.natom():
if self.pg:
text += """ Molecular point group: %s\n""" % (self.pg.symbol())
if self.full_pg:
text += """ Full point group: %s\n\n""" % (self.get_full_point_group())
text += """ Geometry (in %s), charge = %d, multiplicity = %d:\n\n""" % \
('Angstrom', self.molecular_charge(), self.multiplicity())
text += """ Center X Y Z \n"""
text += """ ------------ ----------------- ----------------- -----------------\n"""
for i in range(self.natom()):
text += """ %3s%-7s """ % ("" if self.Z(i) else "Gh(", self.symbol(i) + ("" if self.Z(i) else ")"))
text += (""" %17.12f""" * 3).format(*self.xyz(i) * qcel.constants.bohr2angstroms)
text += "\n"
text += "\n"
else:
text += " No atoms in this molecule.\n"
print(text)
# TODO outfile
    def print_full(self):
        """Print full atom list. Same as :py:func:`print_out` only displays dummy atoms.
        """
        text = ""
        if self.natom():
            # symmetry headers are only available once point groups are set
            if self.pg:
                text += """    Molecular point group: %s\n""" % (self.pg.symbol())
            if self.full_pg:
                text += """    Full point group: %s\n\n""" % (self.get_full_point_group())
            text += """    Geometry (in %s), charge = %d, multiplicity = %d:\n\n""" % \
                (self.units(), self.molecular_charge(), self.multiplicity())
            text += """       Center              X                  Y                   Z       \n"""
            text += """    ------------   -----------------  -----------------  -----------------\n"""
            # iterate full_atoms so dummy (X) entries appear too
            for i in range(self.nallatom()):
                geom = self.full_atoms[i].compute()
                # fZ of 0 marks a ghost atom; render it as Gh(symbol)
                text += """    %3s%-7s """ % ("" if self.fZ(i) else "Gh(", self.fsymbol(i) + ("" if self.fZ(i) else ")"))
                for j in range(3):
                    text += """  %17.12f""" % (geom[j])
                text += "\n"
            text += "\n"
        else:
            text += "  No atoms in this molecule.\n"
        print(text)
        # TODO outfile
    def print_in_input_format(self):
        """Print the molecule in the same format that the user provided.

        Each atom is rendered by its own CoordEntry (Cartesian or zmatrix
        line), followed by any named geometry variables and their values.
        """
        text = ""
        if self.nallatom():
            text += "    Geometry (in %s), charge = %d, multiplicity = %d:\n\n" % \
                ("Angstrom" if self.units() == 'Angstrom' else "Bohr",
                 self.molecular_charge(), self.multiplicity())
            for i in range(self.nallatom()):
                if self.fZ(i) or self.fsymbol(i) == "X":
                    text += "    %-8s" % (self.fsymbol(i))
                else:
                    # zero nuclear charge but not a dummy => ghost atom
                    text += "    %-8s" % ("Gh(" + self.fsymbol(i) + ")")
                text += self.full_atoms[i].print_in_input_format()
            text += "\n"
            if len(self.geometry_variables):
                for vb, val in self.geometry_variables.items():
                    text += """    %-10s=%16.10f\n""" % (vb, val)
                text += "\n"
        print(text)
        # TODO outfile
    def everything(self):
        """Quick print of class data (debugging aid); dumps counts, charge
        state, units, frame flags, symmetry, and fragment tallies to stdout.
        """
        text = """  ==> qcdb Molecule %s <==\n\n""" % (self.name())
        text += """  Natom         %d\t\tNallatom       %d\n""" % (self.natom(), self.nallatom())
        text += """  charge        %d\t\tspecified?     NA\n""" % (self.molecular_charge())
        text += """  multiplicity  %d\t\tspecified?     NA\n""" % (self.multiplicity())
        text += """  units         %s\tconversion     %f\n""" % (self.units(), self.input_units_to_au())
        text += """  DOcom?        %s\t\tDONTreorient?  %s\n""" % (self.PYmove_to_com, self.orientation_fixed())
        text += """  reinterpret?  %s\t\tlock_frame?    %s\n""" % (self.PYreinterpret_coordentries, self.lock_frame)
        text += """  input symm    %s\n""" % (self.symmetry_from_input())
        text += """  Nfragments    %d\t\tNactive        %d\n""" % (self.nfragments(), self.nactive_fragments())
        text += """  zmat?         %s\n""" % (self.has_zmatrix())
        print(text)
    def create_psi4_string_from_molecule(self, force_c1=False):
        """Regenerates a input file molecule specification string from the
        current state of the Molecule. Contains geometry info,
        fragmentation, charges and multiplicities, and any frame
        restriction.

        Parameters
        ----------
        force_c1 : bool
            When True, append ``symmetry c1`` to the specification.

        Returns
        -------
        str
            psi4-format molecule block (empty string when no atoms).
        """
        text = ""
        if self.nallatom():
            # append units and any other non-default molecule keywords
            text += "    units %-s\n" % ("Angstrom" if self.units() == 'Angstrom' else "Bohr")
            if not self.PYmove_to_com:
                text += "    no_com\n"
            if self.PYfix_orientation:
                text += "    no_reorient\n"
            if force_c1:
                text += "    symmetry c1\n"
            text += "    {} {}\n    --\n".format(self.molecular_charge(), self.multiplicity())
            # append atoms and coordentries and fragment separators with charge and multiplicity
            Pfr = 0  # count of fragments actually printed (absent ones may be skipped)
            for fr in range(self.nfragments()):
                # absent fragments can be dropped entirely unless zmatrix
                # entries might reference their (dummy) atoms
                if self.fragment_types[fr] == 'Absent' and not self.has_zmatrix():
                    continue
                text += "%s    %s%d %d\n" % (
                    "" if Pfr == 0 else "    --\n",
                    "#" if self.fragment_types[fr] == 'Ghost' or self.fragment_types[fr] == 'Absent' else "",
                    self.fragment_charges[fr], self.fragment_multiplicities[fr])
                Pfr += 1
                for at in range(self.fragments[fr][0], self.fragments[fr][1] + 1):
                    if self.fragment_types[fr] == 'Absent':
                        # placeholder dummy keeps zmatrix indexing intact
                        text += "    %-8s" % ("X")
                    elif self.fZ(at) or self.fsymbol(at) == "X":
                        text += "    %-8s" % (self.flabel(at))
                    else:
                        # zero charge, not a dummy => ghost atom
                        text += "    %-8s" % ("Gh(" + self.flabel(at) + ")")
                    text += "    %s" % (self.full_atoms[at].print_in_input_format())
            text += "\n"
            # append any coordinate variables
            if len(self.geometry_variables):
                for vb, val in self.geometry_variables.items():
                    text += """    %-10s=%16.10f\n""" % (vb, val)
                text += "\n"
        return text
# <<< Involved Methods for Coordinates >>>
    def get_coord_value(self, vstr):
        """Attempts to interpret a string as a double, if not it assumes it's a variable.

        Returns a NumberValue for numeric literals, otherwise a VariableValue
        bound to ``self.geometry_variables`` (negated when *vstr* begins
        with '-'). Variable names are uppercased and recorded in
        ``self.all_variables``.
        """
        vstr = vstr.upper()
        realNumber = re.compile(r"""[-+]?(?:(?:\d*\.\d+)|(?:\d+\.?))(?:[Ee][+-]?\d+)?""", re.VERBOSE)
        # handle number values
        if realNumber.match(vstr):
            return NumberValue(float(vstr))
        # handle variable values, whether defined or not
        else:
            # TDA is predefined: the tetrahedral angle in degrees
            if vstr == 'TDA':
                self.geometry_variables[vstr] = 360.0 * math.atan(math.sqrt(2)) / math.pi
            # handle negative variable values (ignore leading '-' and return minus the value)
            if vstr[0] == '-':
                self.all_variables.append(vstr[1:])
                return VariableValue(vstr[1:], self.geometry_variables, True)
            # handle normal variable values
            else:
                self.all_variables.append(vstr)
                return VariableValue(vstr, self.geometry_variables)
def add_atom(self, Z, x, y, z, symbol, mass=0.0, charge=0.0, label='', A=-1, lineno=-1):
"""Add an atom to the molecule
*Z* atomic number
*x* cartesian coordinate
*y* cartesian coordinate
*z* cartesian coordinate
*symbol* atomic symbol to use
*mass* mass to use if non standard
*charge* charge to use if non standard
*label* extended symbol with user info
*A* mass number
*lineno* line number when taken from a string
"""
self.lock_frame = False
self.set_has_cartesian(True)
if label == '':
label = symbol
#if self.atom_at_position([x, y, z]) == -1:
if True:
# Dummies go to full_atoms, ghosts need to go to both.
self.full_atoms.append(CartesianEntry(self.nallatom(), Z, charge, mass, symbol, label, A,
NumberValue(x), NumberValue(y), NumberValue(z)))
if label.upper() != 'X':
self.atoms.append(self.full_atoms[-1])
else:
raise ValidationError("Molecule::add_atom: Adding atom on top of an existing atom.")
    # For use with atoms defined with ZMAT or variable values, i.e., not Cartesian and NumberValue
    def add_unsettled_atom(self, Z, anchor, symbol, mass=0.0, charge=0.0, label='', A=-1):
        """Append an atom whose coordinates are given symbolically.

        *anchor* is a list of coordinate tokens whose length selects the
        entry type: 3 -> Cartesian with possibly-variable values;
        0/2/4/6 -> zmatrix lines 1 through 4. Any other length raises
        ValidationError. Distances/angles referencing dummy (X) atoms are
        marked fixed.
        """
        self.lock_frame = False
        numEntries = len(anchor)
        currentAtom = len(self.full_atoms)
        # handle cartesians
        if numEntries == 3:
            self.set_has_cartesian(True)
            xval = self.get_coord_value(anchor[0])
            yval = self.get_coord_value(anchor[1])
            zval = self.get_coord_value(anchor[2])
            self.full_atoms.append(CartesianEntry(currentAtom, Z, charge,
                                                  mass, symbol, label, A,
                                                  xval, yval, zval))
        # handle first line of Zmat
        elif numEntries == 0:
            self.set_has_zmatrix(True)
            self.full_atoms.append(ZMatrixEntry(currentAtom, Z, charge,
                                                mass, symbol, label, A))
        # handle second line of Zmat
        elif numEntries == 2:
            self.set_has_zmatrix(True)
            rTo = self.get_anchor_atom(anchor[0], '')
            # anchors must refer to atoms already defined
            if rTo >= currentAtom:
                raise ValidationError("Error finding defined anchor atom {}".format(anchor[0]))
            rval = self.get_coord_value(anchor[1])
            # values attached to dummy atoms stay fixed during optimization
            if self.full_atoms[rTo].symbol() == 'X':
                rval.set_fixed(True)
            self.full_atoms.append(ZMatrixEntry(currentAtom, Z, charge,
                                                mass, symbol, label, A,
                                                self.full_atoms[rTo], rval))
        # handle third line of Zmat
        elif numEntries == 4:
            self.set_has_zmatrix(True)
            rTo = self.get_anchor_atom(anchor[0], '')
            if rTo >= currentAtom:
                raise ValidationError("Error finding defined anchor atom {}".format(anchor[0]))
            aTo = self.get_anchor_atom(anchor[2], '')
            if aTo >= currentAtom:
                raise ValidationError("Error finding defined anchor atom {}".format(anchor[2]))
            if aTo == rTo:
                raise ValidationError("Error: atom used multiple times")
            rval = self.get_coord_value(anchor[1])
            aval = self.get_coord_value(anchor[3])
            if self.full_atoms[rTo].symbol() == 'X':
                rval.set_fixed(True)
            if self.full_atoms[aTo].symbol() == 'X':
                aval.set_fixed(True)
            self.full_atoms.append(ZMatrixEntry(currentAtom, Z, charge,
                                                mass, symbol, label, A,
                                                self.full_atoms[rTo], rval,
                                                self.full_atoms[aTo], aval))
        # handle fourth line of Zmat
        elif numEntries == 6:
            self.set_has_zmatrix(True)
            rTo = self.get_anchor_atom(anchor[0], '')
            if rTo >= currentAtom:
                raise ValidationError("Error finding defined anchor atom {}".format(anchor[0]))
            aTo = self.get_anchor_atom(anchor[2], '')
            if aTo >= currentAtom:
                raise ValidationError("Error finding defined anchor atom {}".format(anchor[2]))
            dTo = self.get_anchor_atom(anchor[4], '')
            if dTo >= currentAtom:
                raise ValidationError("Error finding defined anchor atom {}".format(anchor[4]))
            if aTo == rTo or rTo == dTo or aTo == dTo:  # for you star wars fans
                raise ValidationError("Error: atom used multiple times")
            rval = self.get_coord_value(anchor[1])
            aval = self.get_coord_value(anchor[3])
            dval = self.get_coord_value(anchor[5])
            if self.full_atoms[rTo].symbol() == 'X':
                rval.set_fixed(True)
            if self.full_atoms[aTo].symbol() == 'X':
                aval.set_fixed(True)
            if self.full_atoms[dTo].symbol() == 'X':
                dval.set_fixed(True)
            self.full_atoms.append(ZMatrixEntry(currentAtom, Z, charge,
                                                mass, symbol, label, A,
                                                self.full_atoms[rTo], rval,
                                                self.full_atoms[aTo], aval,
                                                self.full_atoms[dTo], dval))
        else:
            raise ValidationError('Illegal geometry specification (neither Cartesian nor Z-Matrix)')
def atom_entry(self, atom):
"""Returns the CoordEntry for an atom."""
return self.atoms[atom]
def atom_at_position(self, b, tol=0.05):
"""Tests to see if an atom is at the passed position *b* in Bohr with a tolerance *tol*.
>>> print(H2OH2O.atom_at_position([1.35*(1.0/psi_bohr2angstroms), 0.10*(1.0/psi_bohr2angstroms), 0.0*(1.0/psi_bohr2angstroms)]))
3
"""
if len(b) != 3:
raise ValidationError('Molecule::atom_at_position: Argument vector not of length 3\n')
if self.natom() == 0:
return -1
if self.wholegeom is not None:
current_geom = self.wholegeom
else:
current_geom = self.geometry(np_out=True)
shifted_geom = current_geom - np.asarray(b)
dist2 = np.sum(np.square(shifted_geom), axis=1)
distminidx = np.argmin(dist2)
if dist2[distminidx] < tol * tol:
return distminidx
else:
return -1
def is_variable(self, vstr):
"""Checks to see if the variable str is in the list, returns
true if it is, and returns false if not.
>>> H2OH2O.is_variable('R')
False
"""
return True if vstr.upper() in self.all_variables else False
def get_variable(self, vstr):
"""Checks to see if the variable str is in the list, sets it to
val and returns true if it is, and returns false if not.
"""
vstr = vstr.upper()
try:
return self.geometry_variables[vstr]
except KeyError:
raise ValidationError('Molecule::get_variable: Geometry variable %s not known.\n' % (vstr))
def set_variable(self, vstr, val):
"""Assigns the value val to the variable labelled string in the
list of geometry variables. Also calls update_geometry()
"""
self.lock_frame = False
self.geometry_variables[vstr.upper()] = val
print("""Setting geometry variable %s to %f""" % (vstr.upper(), val))
try:
self.update_geometry()
except IncompleteAtomError:
# Update geometry might have added some atoms, delete them to be safe.
self.atoms = []
# TODO outfile
def set_geometry_variable(self, vstr, val):
"""Plain assigns the vlue val to the variable labeled string in the list of geometry variables."""
self.geometry_variables[vstr.upper()] = val
def get_anchor_atom(self, vstr, line):
"""Attempts to interpret a string *vstr* as an atom specifier in
a zmatrix. Takes the current *line* for error message printing.
Returns the atom number (adjusted to zero-based counting).
"""
integerNumber = re.compile(r"(-?\d+)", re.IGNORECASE)
if integerNumber.match(vstr):
# This is just a number, return it
return int(vstr) - 1
else:
# Look to see if this string is known
for i in range(self.nallatom()):
if self.full_atoms[i].label() == vstr:
return i
raise ValidationError("Molecule::get_anchor_atom: Illegal value %s in atom specification on line %s.\n" % (vstr, line))
def geometry(self, np_out=False):
"""Returns the geometry in Bohr as a N X 3 array.
>>> print(H2OH2O.geometry())
[[-2.930978460188563, -0.21641143673806384, 0.0], [-3.655219780069251, 1.4409218455037016, 0.0], [-1.1332252981904638, 0.0769345303220403, 0.0], [2.5523113582286716, 0.21064588230662976, 0.0], [3.175492014248769, -0.7062681346308132, -1.4334725450878665], [3.175492014248769, -0.7062681346308132, 1.4334725450878665]]
"""
geom = np.asarray([self.atoms[at].compute() for at in range(self.natom())])
geom *= self.input_units_to_au()
if np_out:
return geom
else:
return geom.tolist()
def full_geometry(self, np_out=False):
"""Returns the full (dummies included) geometry in Bohr as a N X 3 array.
>>> print(H2OH2O.full_geometry())
[[-2.930978460188563, -0.21641143673806384, 0.0], [-3.655219780069251, 1.4409218455037016, 0.0], [-1.1332252981904638, 0.0769345303220403, 0.0], [0.0, 0.0, 0.0], [2.5523113582286716, 0.21064588230662976, 0.0], [3.175492014248769, -0.7062681346308132, -1.4334725450878665], [3.175492014248769, -0.7062681346308132, 1.4334725450878665]]
"""
geom = np.asarray([self.full_atoms[at].compute() for at in range(self.nallatom())])
geom *= self.input_units_to_au()
if np_out:
return geom
else:
return geom.tolist()
def set_geometry(self, geom):
"""Sets the geometry, given a N X 3 array of coordinates *geom* in Bohr.
>>> H2OH2O.set_geometry([[1,2,3],[4,5,6],[7,8,9],[-1,-2,-3],[-4,-5,-6],[-7,-8,-9]])
"""
self.lock_frame = False
for at in range(self.natom()):
self.atoms[at].set_coordinates(geom[at][0] / self.input_units_to_au(),
geom[at][1] / self.input_units_to_au(),
geom[at][2] / self.input_units_to_au())
def set_full_geometry(self, geom):
"""Sets the full geometry (dummies included), given a N X 3 array of coordinates *geom* in Bohr.
>>> H2OH2O.set_full geometry([[1,2,3],[4,5,6],[7,8,9],[0,0,0],[-1,-2,-3],[-4,-5,-6],[-7,-8,-9]])
"""
self.lock_frame = False
for at in range(self.nallatom()):
self.full_atoms[at].set_coordinates(geom[at][0] / self.input_units_to_au(),
geom[at][1] / self.input_units_to_au(),
geom[at][2] / self.input_units_to_au())
def distance_matrix(self):
"""Computes a matrix depicting distances between atoms. Prints
formatted and returns array.
>>> H2OH2O.distance_matrix()
Interatomic Distances (Angstroms)
[1] [2] [3] [4] [5] [6]
[1] 0.00000
[2] 0.95711 0.00000
[3] 0.96391 1.51726 0.00000
[4] 2.91042 3.34878 1.95159 0.00000
[5] 3.32935 3.86422 2.43843 0.95895 0.00000
[6] 3.32935 3.86422 2.43843 0.95895 1.51712 0.00000
"""
distm = qcel.util.distance_matrix(self.geometry(np_out=True), self.geometry(np_out=True))
distm *= qcel.constants.bohr2angstroms
text = " Interatomic Distances (Angstroms)\n\n "
for i in range(self.natom()):
text += '%11s ' % ('[' + str(i + 1) + ']')
text += "\n"
for i in range(self.natom()):
text += ' %-8s ' % ('[' + str(i + 1) + ']')
for j in range(self.natom()):
if j > i:
continue
else:
text += '%10.5f ' % (distm(i, j))
text += "\n"
text += "\n\n"
print(text)
return distm
# TODO outfile
def print_distances(self):
"""Print the geometrical parameters (distances) of the molecule.
suspect libmints version actually prints Bohr.
>>> print(H2OH2O.print_distances())
Interatomic Distances (Angstroms)
Distance 1 to 2 0.957
Distance 1 to 3 0.964
Distance 1 to 4 2.910
...
"""
text = " Interatomic Distances (Angstroms)\n\n"
for i in range(self.natom()):
for j in range(i + 1, self.natom()):
eij = sub(self.xyz(j), self.xyz(i))
dist = norm(eij) * qcel.constants.bohr2angstroms
text += " Distance %d to %d %-8.3lf\n" % (i + 1, j + 1, dist)
text += "\n\n"
return text
# TODO outfile
    def print_bond_angles(self):
        """Print the geometrical parameters (bond_angles) of the molecule.

        Returns the formatted text; angles are in degrees with vertex j.

        >>> print(H2OH2O.print_bond_angles())
        Bond Angles (degrees)
        Angle 2-1-3: 104.337
        Angle 2-1-4: 109.152
        Angle 2-1-5: 117.387
        ...
        """
        text = "        Bond Angles (degrees)\n\n"
        for j in range(self.natom()):
            for i in range(self.natom()):
                if j == i:
                    continue
                # k starts past i so each (i, k) pair around vertex j prints once
                for k in range(i + 1, self.natom()):
                    if j == k:
                        continue
                    # unit vectors from the vertex j toward i and k
                    eji = sub(self.xyz(i), self.xyz(j))
                    eji = normalize(eji)
                    ejk = sub(self.xyz(k), self.xyz(j))
                    ejk = normalize(ejk)
                    dotproduct = dot(eji, ejk)
                    # angle at vertex j, converted from radians to degrees
                    phi = 180.0 * math.acos(dotproduct) / math.pi
                    text += "        Angle %d-%d-%d: %8.3lf\n" % (i + 1, j + 1, k + 1, phi)
        text += "\n\n"
        return text
        # TODO outfile
    def print_dihedrals(self):
        """Print the geometrical parameters (dihedrals) of the molecule.

        Returns the formatted text; torsion angles i-j-k-l in degrees.

        >>> print(H2OH2O.print_dihedrals())
        Dihedral Angles (Degrees)
        Dihedral 1-2-3-4: 180.000
        Dihedral 1-2-3-5: 133.511
        Dihedral 1-2-3-6: 133.511
        ...
        """
        text = "        Dihedral Angles (Degrees)\n\n"
        for i in range(self.natom()):
            for j in range(self.natom()):
                if i == j:
                    continue
                for k in range(self.natom()):
                    if i == k or j == k:
                        continue
                    for l in range(self.natom()):
                        if i == l or j == l or k == l:
                            continue
                        # unit vectors along the three bonds of the torsion
                        eij = sub(self.xyz(j), self.xyz(i))
                        eij = normalize(eij)
                        ejk = sub(self.xyz(k), self.xyz(j))
                        ejk = normalize(ejk)
                        ekl = sub(self.xyz(l), self.xyz(k))
                        ekl = normalize(ekl)
                        # Compute angle ijk
                        angleijk = math.acos(dot(scale(eij, -1.0), ejk))
                        # Compute angle jkl
                        anglejkl = math.acos(dot(scale(ejk, -1.0), ekl))
                        # compute term1 (eij x ejk)
                        term1 = cross(eij, ejk)
                        # compute term2 (ejk x ekl)
                        term2 = cross(ejk, ekl)
                        numerator = dot(term1, term2)
                        denominator = math.sin(angleijk) * math.sin(anglejkl)
                        # collinear bonds make the denominator vanish; report 0
                        try:
                            costau = numerator / denominator
                        except ZeroDivisionError:
                            costau = 0.0
                        # clamp tiny floating-point overshoot so acos stays in domain
                        if costau > 1.00 and costau < 1.000001:
                            costau = 1.00
                        if costau < -1.00 and costau > -1.000001:
                            costau = -1.00
                        tau = 180.0 * math.acos(costau) / math.pi
                        text += "        Dihedral %d-%d-%d-%d: %8.3lf\n" % (i + 1, j + 1, k + 1, l + 1, tau)
        text += "\n\n"
        return text
        # TODO outfile
def print_out_of_planes(self):
    """Print the geometrical parameters (out_of_planes) of the molecule.

    The out-of-plane angle i-j-k-l is the angle of the l->i bond out of
    the plane spanned by l->j and l->k. Returns a formatted text table.

    >>> print(H2OH2O.print_out_of_planes())
    Out-Of-Plane Angles (Degrees)
    Out-of-plane 1-2-3-4: 0.000
    Out-of-plane 1-2-3-5: -7.373
    Out-of-plane 1-2-3-6: 7.373
    ...
    """
    text = " Out-Of-Plane Angles (Degrees)\n\n"
    # Quadruple loop over all ordered quadruples with no repeated index;
    # symmetric orderings are reported separately (see doctest above).
    for i in range(self.natom()):
        for j in range(self.natom()):
            if i == j:
                continue
            for k in range(self.natom()):
                if i == k or j == k:
                    continue
                for l in range(self.natom()):
                    if i == l or j == l or k == l:
                        continue
                    # Compute vectors we need first: unit vectors from the
                    # apex atom l to each of j, k, i.
                    elj = sub(self.xyz(j), self.xyz(l))
                    elj = normalize(elj)
                    elk = sub(self.xyz(k), self.xyz(l))
                    elk = normalize(elk)
                    eli = sub(self.xyz(i), self.xyz(l))
                    eli = normalize(eli)
                    # Denominator: sin of the j-l-k angle
                    denominator = math.sin(math.acos(dot(elj, elk)))
                    # Numerator: (elj x elk) . eli
                    eljxelk = cross(elj, elk)
                    numerator = dot(eljxelk, eli)
                    # compute angle
                    try:
                        sinetheta = numerator / denominator
                    except ZeroDivisionError:
                        # Degenerate plane (j, l, k collinear): report 0.
                        sinetheta = 0.0
                    # Hard clamp into asin's domain (any overshoot is noise).
                    if sinetheta > 1.00:
                        sinetheta = 1.000
                    if sinetheta < -1.00:
                        sinetheta = -1.000
                    theta = 180.0 * math.asin(sinetheta) / math.pi
                    text += " Out-of-plane %d-%d-%d-%d: %8.3lf\n" % (i + 1, j + 1, k + 1, l + 1, theta)
    text += "\n\n"
    return text
# TODO outfile
def reinterpret_coordentry(self, rc):
    """Set whether update_geometry() reinterprets coordinate entries.
    (method name in libmints is set_reinterpret_coordentry)
    """
    self.PYreinterpret_coordentries = rc
def reinterpret_coordentries(self):
    """Reinterpret the fragments for reals/ghosts and build the atom list.

    Rebuilds ``self.atoms`` from ``self.full_atoms`` (skipping 'Absent'
    fragments and dummy 'X' atoms) and recomputes the total molecular
    charge and multiplicity from the per-fragment values.
    """
    self.atoms = []
    for item in self.full_atoms:
        item.invalidate()
    # Stash the user-set totals; restored below for single-fragment input.
    temp_charge = self.PYmolecular_charge
    temp_multiplicity = self.PYmultiplicity
    self.PYmolecular_charge = 0
    high_spin_multiplicity = 1
    for fr in range(self.nfragments()):
        if self.fragment_types[fr] == 'Absent':
            continue
        if self.fragment_types[fr] == 'Real':
            # Only real fragments contribute charge; multiplicities couple
            # ferromagnetically (each fragment adds its unpaired electrons).
            self.PYmolecular_charge += self.fragment_charges[fr]
            high_spin_multiplicity += self.fragment_multiplicities[fr] - 1
        for at in range(self.fragments[fr][0], self.fragments[fr][1] + 1):
            self.full_atoms[at].compute()
            self.full_atoms[at].set_ghosted(self.fragment_types[fr] == 'Ghost')
            if self.full_atoms[at].symbol() != 'X':  # dummy atoms excluded
                self.atoms.append(self.full_atoms[at])
    # TODO: This is a hack to ensure that set_multiplicity and set_molecular_charge
    # work for single-fragment molecules.
    if self.nfragments() < 2:
        self.PYmolecular_charge = temp_charge
        self.PYmultiplicity = temp_multiplicity
    else:
        if (self.fragment_types.count('Real') == len(self.fragments)) and ((temp_multiplicity % 2) == (high_spin_multiplicity % 2)):
            # give low-spin a chance, so long as ghost/absent fragments can't be complicating the picture
            self.PYmultiplicity = temp_multiplicity
        else:
            self.PYmultiplicity = high_spin_multiplicity
def update_geometry(self):
    """Updates the geometry, by (re)interpreting the string used to
    create the molecule, and the current values of the variables.
    The atoms list is cleared, and then rebuilt by this routine.
    This function must be called after first instantiation of Molecule.

    Pipeline: reinterpret fragments -> move to COM -> rotate to the
    symmetry frame -> detect point group -> symmetrize. Each step is
    gated by the corresponding PY* flag.

    >>> H2 = qcdb.Molecule("H\\nH 1 0.74\\n")
    >>> print(H2.natom())
    0
    >>> H2.update_geometry()
    >>> print(H2.natom())
    2
    """
    if self.nallatom() == 0:
        print("Warning: There are no quantum mechanical atoms in this molecule.")
    # Idempotence condition: once the frame is locked (end of this
    # method), further calls are no-ops until the molecule is modified.
    if self.lock_frame:
        return
    #print("beginning update_geometry:")
    #self.print_full()
    if self.PYreinterpret_coordentries:
        self.reinterpret_coordentries()
    #print("after reinterpret_coordentries:")
    #self.print_full()
    if self.PYmove_to_com:
        self.move_to_com()
    #print("after com:")
    #self.print_full()
    # Cache the geometry so symmetry helpers (e.g. symmetry_frame, which
    # reads self.wholegeom) avoid recomputing it; cleared again below.
    self.wholegeom = self.geometry(np_out=True)
    # If the no_reorient command was given, don't reorient
    if not self.PYfix_orientation:
        # Now we need to rotate the geometry to its symmetry frame
        # to align the axes correctly for the point group
        # symmetry_frame looks for the highest point group so that we can align
        # the molecule according to its actual symmetry, rather than the symmetry
        # the the user might have provided.
        frame = self.symmetry_frame()
        self.rotate_full(frame)
        #print("after rotate:")
        #self.print_full()
        self.wholegeom = self.geometry(np_out=True)
    # Recompute point group of the molecule, so the symmetry info is updated to the new frame
    self.set_point_group(self.find_point_group())
    self.set_full_point_group()
    self.wholegeom = self.geometry(np_out=True)
    # Disabling symmetrize for now if orientation is fixed, as it is not
    # correct. We may want to fix this in the future, but in some cases of
    # finite-differences the set geometry is not totally symmetric anyway.
    # Symmetrize the molecule to remove any noise
    self.symmetrize()
    #print("after symmetry:")
    #self.print_full()
    self.wholegeom = None
    self.lock_frame = True
# <<< Methods for Miscellaneous >>>
def clear(self):
    """Reset the molecule: drop every atom list and unlock the frame."""
    self.atoms = []
    self.full_atoms = []
    self.lock_frame = False
def nuclear_repulsion_energy(self):
    """Computes nuclear repulsion energy, sum over pairs of Z_i * Z_j / r_ij.

    >>> print(H2OH2O.nuclear_repulsion_energy())
    36.6628478528
    """
    e = 0.0
    # Triangular double loop: each unordered pair counted exactly once.
    for at1 in range(1, self.natom()):
        for at2 in range(at1):
            e += self.Z(at1) * self.Z(at2) / distance(self.xyz(at1), self.xyz(at2))
    return e
def nuclear_repulsion_energy_deriv1(self):
    """Computes the gradient of the nuclear repulsion energy with
    respect to each atom's Cartesian coordinates.

    Returns
    -------
    list of list of float
        natom x 3 nested list of gradient components.

    >>> print(H2OH2O.nuclear_repulsion_energy_deriv1())
    [[3.9020946901323774, 2.76201566471991, 0.0], [1.3172905807089021, -2.3486366050337293, 0.0], [-1.8107598525022435, -0.32511212499256564, 0.0], [-1.217656141385739, -2.6120090867576717, 0.0], [-1.0954846384766488, 1.2618710760320282, 2.1130743287465603], [-1.0954846384766488, 1.2618710760320282, -2.1130743287465603]]
    """
    deriv = []
    for i in range(self.natom()):
        row = [0.0, 0.0, 0.0]
        for j in range(self.natom()):
            if i == j:
                continue
            # d/dx_i (1/r_ij) brings in a factor 1/r_ij^3
            r3 = distance(self.xyz(i), self.xyz(j)) ** 3.0
            Zi = self.Z(i)
            Zj = self.Z(j)
            row[0] -= (self.x(i) - self.x(j)) * Zi * Zj / r3
            row[1] -= (self.y(i) - self.y(j)) * Zi * Zj / r3
            row[2] -= (self.z(i) - self.z(j)) * Zi * Zj / r3
        deriv.append(row)
    return deriv
def nuclear_repulsion_energy_deriv2(self):
    """ **NYI** Computes nuclear repulsion energy second derivatives"""
    # Placeholder stub; raises until the Hessian contribution is implemented.
    raise FeatureNotImplemented('Molecule::nuclear_repulsion_energy_deriv2')  # FINAL
def set_basis_all_atoms(self, name, role="BASIS"):
    """Tag every atom (real, ghost, and dummy) with basis *name* for *role*."""
    for at in self.full_atoms:
        at.set_basisset(name, role)
def set_basis_by_symbol(self, symbol, name, role="BASIS"):
    """Tag every atom whose element symbol matches *symbol*
    (compared upper-cased) with basis *name* for *role*."""
    target = symbol.upper()
    for at in self.full_atoms:
        if at.symbol() == target:
            at.set_basisset(name, role)
def clear_basis_all_atoms(self):
    """Discard every atom's basis-set assignments."""
    for at in self.full_atoms:
        at.PYbasissets = collections.OrderedDict()
def set_basis_by_number(self, number, name, role="BASIS"):
    """Assigns basis *name* to atom number *number* (0-indexed, excludes dummies)."""
    # change from libmints to 0-indexing and to real/ghost numbering, dummies not included (libmints >= error)
    if not number < self.natom():
        raise ValidationError("Molecule::set_basis_by_number: Basis specified for atom %d, but there are only %d atoms in this molecule." %
                              (number, self.natom()))
    self.atoms[number].set_basisset(name, role)
def set_basis_by_label(self, label, name, role="BASIS"):
    """Tag every atom whose label matches *label* (compared upper-cased)
    with basis *name* for *role*."""
    target = label.upper()
    for at in self.full_atoms:
        if at.label() == target:
            at.set_basisset(name, role)
def set_shell_by_number(self, number, bshash, role="BASIS"):
    """Assigns BasisSet *bshash* to atom number *number* (0-indexed, excludes dummies)."""
    # Changing shells invalidates any previously locked frame.
    self.lock_frame = False
    if not number < self.natom():
        raise ValidationError("Molecule::set_shell_by_number: Basis specified for atom %d, but there are only %d atoms in this molecule." %
                              (number, self.natom()))
    self.atoms[number].set_shell(bshash, role)
def nfrozen_core(self, depth=False):
    """Number of frozen core orbitals for molecule, given freezing state.

    Parameters
    ----------
    depth : bool or str, optional
        False / 'FALSE' freezes nothing; True / 'TRUE' freezes, for each
        atom, the orbitals of the nearest previous noble-gas atom.
        Strings are compared case-insensitively.

    Raises
    ------
    ValidationError
        If *depth* is a string other than 'TRUE'/'FALSE', or an atomic
        number above 108 is encountered.

    >>> print(H2OH2O.nfrozen_core())
    2
    """
    # Normalize *depth* first. The previous version called depth.upper()
    # unconditionally, so passing the bool True raised AttributeError, and
    # its trailing 'else' (the unsupported-option error) was unreachable.
    if isinstance(depth, str):
        token = depth.upper()
        if token == 'FALSE':
            freeze = False
        elif token == 'TRUE':
            freeze = True
        else:
            raise ValidationError("Molecule::nfrozen_core: Frozen core '%s' is not supported, options are {true, false}." % (depth))
    else:
        freeze = bool(depth)
    if not freeze:
        return 0
    # Freeze the number of core electrons corresponding to the
    # nearest previous noble gas atom. This means that the 4p block
    # will still have 3d electrons active. Alkali earth atoms will
    # have one valence electron in this scheme.
    nfzc = 0
    for A in range(self.natom()):
        Z = self.Z(A)
        if Z > 2:
            nfzc += 1
        if Z > 10:
            nfzc += 4
        if Z > 18:
            nfzc += 4
        if Z > 36:
            nfzc += 9
        if Z > 54:
            nfzc += 9
        if Z > 86:
            nfzc += 16
        if Z > 108:
            raise ValidationError("Molecule::nfrozen_core: Invalid atomic number")
    return nfzc
# <<< Involved Methods for Frame >>>
def translate(self, r):
    """Translates molecule by r.

    Each atom's coordinates are scaled into au via input_units_to_au(),
    shifted by *r*, then scaled back before being stored.

    >>> H2OH2O.translate([1.0, 1.0, 0.0])
    """
    # (Removed the dead `temp = [None, None, None]` placeholder: temp is
    # rebound on every iteration before any use.)
    for at in range(self.nallatom()):
        temp = scale(self.full_atoms[at].compute(), self.input_units_to_au())
        temp = add(temp, r)
        temp = scale(temp, 1.0 / self.input_units_to_au())
        self.full_atoms[at].set_coordinates(temp[0], temp[1], temp[2])
def center_of_mass(self):
    """Computes center of mass of molecule (does not translate molecule).

    >>> H2OH2O.center_of_mass()
    [-0.12442647346606871, 0.00038657002584110707, 0.0]
    """
    com = [0.0, 0.0, 0.0]
    total_mass = 0.0
    # Mass-weighted average of the (real) atom positions.
    for at in range(self.natom()):
        mass_at = self.mass(at)
        com = add(com, scale(self.xyz(at), mass_at))
        total_mass += mass_at
    return scale(com, 1.0 / total_mass)
def move_to_com(self):
    """Shift the geometry so the center of mass sits at the origin."""
    self.translate(scale(self.center_of_mass(), -1.0))
def set_com_fixed(self, _fix=True):
    """ **NYI** Fix the center of mass at its current frame.
    Not used in libmints so not implemented.
    """
    # Use fix_com() instead; this libmints-named variant is a stub.
    raise FeatureNotImplemented('Molecule::set_com_fixed')  # FINAL
# def inertia_tensor(self):
# """Compute inertia tensor.
#
# >>> print(H2OH2O.inertia_tensor())
# [[8.704574864178731, -8.828375721817082, 0.0], [-8.828375721817082, 280.82861714077666, 0.0], [0.0, 0.0, 281.249500988553]]
#
# """
# tensor = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]
#
# for i in range(self.natom()):
# # I(alpha, alpha)
# tensor[0][0] += self.mass(i) * (self.y(i) * self.y(i) + self.z(i) * self.z(i))
# tensor[1][1] += self.mass(i) * (self.x(i) * self.x(i) + self.z(i) * self.z(i))
# tensor[2][2] += self.mass(i) * (self.x(i) * self.x(i) + self.y(i) * self.y(i))
#
# # I(alpha, beta)
# tensor[0][1] -= self.mass(i) * self.x(i) * self.y(i)
# tensor[0][2] -= self.mass(i) * self.x(i) * self.z(i)
# tensor[1][2] -= self.mass(i) * self.y(i) * self.z(i)
#
# # mirror
# tensor[1][0] = tensor[0][1]
# tensor[2][0] = tensor[0][2]
# tensor[2][1] = tensor[1][2]
#
# # Check the elements for zero and make them a hard zero.
# for i in range(3):
# for j in range(3):
# if math.fabs(tensor[i][j]) < ZERO:
# tensor[i][j] = 0.0
# return tensor
def rotational_constants(self, tol=FULL_PG_TOL, return_units='cm^-1'):
    """Compute the rotational constants and moments of inertia.

    Parameters
    ----------
    return_units : str, optional
        Selector for rotational constants (among: 'GHz', 'MHz', 'cm^-1') or
        moments of inertia (among: 'u a0^2', 'u A^2').

    Returns
    -------
    np.array
        1 by 3 of rotational constants or moments of inertia in units of `return_units`.

    Notes
    -----
    This used to return a list with inf values as None.
    The summary table is always printed to stdout (see TODO below).
    """
    evals, evecs = diagonalize3x3symmat(self.inertia_tensor())
    evals = sorted(evals)  # ascending moments of inertia
    evals = np.asarray(evals)
    # Unit-conversion factors applied to the amu*a0^2 moments.
    # NOTE(review): the 1e14 power in im_ghz presumably absorbs the SI
    # prefactors of h, Na and angstrom^2 to land in GHz — confirm against
    # the reference derivation.
    im_amuA = qcel.constants.bohr2angstroms * qcel.constants.bohr2angstroms
    im_ghz = qcel.constants.h * qcel.constants.na * 1e14 / (8 * math.pi * math.pi * qcel.constants.bohr2angstroms * qcel.constants.bohr2angstroms)
    im_mhz = im_ghz * 1000.
    im_cm = im_ghz * 1.e7 / qcel.constants.c
    rc_moi = {}
    rc_moi['u a0^2'] = evals
    rc_moi['u A^2'] = evals * im_amuA
    # Zero moments (atoms / linear molecules) divide to inf; the numpy
    # warning is deliberately suppressed instead of special-casing them.
    with np.errstate(divide='ignore'):
        rc_moi['GHz'] = im_ghz / evals
        rc_moi['MHz'] = im_mhz / evals
        rc_moi['cm^-1'] = im_cm / evals
    fmt = """ {:12} {a:3} {:16.8f} {b:3} {:16.8f} {c:3} {:16.8f}\n"""
    text = " Moments of Inertia and Rotational Constants\n\n"
    text += fmt.format('[u a0^2]', a='I_A', b='I_B', c='I_C', *rc_moi['u a0^2'])
    text += fmt.format('[u A^2]', a='I_A', b='I_B', c='I_C', *rc_moi['u A^2'])
    text += fmt.format('[GHz]', a='A', b='B', c='C', *rc_moi['GHz'])
    text += fmt.format('[MHz]', a='A', b='B', c='C', *rc_moi['MHz'])
    text += fmt.format('[cm^-1]', a='A', b='B', c='C', *rc_moi['cm^-1'])
    print(text)
    # TODO outfile
    return rc_moi[return_units]
def rotor_type(self, tol=FULL_PG_TOL):
    """Returns the rotor type.

    Returns
    -------
    str
        One of 'RT_ATOM', 'RT_LINEAR', 'RT_SPHERICAL_TOP',
        'RT_SYMMETRIC_TOP', 'RT_ASYMMETRIC_TOP'.

    >>> H2OH2O.rotor_type()
    RT_ASYMMETRIC_TOP
    """
    rot_const = self.rotational_constants()
    # rotational_constants() used to flag an infinite constant (zero
    # moment of inertia) with None; it now divides under
    # np.errstate(divide='ignore') and yields inf instead, so the old
    # `is None` test was dead and linear molecules could never satisfy
    # the rot_const[0] == 0.0 test below. Map legacy None and any
    # non-finite value to 0.0.
    rot_const = [0.0 if (c is None or not math.isfinite(c)) else c for c in rot_const]
    # Determine degeneracy of rotational constants.
    degen = 0
    for i in range(2):
        for j in range(i + 1, 3):
            if degen >= 2:
                continue
            rabs = math.fabs(rot_const[i] - rot_const[j])
            tmp = rot_const[i] if rot_const[i] > rot_const[j] else rot_const[j]
            if rabs > ZERO:
                rel = rabs / tmp
            else:
                rel = 0.0
            if rel < tol:
                degen += 1
    #print("\tDegeneracy is %d\n" % (degen))
    # Determine rotor type
    if self.natom() == 1:
        rotor_type = 'RT_ATOM'
    elif rot_const[0] == 0.0:
        rotor_type = 'RT_LINEAR'  # 0 < IB == IC inf > B == C
    elif degen == 2:
        rotor_type = 'RT_SPHERICAL_TOP'  # IA == IB == IC A == B == C
    elif degen == 1:
        rotor_type = 'RT_SYMMETRIC_TOP'  # IA < IB == IC A > B == C --or--
        # IA == IB < IC A == B > C
    else:
        rotor_type = 'RT_ASYMMETRIC_TOP'  # IA < IB < IC A > B > C
    return rotor_type
def rotate(self, R):
    """Rotates the molecule using rotation matrix *R*.

    >>> H2OH2O.rotate([[0,-1,0],[-1,0,0],[0,0,1]])
    """
    # (Removed the dead `zero(3, natom)` pre-allocation: the result of
    # mult() immediately replaced it.)
    new_geom = mult(self.geometry(), R)
    self.set_geometry(new_geom)
def rotate_full(self, R):
    """Rotates the full molecule (including dummies/ghosts) using
    rotation matrix *R*.

    >>> H2OH2O.rotate_full([[0,-1,0],[-1,0,0],[0,0,1]])
    """
    # (Removed the dead `zero(3, nallatom)` pre-allocation: the result of
    # mult() immediately replaced it.)
    new_geom = mult(self.full_geometry(), R)
    self.set_full_geometry(new_geom)
def com_fixed(self):
    """Get whether or not center of mass is fixed.

    >>> H2OH2O.com_fixed()
    True
    """
    # Fixed COM is simply the negation of the move-to-COM flag.
    fixed = not self.PYmove_to_com
    return fixed
def fix_com(self, _fix=True):
    """Choose between pinning the Cartesian position in its current frame
    (True) and translating to the center of mass (False).
    (method name in libmints is set_com_fixed)
    """
    self.PYmove_to_com = not _fix
def orientation_fixed(self):
    """Get whether or not orientation is fixed.

    >>> H2OH2O.orientation_fixed()
    True
    """
    return self.PYfix_orientation
def fix_orientation(self, _fix=True):
    """Fix the orientation at its current frame
    (method name in libmints is set_orientation_fixed)

    When fixing, the atom list is rebuilt immediately so the current
    Cartesian coordinates are materialized before the frame is frozen.
    """
    if _fix:
        self.PYfix_orientation = True  # tells update_geometry() not to change orientation
        # Compute original cartesian coordinates - code copied from update_geometry()
        # NOTE(review): unlike reinterpret_coordentries(), this rebuild does
        # not skip 'Absent' fragments — confirm that is intended.
        self.atoms = []
        for item in self.full_atoms:
            item.invalidate()
        for fr in range(self.nfragments()):
            for at in range(self.fragments[fr][0], self.fragments[fr][1] + 1):
                self.full_atoms[at].compute()
                self.full_atoms[at].set_ghosted(self.fragment_types[fr] == 'Ghost')
                if self.full_atoms[at].symbol() != 'X':  # dummy atoms excluded
                    self.atoms.append(self.full_atoms[at])
    else:  # release orientation to be free
        self.PYfix_orientation = False
# <<< Methods for Saving >>>
# def save_string_xyz(self, save_ghosts=True):
# """Save a string for a XYZ-style file.
#
# >>> H2OH2O.save_string_xyz()
# 6
# _
# O -1.551007000000 -0.114520000000 0.000000000000
# H -1.934259000000 0.762503000000 0.000000000000
# H -0.599677000000 0.040712000000 0.000000000000
# O 1.350625000000 0.111469000000 0.000000000000
# H 1.680398000000 -0.373741000000 -0.758561000000
# H 1.680398000000 -0.373741000000 0.758561000000
#
# """
# factor = 1.0 if self.PYunits == 'Angstrom' else qcel.constants.bohr2angstroms
#
# N = self.natom()
# if not save_ghosts:
# N = 0
# for i in range(self.natom()):
# if self.Z(i):
# N += 1
# text = "%d\n\n" % (N)
#
# for i in range(self.natom()):
# [x, y, z] = self.atoms[i].compute()
# if save_ghosts or self.Z(i):
# text += '%2s %17.12f %17.12f %17.12f\n' % ((self.symbol(i) if self.Z(i) else "Gh"), \
# x * factor, y * factor, z * factor)
# return text
def save_xyz(self, filename, save_ghosts=True):
    """Save an XYZ file.

    Parameters
    ----------
    filename : str
        Path of the file to (over)write.
    save_ghosts : bool, optional
        Forwarded to save_string_xyz(); controls whether ghost atoms
        are included.

    >>> H2OH2O.save_xyz('h2o.xyz')
    """
    # Context manager guarantees the handle is closed even if the
    # string formatting or the write itself raises.
    with open(filename, 'w') as outfile:
        outfile.write(self.save_string_xyz(save_ghosts))
def save_to_checkpoint(self, chkpt, prefix=""):
    """ **NYI** Save information to checkpoint file
    (method name in libmints is save_to_chkpt)
    """
    # Placeholder stub; checkpoint export is not implemented.
    raise FeatureNotImplemented('Molecule::save_to_checkpoint')  # FINAL
# <<< Methods for Symmetry >>>
def has_symmetry_element(self, op, tol=DEFAULT_SYM_TOL):
    """ **NYI** Whether molecule satisfies the vector symmetry
    operation *op*. Not used by libmints.
    """
    raise FeatureNotImplemented('Molecule::has_symmetry_element')  # FINAL
    # NOTE: everything below the raise is unreachable; it is retained as
    # a reference implementation of the intended check.
    for i in range(self.natom()):
        result = naivemult(self.xyz(i), op)
        atom = self.atom_at_position(result, tol)
        if atom != -1:
            if not self.atoms[atom].is_equivalent_to(self.atoms[i]):
                return False
        else:
            return False
    return True
def point_group(self):
    """Return the active point group object, raising if none has been set."""
    pg = self.pg
    if pg is None:
        raise ValidationError("Molecule::point_group: Molecular point group has not been set.")
    return pg
def set_point_group(self, pg):
    """Install point group object *pg* and refresh the symmetry info."""
    self.pg = pg
    # Call this here, the programmer will forget to call it, as I have many times.
    self.form_symmetry_information()
def set_full_point_group(self, tol=FULL_PG_TOL):
    """Determine and set FULL point group. self.PYfull_pg_n is highest
    order n in Cn. 0 for atoms or infinity.

    Sets ``self.full_pg`` (family label such as 'Cnv', 'Dnh', 'Sn') and
    ``self.PYfull_pg_n`` (order n of the principal axis).
    """
    verbose = 1  # TODO hook up to an output-level setting
    # Get cartesian geometry and put COM at origin
    geom = self.geometry()
    com = self.center_of_mass()
    for at in range(self.natom()):
        geom[at][0] += -com[0]
        geom[at][1] += -com[1]
        geom[at][2] += -com[2]
    # Get rotor type
    rotor = self.rotor_type(tol)
    if verbose > 2:
        print(""" Rotor type : %s""" % (rotor))
    # Get the D2h point group from Jet and Ed's code: c1 ci c2 cs d2 c2v c2h d2h
    # and ignore the user-specified subgroup in this case.
    pg = self.find_highest_point_group(tol)
    d2h_subgroup = pg.symbol()
    if verbose > 2:
        print(""" D2h_subgroup : %s""" % (self.point_group().symbol()))
    # Check inversion
    v3_zero = [0.0, 0.0, 0.0]
    op_i = self.has_inversion(v3_zero, tol)
    if verbose > 2:
        print(""" Inversion symmetry : %s""" % ('yes' if op_i else 'no'))
    x_axis = [1, 0, 0]
    y_axis = [0, 1, 0]
    z_axis = [0, 0, 1]
    rot_axis = [0.0, 0.0, 0.0]
    if rotor == 'RT_ATOM':  # atoms
        self.full_pg = 'ATOM'
        self.PYfull_pg_n = 0
    elif rotor == 'RT_LINEAR':  # linear molecules
        self.full_pg = 'D_inf_h' if op_i else 'C_inf_v'
        self.PYfull_pg_n = 0
    elif rotor == 'RT_SPHERICAL_TOP':  # spherical tops
        if not op_i:  # The only spherical top without inversion is Td.
            self.full_pg = 'Td'
            self.PYfull_pg_n = 3
        else:  # Oh or Ih ?
            # Oh has a S4 and should be oriented properly already.
            test_mat = matrix_3d_rotation(geom, z_axis, math.pi / 2.0, True)
            op_symm = equal_but_for_row_order(geom, test_mat, tol)
            if verbose > 2:
                print(""" S4z : %s""" % ('yes' if op_symm else 'no'))
            if op_symm:
                self.full_pg = 'Oh'
                self.PYfull_pg_n = 4
            else:
                self.full_pg = 'Ih'
                self.PYfull_pg_n = 5
    elif rotor == 'RT_ASYMMETRIC_TOP':  # asymmetric tops cannot exceed D2h, right?
        # Map the detected D2h subgroup symbol onto the full-PG family.
        if d2h_subgroup == 'c1':
            self.full_pg = 'C1'
            self.PYfull_pg_n = 1
        elif d2h_subgroup == 'ci':
            self.full_pg = 'Ci'
            self.PYfull_pg_n = 1
        elif d2h_subgroup == 'c2':
            self.full_pg = 'Cn'
            self.PYfull_pg_n = 2
        elif d2h_subgroup == 'cs':
            self.full_pg = 'Cs'
            self.PYfull_pg_n = 1
        elif d2h_subgroup == 'd2':
            self.full_pg = 'Dn'
            self.PYfull_pg_n = 2
        elif d2h_subgroup == 'c2v':
            self.full_pg = 'Cnv'
            self.PYfull_pg_n = 2
        elif d2h_subgroup == 'c2h':
            self.full_pg = 'Cnh'
            self.PYfull_pg_n = 2
        elif d2h_subgroup == 'd2h':
            self.full_pg = 'Dnh'
            self.PYfull_pg_n = 2
        else:
            print(""" Warning: Cannot determine point group.""")
    elif rotor in ['RT_SYMMETRIC_TOP', 'RT_PROLATE_SYMMETRIC_TOP', 'RT_OBLATE_SYMMETRIC_TOP']:
        # Find principal axis that is unique and make it z-axis.
        It = self.inertia_tensor()
        I_evals, I_evecs = diagonalize3x3symmat(It)
        ev_list = list(zip(I_evals, transpose(I_evecs)))  # eigenvectors are cols of I_evecs
        ev_list.sort(key=lambda tup: tup[0], reverse=False)
        I_evals, I_evecs = zip(*ev_list)  # sorted eigenvectors are now rows of I_evecs
        if verbose > 2:
            print(""" I_evals: %15.10lf %15.10lf %15.10lf""" % (I_evals[0], I_evals[1], I_evals[2]))
        # The unique axis is the one whose moment differs from the
        # (degenerate) other two.
        unique_axis = 1
        if abs(I_evals[0] - I_evals[1]) < tol:
            unique_axis = 2
        elif abs(I_evals[1] - I_evals[2]) < tol:
            unique_axis = 0
        # Compute angle between unique axis and the z-axis
        old_axis = I_evecs[unique_axis]
        ddot = dot(z_axis, old_axis)
        if abs(ddot - 1) < 1.0e-10:
            phi = 0.0
        elif abs(ddot + 1) < 1.0e-10:
            phi = math.pi
        else:
            phi = math.acos(ddot)
        # Rotate geometry to put unique axis on the z-axis, if it isn't already.
        if abs(phi) > 1.0e-14:
            rot_axis = cross(z_axis, old_axis)  # right order?
            test_mat = matrix_3d_rotation(geom, rot_axis, phi, False)
            if verbose > 2:
                print(""" Rotating by %lf to get principal axis on z-axis ...""" % (phi))
            geom = [row[:] for row in test_mat]
        if verbose > 2:
            print(""" Geometry to analyze - principal axis on z-axis:""")
            for at in range(self.natom()):
                print("""%20.15lf %20.15lf %20.15lf""" % (geom[at][0], geom[at][1], geom[at][2]))
            print('\n')
        # Determine order Cn and Sn of principal axis.
        Cn_z = matrix_3d_rotation_Cn(geom, z_axis, False, tol)
        if verbose > 2:
            print(""" Highest rotation axis (Cn_z) : %d""" % (Cn_z))
        Sn_z = matrix_3d_rotation_Cn(geom, z_axis, True, tol)
        if verbose > 2:
            print(""" Rotation axis (Sn_z) : %d""" % (Sn_z))
        # Check for sigma_h (xy plane).
        op_sigma_h = False
        for at in range(self.natom()):
            if abs(geom[at][2]) < tol:
                continue  # atom is in xy plane
            else:
                test_atom = [geom[at][0], geom[at][1], -1 * geom[at][2]]
                if not atom_present_in_geom(geom, test_atom, tol):
                    break
        else:
            op_sigma_h = True
        if verbose > 2:
            print(""" sigma_h : %s""" % ('yes' if op_sigma_h else 'no'))
        # Find an off-axis atom to pivot into the yz plane for sigma_v
        # detection. (Fixed: previously a failed search left pivot_atom_i
        # unbound — NameError — and the `== natom()` guard below it could
        # never fire because the variable was only set via break.)
        pivot_atom_i = None
        for at in range(self.natom()):
            dist_from_z = math.sqrt(geom[at][0] * geom[at][0] + geom[at][1] * geom[at][1])
            if abs(dist_from_z) > tol:
                pivot_atom_i = at
                break
        if pivot_atom_i is None:
            raise ValidationError("Molecule::set_full_point_group: Not a linear molecule but could not find off-axis atom.")
        # Rotate around z-axis to put pivot atom in the yz plane
        xy_point = normalize([geom[pivot_atom_i][0], geom[pivot_atom_i][1], 0])
        ddot = dot(y_axis, xy_point)
        phi = None
        if abs(ddot - 1) < 1.0e-10:
            phi = 0.0
        elif abs(ddot + 1) < 1.0e-10:
            phi = math.pi
        else:
            phi = math.acos(ddot)
        is_D = False
        if abs(phi) > 1.0e-14:
            test_mat = matrix_3d_rotation(geom, z_axis, phi, False)
            if verbose > 2:
                print(""" Rotating by %8.3e to get atom %d in yz-plane ...""" % (phi, pivot_atom_i + 1))
            geom = [row[:] for row in test_mat]
        # Check for sigma_v (yz plane).
        op_sigma_v = False
        for at in range(self.natom()):
            if abs(geom[at][0]) < tol:
                continue  # atom is in yz plane
            else:
                test_atom = [-1 * geom[at][0], geom[at][1], geom[at][2]]
                if not atom_present_in_geom(geom, test_atom, tol):
                    break
        else:
            op_sigma_v = True
        if verbose > 2:
            print(""" sigma_v : %s""" % ('yes' if op_sigma_v else 'no'))
            print(""" geom to analyze - one atom in yz plane:""")
            for at in range(self.natom()):
                print("""%20.15lf %20.15lf %20.15lf""" % (geom[at][0], geom[at][1], geom[at][2]))
            print('\n')
        # Check for perpendicular C2's.
        # Loop through pairs of atoms to find c2 axis candidates.
        for i in range(self.natom()):
            A = [geom[i][0], geom[i][1], geom[i][2]]
            AdotA = dot(A, A)
            for j in range(i):
                # Candidate axes only relate atoms of the same element.
                # (Fixed: this previously tested self.Z(at) — a stale loop
                # index left over from the sigma_v scan above — not self.Z(i).)
                if self.Z(i) != self.Z(j):
                    continue  # ensure same atomic number
                B = [geom[j][0], geom[j][1], geom[j][2]]  # ensure same distance from com
                if abs(AdotA - dot(B, B)) > 1.0e-6:
                    continue  # loose check
                # Use sum of atom vectors as axis if not 0.
                axis = add(A, B)
                if norm(axis) < 1.0e-12:
                    continue
                axis = normalize(axis)
                # Check if axis is perpendicular to z-axis.
                if abs(dot(axis, z_axis)) > 1.0e-6:
                    continue
                # Do the thorough check for C2.
                if matrix_3d_rotation_Cn(geom, axis, False, tol, 2) == 2:
                    is_D = True
        if verbose > 2:
            print(""" perp. C2's : %s""" % ('yes' if is_D else 'no'))
        # Now assign point groups! Sn first.
        if Sn_z == 2 * Cn_z and not is_D:
            self.full_pg = 'Sn'
            self.PYfull_pg_n = Sn_z
            return
        if is_D:  # has perpendicular C2's
            if op_sigma_h and op_sigma_v:  # Dnh : Cn, nC2, sigma_h, nSigma_v
                self.full_pg = 'Dnh'
                self.PYfull_pg_n = Cn_z
            elif Sn_z == 2 * Cn_z:  # Dnd : Cn, nC2, S2n axis coincident with Cn
                self.full_pg = 'Dnd'
                self.PYfull_pg_n = Cn_z
            else:  # Dn : Cn, nC2
                self.full_pg = 'Dn'
                self.PYfull_pg_n = Cn_z
        else:  # lacks perpendicular C2's
            if op_sigma_h and Sn_z == Cn_z:  # Cnh : Cn, sigma_h, Sn coincident with Cn
                self.full_pg = 'Cnh'
                self.PYfull_pg_n = Cn_z
            elif op_sigma_v:  # Cnv : Cn, nCv
                self.full_pg = 'Cnv'
                self.PYfull_pg_n = Cn_z
            else:  # Cn : Cn
                self.full_pg = 'Cn'
                self.PYfull_pg_n = Cn_z
    return
def has_inversion(self, origin, tol=DEFAULT_SYM_TOL):
    """True when inverting every atom through *origin* maps the molecule
    onto an equivalent atom within *tol*.
    """
    geom = self.geometry(np_out=True)
    # Point inversion of r through origin o is 2*o - r.
    mirrored = 2. * np.asarray(origin) - geom
    for at in range(self.natom()):
        partner = self.atom_at_position(mirrored[at], tol)
        if partner < 0:
            return False
        if not self.atoms[partner].is_equivalent_to(self.atoms[at]):
            return False
    return True
def is_plane(self, origin, uperp, tol=DEFAULT_SYM_TOL):
    """True when reflection through the plane with unit normal *uperp*
    passing through *origin* maps the molecule onto itself.
    """
    for i in range(self.natom()):
        rel = sub(self.xyz(i), origin)
        # Decompose rel into components parallel/perpendicular to the
        # normal; reflection flips the parallel part.
        par = scale(uperp, dot(uperp, rel))
        perp = sub(rel, par)
        reflected = add(sub(perp, par), origin)
        image = self.atom_at_position(reflected, tol)
        if image < 0 or not self.atoms[image].is_equivalent_to(self.atoms[i]):
            return False
    return True
def is_axis(self, origin, axis, order, tol=DEFAULT_SYM_TOL):
    """True when *axis* through *origin* is a rotation axis of the given
    *order* (every atom maps onto an equivalent one under each C_order^k).
    """
    for i in range(self.natom()):
        rel = sub(self.xyz(i), origin)
        for step in range(1, order):
            rotated = rotate(rel, step * 2.0 * math.pi / order, axis)
            rotated = add(rotated, origin)
            image = self.atom_at_position(rotated, tol)
            if image < 0 or not self.atoms[image].is_equivalent_to(self.atoms[i]):
                return False
    return True
def is_linear_planar(self, tol=DEFAULT_SYM_TOL):
    """Is the molecule linear, or planar?

    Returns
    -------
    (bool, bool)
        (linear, planar).

    >>> print(H2OH2O.is_linear_planar())
    (False, False)
    """
    linear = None
    planar = None
    # Fewer than 3 atoms is trivially both linear and planar.
    if self.natom() < 3:
        linear = True
        planar = True
        return linear, planar
    # find three atoms not on the same line
    A = self.xyz(0)
    B = self.xyz(1)
    BA = sub(B, A)
    BA = normalize(BA)
    CA = [None, None, None]
    min_BAdotCA = 1.0
    # Pick the third atom whose direction from A is least parallel to BA.
    for i in range(2, self.natom()):
        tmp = sub(self.xyz(i), A)
        tmp = normalize(tmp)
        if math.fabs(dot(BA, tmp)) < min_BAdotCA:
            CA = copy.deepcopy(tmp)
            min_BAdotCA = math.fabs(dot(BA, tmp))
    # Everything (nearly) parallel to BA -> linear (and hence planar).
    if min_BAdotCA >= 1.0 - tol:
        linear = True
        planar = True
        return linear, planar
    linear = False
    # Any three non-collinear atoms span a plane.
    if self.natom() < 4:
        planar = True
        return linear, planar
    # check for nontrivial planar molecules: every remaining atom must
    # have no component along the plane normal BA x CA.
    BAxCA = normalize(cross(BA, CA))
    for i in range(2, self.natom()):
        tmp = sub(self.xyz(i), A)
        if math.fabs(dot(tmp, BAxCA)) > tol:
            planar = False
            return linear, planar
    planar = True
    return linear, planar
@staticmethod
def like_world_axis(axis, worldxaxis, worldyaxis, worldzaxis):
    """Classify which world axis *axis* overlaps most, flipping *axis*
    when its projection onto that world axis is negative.

    Returns
    -------
    (str, list)
        One of 'XAxis'/'YAxis'/'ZAxis', and the (possibly negated) axis.
    """
    overlap_x = math.fabs(dot(axis, worldxaxis))
    overlap_y = math.fabs(dot(axis, worldyaxis))
    overlap_z = math.fabs(dot(axis, worldzaxis))
    # Ties (within 1e-12) fall through toward the z-axis.
    if (overlap_x - overlap_y) > 1.0E-12 and (overlap_x - overlap_z) > 1.0E-12:
        like = 'XAxis'
        world = worldxaxis
    elif (overlap_y - overlap_z) > 1.0E-12:
        like = 'YAxis'
        world = worldyaxis
    else:
        like = 'ZAxis'
        world = worldzaxis
    if dot(axis, world) < 0:
        axis = scale(axis, -1.0)
    return like, axis
def find_point_group(self, tol=DEFAULT_SYM_TOL):
    """Find computational molecular point group, user can override
    this with the "symmetry" keyword. Result is highest D2h subgroup
    attendant on molecule and allowed by the user.

    Raises
    ------
    ValidationError
        If the user-specified group is not a subgroup of the detected one.
    """
    pg = self.find_highest_point_group(tol)  # D2h subgroup
    user = self.symmetry_from_input()
    if user is not None:
        # Need to handle the cases that the user only provides C2, C2v, C2h, Cs.
        # These point groups need directionality.
        # Did the user provide directionality? If they did, the last letter would be x, y, or z
        # Directionality given, assume the user is smart enough to know what they're doing.
        user_specified_direction = True if user[-1] in ['X', 'x', 'Y', 'y', 'Z', 'z'] else False
        if self.symmetry_from_input() != pg.symbol():
            user = PointGroup(self.symmetry_from_input())
            if user_specified_direction:
                # Assume the user knows what they're doing.
                # Make sure user is subgroup of pg
                if (pg.bits() & user.bits()) != user.bits():
                    raise ValidationError("Molecule::find_point_group: User specified point group (%s) is not a subgroup of the highest detected point group (%s)" % (PointGroup.bits_to_full_name(user.bits()), PointGroup.bits_to_full_name(pg.bits())))
            else:
                # No direction given: try each directional variant of the
                # user's group until one is contained in the detected group.
                similars, count = similar(user.bits())
                found = False
                for typ in range(count):
                    # If what the user specified and the similar type
                    # matches the full point group we've got a match
                    if (similars[typ] & pg.bits()) == similars[typ]:
                        found = True
                        break
                if found:
                    # Construct a point group object using the found similar
                    user = PointGroup(similars[typ])
                else:
                    raise ValidationError("Molecule::find_point_group: User specified point group (%s) is not a subgroup of the highest detected point group (%s). If this is because the symmetry increased, try to start the calculation again from the last geometry, after checking any symmetry-dependent input, such as DOCC." % (PointGroup.bits_to_full_name(user.bits()), PointGroup.bits_to_full_name(pg.bits())))
            # If we make it here, what the user specified is good.
            pg = user
    return pg
def reset_point_group(self, pgname):
    """Force the symmetry specification to *pgname* (lower-cased), then
    re-detect and install the resulting computational point group."""
    self.PYsymmetry_from_input = pgname.lower()
    detected = self.find_point_group()
    self.set_point_group(detected)
def find_highest_point_group(self, tol=DEFAULT_SYM_TOL):
    """Find the highest D2h point group from Jet and Ed's code: c1
    ci c2 cs d2 c2v c2h d2h. Ignore the user-specified subgroup in
    this case.

    Returns
    -------
    PointGroup
        Group built from the OR of all symmetry-operation bits that hold.
    """
    pg_bits = 0
    # The order of the next 2 arrays MUST match!
    symm_bit = [
        SymmOps['C2_z'],
        SymmOps['C2_y'],
        SymmOps['C2_x'],
        SymmOps['i'],
        SymmOps['Sigma_xy'],
        SymmOps['Sigma_xz'],
        SymmOps['Sigma_yz']]
    symm_func = [
        SymmetryOperation.c2_z,
        SymmetryOperation.c2_y,
        SymmetryOperation.c2_x,
        SymmetryOperation.i,
        SymmetryOperation.sigma_xy,
        SymmetryOperation.sigma_xz,
        SymmetryOperation.sigma_yz]
    symop = SymmetryOperation()
    matching_atom = -1
    # Only needs to detect the 8 symmetry operations
    # (identity is implicit, so just the 7 non-trivial ones are tested).
    for g in range(7):
        # Call the function pointer: loads the operation's matrix into symop.
        symm_func[g](symop)
        found = True
        for at in range(self.natom()):
            # All 7 D2h operations are diagonal sign flips, so the matrix
            # diagonal fully specifies the transformation.
            op = [symop[0][0], symop[1][1], symop[2][2]]
            pos = naivemult(self.xyz(at), op)
            matching_atom = self.atom_at_position(pos, tol)
            if matching_atom >= 0:
                if not self.atoms[at].is_equivalent_to(self.atoms[matching_atom]):
                    found = False
                    break
            else:
                found = False
                break
        if found:
            pg_bits |= symm_bit[g]
    return PointGroup(pg_bits)
def symmetry_frame(self, tol=DEFAULT_SYM_TOL):
    """Determine symmetry reference frame. If noreorient is not set,
    this is the rotation matrix applied to the geometry in update_geometry.

    Strategy: look for (1) a C2 axis, (2) a second C2 axis perpendicular
    to it, (3) a vertical mirror plane, and (4) failing inversion/C2, any
    mirror plane; then assemble a right-handed frame whose columns are
    the x, y, z axes of the symmetry frame.

    >>> print(H2OH2O.symmetry_frame())
    [[1.0, -0.0, 0.0], [0.0, 1.0, 0.0], [0.0, -0.0, 1.0]]
    """
    com = self.center_of_mass()
    if self.wholegeom is not None:
        current_geom = self.wholegeom
    else:
        current_geom = self.geometry(np_out=True)
    # Work in COM-shifted coordinates so axes/planes pass through the origin
    shifted_geom = current_geom - np.asarray(com)
    shifted_geom = shifted_geom.tolist()
    worldxaxis = [1.0, 0.0, 0.0]
    worldyaxis = [0.0, 1.0, 0.0]
    worldzaxis = [0.0, 0.0, 1.0]
    sigma = [0.0, 0.0, 0.0]
    sigmav = [0.0, 0.0, 0.0]
    c2axis = [0.0, 0.0, 0.0]
    c2axisperp = [0.0, 0.0, 0.0]
    linear, planar = self.is_linear_planar(tol)
    have_inversion = self.has_inversion(com, tol)
    # check for C2 axis
    have_c2axis = False
    if self.natom() < 2:
        have_c2axis = True
        c2axis = [0.0, 0.0, 1.0]
    elif linear:
        # for a linear molecule the molecular axis itself is a C-infinity
        # (hence C2) axis
        have_c2axis = True
        c2axis = sub(self.xyz(1), self.xyz(0))
        c2axis = normalize(c2axis)
    elif planar and have_inversion:
        # there is a c2 axis that won't be found using the usual
        # algorithm. find two noncolinear atom-atom vectors (we know
        # that linear == 0)
        BA = sub(self.xyz(1), self.xyz(0))
        BA = normalize(BA)
        for i in range(2, self.natom()):
            CA = sub(self.xyz(i), self.xyz(0))
            CA = normalize(CA)
            BAxCA = cross(BA, CA)
            if norm(BAxCA) > tol:
                have_c2axis = True
                BAxCA = normalize(BAxCA)
                c2axis = copy.deepcopy(BAxCA)
                break
    else:
        # loop through pairs of atoms to find c2 axis candidates
        for i in range(self.natom()):
            A = shifted_geom[i]
            AdotA = dot(A, A)
            for j in range(i + 1):
                # the atoms must be identical
                if not self.atoms[i].is_equivalent_to(self.atoms[j]):
                    continue
                B = shifted_geom[j]
                # the atoms must be the same distance from the com
                if math.fabs(AdotA - dot(B, B)) > tol:
                    continue
                # candidate axis is the bisector of the two positions
                axis = add(A, B)
                # atoms colinear with the com don't work
                if norm(axis) < tol:
                    continue
                axis = normalize(axis)
                if self.is_axis(com, axis, 2, tol):
                    have_c2axis = True
                    c2axis = copy.deepcopy(axis)
                    break
            else:
                continue
            # inner loop broke (axis found): break out of the outer loop too
            break
    # symmframe found c2axis
    c2like = 'ZAxis'
    if have_c2axis:
        # try to make the sign of the axis correspond to one of the world axes
        c2like, c2axis = self.like_world_axis(c2axis, worldxaxis, worldyaxis, worldzaxis)
    # check for c2 axis perp to first c2 axis
    have_c2axisperp = False
    if have_c2axis:
        if self.natom() < 2:
            have_c2axisperp = True
            c2axisperp = [1.0, 0.0, 0.0]
        elif linear:
            # a linear molecule only has perpendicular C2s if it has inversion
            if have_inversion:
                have_c2axisperp = True
                c2axisperp = perp_unit(c2axis, [0.0, 0.0, 1.0])
        else:
            # loop through pairs of atoms to find c2 axis candidates
            for i in range(self.natom()):
                A = sub(self.xyz(i), com)
                AdotA = dot(A, A)
                for j in range(i):
                    # the atoms must be identical
                    if not self.atoms[i].is_equivalent_to(self.atoms[j]):
                        continue
                    B = sub(self.xyz(j), com)
                    # the atoms must be the same distance from the com
                    if math.fabs(AdotA - dot(B, B)) > tol:
                        continue
                    axis = add(A, B)
                    # atoms colinear with the com don't work
                    if norm(axis) < tol:
                        continue
                    axis = normalize(axis)
                    # if axis is not perp continue
                    if math.fabs(dot(axis, c2axis)) > tol:
                        continue
                    if self.is_axis(com, axis, 2, tol):
                        have_c2axisperp = True
                        c2axisperp = copy.deepcopy(axis)
                        break
                else:
                    continue
                break
        # symmframe found c2axisperp
        if have_c2axisperp:
            # try to make the sign of the axis correspond to one of the world axes
            c2perplike, c2axisperp = self.like_world_axis(c2axisperp, worldxaxis, worldyaxis, worldzaxis)
            # try to make c2axis the z axis (swap the two axes if the
            # perpendicular one is the z-like one)
            if c2perplike == 'ZAxis':
                tmpv = copy.deepcopy(c2axisperp)
                c2axisperp = copy.deepcopy(c2axis)
                c2axis = copy.deepcopy(tmpv)
                c2perplike = c2like
                c2like = 'ZAxis'
            if c2like != 'ZAxis':
                # rebuild c2axis as the cross product so it becomes z-like
                if c2like == 'XAxis':
                    c2axis = cross(c2axis, c2axisperp)
                else:
                    c2axis = cross(c2axisperp, c2axis)
                c2like, c2axis = self.like_world_axis(c2axis, worldxaxis, worldyaxis, worldzaxis)
            # try to make c2axisperplike the x axis
            if c2perplike == 'YAxis':
                c2axisperp = cross(c2axisperp, c2axis)
                c2perplike, c2axisperp = self.like_world_axis(c2axisperp, worldxaxis, worldyaxis, worldzaxis)
    # Check for vertical plane
    have_sigmav = False
    if have_c2axis:
        if self.natom() < 2:
            have_sigmav = True
            sigmav = copy.deepcopy(c2axisperp)
        elif linear:
            have_sigmav = True
            if have_c2axisperp:
                sigmav = copy.deepcopy(c2axisperp)
            else:
                sigmav = perp_unit(c2axis, [0.0, 0.0, 1.0])
        else:
            # loop through pairs of atoms to find sigma v plane candidates
            for i in range(self.natom()):
                A = sub(self.xyz(i), com)
                AdotA = dot(A, A)
                # the second atom can equal i because i might be in the plane
                for j in range(i + 1):
                    # the atoms must be identical
                    if not self.atoms[i].is_equivalent_to(self.atoms[j]):
                        continue
                    B = sub(self.xyz(j), com)
                    # the atoms must be the same distance from the com
                    if math.fabs(AdotA - dot(B, B)) > tol:
                        continue
                    inplane = add(B, A)
                    norm_inplane = norm(inplane)
                    if norm_inplane < tol:
                        continue
                    inplane = scale(inplane, 1.0 / norm_inplane)
                    # plane normal = c2axis x (in-plane direction)
                    perp = cross(c2axis, inplane)
                    norm_perp = norm(perp)
                    if norm_perp < tol:
                        continue
                    perp = scale(perp, 1.0 / norm_perp)
                    if self.is_plane(com, perp, tol):
                        have_sigmav = True
                        sigmav = copy.deepcopy(perp)
                        break
                else:
                    continue
                break
        # symmframe found sigmav
        if have_sigmav:
            # try to make the sign of the oop vec correspond to one of the world axes
            sigmavlike, sigmav = self.like_world_axis(sigmav, worldxaxis, worldyaxis, worldzaxis)
            # Choose sigmav to be the world x axis, if possible
            if c2like == 'ZAxis' and sigmavlike == 'YAxis':
                sigmav = cross(sigmav, c2axis)
            elif c2like == 'YAxis' and sigmavlike == 'ZAxis':
                sigmav = cross(c2axis, sigmav)
    # under certain conditions i need to know if there is any sigma plane
    have_sigma = False
    if not have_inversion and not have_c2axis:
        if planar:
            # find two noncolinear atom-atom vectors
            # we know that linear==0 since !have_c2axis
            BA = sub(self.xyz(1), self.xyz(0))
            BA = normalize(BA)
            for i in range(2, self.natom()):
                CA = sub(self.xyz(i), self.xyz(0))
                CA = normalize(CA)
                BAxCA = cross(BA, CA)
                if norm(BAxCA) > tol:
                    have_sigma = True
                    BAxCA = normalize(BAxCA)
                    sigma = copy.deepcopy(BAxCA)
                    break
        else:
            # loop through pairs of atoms to contruct trial planes
            for i in range(self.natom()):
                A = sub(self.xyz(i), com)
                AdotA = dot(A, A)
                for j in range(i):
                    # the atoms must be identical
                    if not self.atoms[i].is_equivalent_to(self.atoms[j]):
                        continue
                    B = sub(self.xyz(j), com)
                    BdotB = dot(B, B)
                    # the atoms must be the same distance from the com
                    if math.fabs(AdotA - BdotB) > tol:
                        continue
                    # trial plane normal = vector between the two atoms
                    perp = sub(B, A)
                    norm_perp = norm(perp)
                    if norm_perp < tol:
                        continue
                    perp = scale(perp, 1.0 / norm_perp)
                    if self.is_plane(com, perp, tol):
                        have_sigma = True
                        sigma = copy.deepcopy(perp)
                        break
                else:
                    continue
                break
    # foundsigma
    if have_sigma:
        # try to make the sign of the oop vec correspond to one of the world axes
        xlikeness = math.fabs(dot(sigma, worldxaxis))
        ylikeness = math.fabs(dot(sigma, worldyaxis))
        zlikeness = math.fabs(dot(sigma, worldzaxis))
        if xlikeness > ylikeness and xlikeness > zlikeness:
            if dot(sigma, worldxaxis) < 0:
                sigma = scale(sigma, -1.0)
        elif ylikeness > zlikeness:
            if dot(sigma, worldyaxis) < 0:
                sigma = scale(sigma, -1.0)
        else:
            if dot(sigma, worldzaxis) < 0:
                sigma = scale(sigma, -1.0)
    # Find the three axes for the symmetry frame
    xaxis = copy.deepcopy(worldxaxis)
    zaxis = copy.deepcopy(worldzaxis)
    if have_c2axis:
        zaxis = copy.deepcopy(c2axis)
        if have_sigmav:
            xaxis = copy.deepcopy(sigmav)
        elif have_c2axisperp:
            xaxis = copy.deepcopy(c2axisperp)
        else:
            # any axis orthogonal to the zaxis will do
            xaxis = perp_unit(zaxis, zaxis)
    elif have_sigma:
        zaxis = copy.deepcopy(sigma)
        xaxis = perp_unit(zaxis, zaxis)
    # Clean up our z axis (snap near-zero components to exactly 0)
    if math.fabs(zaxis[0]) < NOISY_ZERO:
        zaxis[0] = 0.0
    if math.fabs(zaxis[1]) < NOISY_ZERO:
        zaxis[1] = 0.0
    if math.fabs(zaxis[2]) < NOISY_ZERO:
        zaxis[2] = 0.0
    # Clean up our x axis
    if math.fabs(xaxis[0]) < NOISY_ZERO:
        xaxis[0] = 0.0
    if math.fabs(xaxis[1]) < NOISY_ZERO:
        xaxis[1] = 0.0
    if math.fabs(xaxis[2]) < NOISY_ZERO:
        xaxis[2] = 0.0
    # the y is then -x cross z
    yaxis = scale(cross(xaxis, zaxis), -1.0)
    #print("xaxis %20.14lf %20.14lf %20.14lf" % (xaxis[0], xaxis[1], xaxis[2]))
    #print("yaxis %20.14lf %20.14lf %20.14lf" % (yaxis[0], yaxis[1], yaxis[2]))
    #print("zaxis %20.14lf %20.14lf %20.14lf" % (zaxis[0], zaxis[1], zaxis[2]))
    # columns of the returned frame are the x, y, z symmetry axes
    frame = zero(3, 3)
    for i in range(3):
        frame[i][0] = xaxis[i]
        frame[i][1] = yaxis[i]
        frame[i][2] = zaxis[i]
    return frame
def release_symmetry_information(self):
    """Discard cached uniqueness/equivalence data, resetting every
    symmetry bookkeeping field to 0."""
    self.PYnunique = self.nequiv = self.PYatom_to_unique = self.equiv = 0
def form_symmetry_information(self, tol=DEFAULT_SYM_TOL):
    """Initialize molecule-specific symmetry information.
    Uses the point group object obtained by calling point_group().

    Populates:
      * self.PYnunique        -- number of symmetry-unique atoms
      * self.nequiv           -- nequiv[u] = count of atoms equivalent to unique atom u
      * self.PYatom_to_unique -- maps each atom index to its unique-atom index
      * self.equiv            -- lists of equivalent atoms; equiv[u][0] is the primary

    tol : distance tolerance used when matching symmetry-transformed
          positions (and masses) to existing atoms.
    """
    if self.equiv:
        self.release_symmetry_information()
    if self.natom() == 0:
        self.PYnunique = 0
        self.nequiv = 0
        self.PYatom_to_unique = 0
        self.equiv = 0
        print("""No atoms detected, returning\n""")
        return
    self.nequiv = []
    self.PYatom_to_unique = [0] * self.natom()
    self.equiv = []
    # In C1 every atom is its own unique atom.
    if self.point_group().symbol() == 'c1':
        self.PYnunique = self.natom()
        for at in range(self.natom()):
            self.nequiv.append(1)
            self.PYatom_to_unique[at] = at
            self.equiv.append([at])
        return
    # The first atom is always unique
    self.PYnunique = 1
    self.nequiv.append(1)
    self.PYatom_to_unique[0] = 0
    self.equiv.append([0])
    ct = self.point_group().char_table()
    np3 = [0.0, 0.0, 0.0]
    current_geom = self.geometry(np_out=False)
    current_Z = [self.Z(at) for at in range(self.natom())]
    current_mass = [self.mass(at) for at in range(self.natom())]
    # Find the equivalent atoms
    for i in range(1, self.natom()):
        ac = current_geom[i]
        i_is_unique = True
        i_equiv = 0
        # Apply all symmetry ops in the group to the atom
        for g in range(ct.order()):
            so = ct.symm_operation(g)
            # np3 = so * ac (3x3 matrix times coordinate vector)
            for ii in range(3):
                np3[ii] = 0
                for jj in range(3):
                    np3[ii] += so[ii][jj] * ac[jj]
            # See if the transformed atom is equivalent to a unique atom:
            # same Z, same mass (within tol), and same position (within tol)
            for j in range(self.PYnunique):
                unique = self.equiv[j][0]
                aj = current_geom[unique]
                if current_Z[unique] == current_Z[i] and \
                   abs(current_mass[unique] - current_mass[i]) < tol and \
                   distance(np3, aj) < tol:
                    i_is_unique = False
                    i_equiv = j
                    break
        if i_is_unique:
            self.nequiv.append(1)
            self.PYatom_to_unique[i] = self.PYnunique
            self.equiv.append([i])
            self.PYnunique += 1
        else:
            self.equiv[i_equiv].append(i)
            self.nequiv[i_equiv] += 1
            self.PYatom_to_unique[i] = i_equiv
    # The first atom in the equiv list is considered the primary
    # unique atom. Just to make things look pretty, make the
    # atom with the most zeros in its x, y, z coordinate the
    # unique atom. Nothing else should rely on this being done.
    ztol = 1.0e-5
    for i in range(self.PYnunique):
        maxzero = 0
        jmaxzero = 0
        for j in range(self.nequiv[i]):
            tmp = self.equiv[i][j]
            arr = np.asarray(current_geom[tmp])
            # BUG FIX: np.where(cond) returns a tuple of index arrays, so
            # len() of it is the number of array dimensions (always 1 here),
            # NOT the number of near-zero coordinates. Count them properly.
            nzero = int(np.count_nonzero(np.abs(arr) < ztol))
            if nzero > maxzero:
                maxzero = nzero
                jmaxzero = j
        tmp = self.equiv[i][jmaxzero]
        self.equiv[i][jmaxzero] = self.equiv[i][0]
        self.equiv[i][0] = tmp
def sym_label(self):
    """Return the Schoenflies symbol of the molecule's point group,
    detecting and installing the point group first if it is unset."""
    current = self.pg
    if current is None:
        self.set_point_group(self.find_point_group())
        current = self.pg
    return current.symbol()
def irrep_labels(self):
    """Return the non-symbolic irrep labels of the molecule's point group,
    detecting and installing the point group first if it is unset."""
    if self.pg is None:
        self.set_point_group(self.find_point_group())
    table = self.pg.char_table()
    return [table.gamma(h).symbol_ns() for h in range(table.nirrep())]
def symmetry_from_input(self):
    """Return the symmetry label that was specified in the input.

    >>> print(H2OH2O.symmetry_from_input())
    C1
    """
    label = self.PYsymmetry_from_input
    return label
def symmetrize(self, tol=None):
    """Force the molecule to have the symmetry specified in pg.
    This is to handle noise coming in from optking. Exception is thrown if
    atoms cannot be mapped within tol(erance).

    Each atom's new position is the average over the whole group orbit of
    R(g) applied to the position of the atom that g maps onto this one,
    which projects out the symmetry-breaking noise.
    """
    temp = zero(self.natom(), 3)
    ct = self.point_group().char_table()
    # Obtain atom mapping of atom * symm op to atom
    # Allow compute_atom_map() to use its own default, if not specified here.
    if tol is not None:
        atom_map = compute_atom_map(self, tol)
    else:
        atom_map = compute_atom_map(self)
    # Symmetrize the molecule to remove any noise
    for at in range(self.natom()):
        for g in range(ct.order()):
            Gatom = atom_map[at][g]
            so = ct.symm_operation(g)
            # Full so must be used if molecule is not in standard orientation;
            # each term divides by the group order so the sum is an average.
            temp[at][0] += so[0][0] * self.x(Gatom) / ct.order()
            temp[at][0] += so[0][1] * self.y(Gatom) / ct.order()
            temp[at][0] += so[0][2] * self.z(Gatom) / ct.order()
            temp[at][1] += so[1][0] * self.x(Gatom) / ct.order()
            temp[at][1] += so[1][1] * self.y(Gatom) / ct.order()
            temp[at][1] += so[1][2] * self.z(Gatom) / ct.order()
            temp[at][2] += so[2][0] * self.x(Gatom) / ct.order()
            temp[at][2] += so[2][1] * self.y(Gatom) / ct.order()
            temp[at][2] += so[2][2] * self.z(Gatom) / ct.order()
    # Set the geometry to ensure z-matrix variables get updated
    self.set_geometry(temp)
def schoenflies_symbol(self):
    """Return the Schoenflies symbol of the current point group."""
    point_group = self.point_group()
    return point_group.symbol()
def valid_atom_map(self, tol=0.01):
    """Check if current geometry fits current point group.

    Returns True if every symmetry operation of the current point group
    maps every atom onto the position of some atom (within *tol*).
    """
    np3 = [0.0, 0.0, 0.0]
    ct = self.point_group().char_table()
    # loop over all centers
    for at in range(self.natom()):
        ac = self.xyz(at)
        # For each operation in the pointgroup, transform the coordinates of
        # center "at" and see which atom it maps into
        for g in range(ct.order()):
            so = ct.symm_operation(g)
            # np3 = so * ac (3x3 matrix times coordinate vector)
            for ii in range(3):
                np3[ii] = 0
                for jj in range(3):
                    np3[ii] += so[ii][jj] * ac[jj]
            # no atom at the transformed position => symmetry is broken
            if self.atom_at_position(np3, tol) < 0:
                return False
    return True
# provide a more transparent name for this utility
is_symmetric = valid_atom_map
# Test a set of xyz coordinates to see if they satisfy the symmetry
# operations of the current molecule.
def is_XYZ_symmetric(self, XYZ, tol=0.01):
    """Return True if geometry *XYZ* satisfies this molecule's point-group
    symmetry. Tests on a clone, so self is left untouched."""
    testmol = self.clone()
    testmol.set_geometry(XYZ)
    return testmol.is_symmetric(tol)
#def valid_atom_map(self, tol=0.01):
# """Check if current geometry fits current point group
# """
# np3 = np.zeros(3)
# ct = self.point_group().char_table()
# current_geom = self.geometry(np_out=True)
# # loop over all centers
# for at in range(self.natom()):
# # For each operation in the pointgroup, transform the coordinates of
# # center "at" and see which atom it maps into
# for g in range(ct.order()):
# so = ct.symm_operation(g)
# np3 = so.dot(current_geom[at])
# if self.atom_at_position(np3, tol) < 0:
# return False
# return True
def full_point_group_with_n(self):
    """Return the full point group name with the placeholder 'n' left in,
    e.g. 'Cnv' or 'Sn' (see full_pg_n() for the value of n)."""
    return self.full_pg
def full_pg_n(self):
    """Return n in Cnv, etc.; If there is no n (e.g. Td)
    it's the highest-order rotation axis.
    """
    return self.PYfull_pg_n
def get_full_point_group(self):
    """Return point group name such as C3v or S8, with the order n
    substituted into the stored template name.
    (method name in libmints is full_point_group)
    """
    name = self.full_pg
    # These don't need changes - have no 'n'.
    if name in ['D_inf_h', 'C_inf_v', 'C1', 'Cs', 'Ci', 'Td', 'Oh', 'Ih']:
        return name
    return name.replace('n', str(self.PYfull_pg_n), 1)
# <<< Methods for Uniqueness >>> (assume molecular point group has been determined)
def nunique(self):
    """Return the number of symmetry-unique atoms
    (populated by form_symmetry_information)."""
    return self.PYnunique
def unique(self, iuniq):
    """Return the overall atom index of the iuniq'th unique atom
    (the primary member of its equivalence class)."""
    return self.equiv[iuniq][0]
def nequivalent(self, iuniq):
    """Return the number of atoms equivalent to unique atom iuniq
    (including the unique atom itself)."""
    return self.nequiv[iuniq]
def equivalent(self, iuniq, j):
    """Return the j'th atom equivalent to unique atom iuniq
    (j == 0 gives the unique atom itself)."""
    return self.equiv[iuniq][j]
def atom_to_unique(self, iatom):
    """Converts an atom number to the number of its generating unique atom.
    The return value is in [0, nunique).
    """
    return self.PYatom_to_unique[iatom]
def atom_to_unique_offset(self, iatom):
    """Converts an atom number to the offset of this atom in the list of
    atoms generated from its unique atom. The unique atom itself is
    offset 0.

    Raises ValidationError if the symmetry bookkeeping is inconsistent
    and the atom is not found among its own equivalents.
    """
    iuniq = self.PYatom_to_unique[iatom]
    # Search only the first nequiv entries, matching the recorded count.
    for offset in range(self.nequiv[iuniq]):
        if self.equiv[iuniq][offset] == iatom:
            return offset
    # (the dead `return -1` that followed the raise has been removed)
    raise ValidationError("Molecule::atom_to_unique_offset: I should've found the atom requested...but didn't.")
def max_nequivalent(self):
    """Return the maximum number of equivalent atoms over all unique
    atoms (0 when there are no unique atoms)."""
    sizes = [self.nequivalent(i) for i in range(self.nunique())]
    # Seeding with 0 reproduces the original accumulator's floor.
    return max([0] + sizes)
def atom_present_in_geom(geom, b, tol=DEFAULT_SYM_TOL):
    """Function used by set_full_point_group() to scan a given geometry
    and determine if an atom is present at a given location *b*
    (within distance *tol*)."""
    return any(distance(b, [row[0], row[1], row[2]]) < tol for row in geom)
def matrix_3d_rotation_Cn(coord, axis, reflect, tol=DEFAULT_SYM_TOL, max_Cn_to_check=-1):
    """Find maximum n in Cn around given axis, i.e., the highest-order rotation axis.
    @param coord Matrix : points to rotate - column dim is 3
    @param axis Vector3 : axis around which to rotate, does not need to be normalized
    @param bool reflect : if true, really look for Sn not Cn
    @param tol : tolerance passed to the row-permuted equality check
    @param max_Cn_to_check : upper bound on n to test; -1 means use len(coord)
    @returns n
    """
    # Check all atoms. In future, make more intelligent.
    max_possible = len(coord) if max_Cn_to_check == -1 else max_Cn_to_check
    Cn = 1  # C1 is there for sure
    for n in range(2, max_possible + 1):
        rotated_mat = matrix_3d_rotation(coord, axis, 2 * math.pi / n, reflect)
        # keep the largest n whose rotation maps the point set onto itself
        if equal_but_for_row_order(coord, rotated_mat, tol):
            Cn = n
    return Cn
def matrix_3d_rotation(mat, axis, phi, Sn):
    """For a matrix of 3D vectors (ncol==3), rotate a set of points around an
    arbitrary axis. Vectors are the rows of the matrix.
    @param axis Vector3 : axis around which to rotate (need not be normalized)
    @param phi double : magnitude of rotation in rad
    @param Sn bool : if true, then also reflect in plane through origin and perpendicular to rotation
    @returns SharedMatrix with rotated points (rows)
    """
    if len(mat[0]) != 3 or len(axis) != 3:
        raise ValidationError("matrix_3d_rotation: Can only rotate matrix with 3d vectors")
    # Normalize rotation vector
    [wx, wy, wz] = normalize(axis)
    cp = 1.0 - math.cos(phi)
    # Build the rotation matrix (Rodrigues form with axis (wx, wy, wz))
    R = zero(3, 3)
    R[0][0] = wx * wx * cp + math.cos(phi)
    R[0][1] = wx * wy * cp + math.sin(phi) * wz * -1
    R[0][2] = wx * wz * cp + math.sin(phi) * wy
    R[1][0] = wx * wy * cp + math.sin(phi) * wz
    R[1][1] = wy * wy * cp + math.cos(phi)
    R[1][2] = wy * wz * cp + math.sin(phi) * wx * -1
    R[2][0] = wx * wz * cp + math.sin(phi) * wy * -1
    R[2][1] = wy * wz * cp + math.sin(phi) * wx
    R[2][2] = wz * wz * cp + math.cos(phi)
    # R * coord^t = R_coord^t or coord * R^t = R_coord
    # (row-vector convention: post-multiply by the transpose)
    rotated_coord = mult(mat, transpose(R))
    if Sn:
        # Reflection through the plane perpendicular to the axis:
        # delta_ij - 2 a_i a_j / ||a||^2 (Householder matrix, axis normalized)
        R = identity(3)
        R[0][0] -= 2 * wx * wx
        R[1][1] -= 2 * wy * wy
        R[2][2] -= 2 * wz * wz
        R[1][0] = 2 * wx * wy
        R[2][0] = 2 * wx * wz
        R[2][1] = 2 * wy * wz
        R[0][1] = 2 * wx * wy
        R[0][2] = 2 * wx * wz
        R[1][2] = 2 * wy * wz
        # NOTE(review): off-diagonals carry +2*a_i*a_j here, not -2*a_i*a_j as
        # the Householder formula in the comment suggests — confirm intended.
        rotated_coord = mult(rotated_coord, transpose(R))
    return rotated_coord
def equal_but_for_row_order(mat, rhs, tol=DEFAULT_SYM_TOL):
    """Checks matrix equality, but allows rows to be in a different order.
    @param rhs Matrix to compare to.
    @returns true if equal, otherwise false.
    """
    for row in mat:
        row_matched = False
        # As in the original, only the first len(mat) rows of rhs are tried.
        for idx in range(len(mat)):
            candidate = rhs[idx]
            if all(abs(row[k] - candidate[k]) <= tol for k in range(len(row))):
                row_matched = True
                break
        if not row_matched:
            return False
    return True
def compute_atom_map(mol, tol=0.05):
    """Computes atom mappings during symmetry operations. Useful in
    generating SO information and Cartesian displacement SALCs.
    param mol Molecule to form mapping matrix from.
    param tol distance tolerance for matching a transformed position to an atom.
    returns Integer matrix of dimension natoms X nirreps; entry [i][g] is the
    atom that operation g maps atom i onto.
    Raises ValidationError if some operation fails to map an atom onto any atom.
    """
    # create the character table for the point group
    ct = mol.point_group().char_table()
    natom = mol.natom()
    ng = ct.order()
    # natom x ng table, filled below
    atom_map = [0] * natom
    for i in range(natom):
        atom_map[i] = [0] * ng
    np3 = [0.0, 0.0, 0.0]
    so = SymmetryOperation()
    # loop over all centers
    for i in range(natom):
        ac = mol.xyz(i)
        # then for each symop in the pointgroup, transform the coordinates of
        # center "i" and see which atom it maps into
        for g in range(ng):
            so = ct.symm_operation(g)
            # np3 = so * ac (3x3 matrix times coordinate vector)
            for ii in range(3):
                np3[ii] = 0
                for jj in range(3):
                    np3[ii] += so[ii][jj] * ac[jj]
            atom_map[i][g] = mol.atom_at_position(np3, tol)
            if atom_map[i][g] < 0:
                print(""" Molecule:\n""")
                mol.print_out()
                print(""" attempted to find atom at\n""")
                print(""" %lf %lf %lf\n""" % (np3[0], np3[1], np3[2]))
                raise ValidationError("ERROR: Symmetry operation %d did not map atom %d to another atom:\n" % (g, i + 1))
    return atom_map
# TODO outfile
# ignored =, +, 0, += assignment operators
# no pubchem
# TODO rename save_string_for_psi4
# TODO add no_com no_reorint in save string for psi4
|
psi4/psi4
|
psi4/driver/qcdb/libmintsmolecule.py
|
Python
|
lgpl-3.0
| 121,856
|
[
"Elk",
"Psi4"
] |
800e7d78631a1f62ce23483ffd68bb8313d737d238dc0882a6164527139a2959
|
from django.test import TestCase
from django.core.urlresolvers import reverse
from django.core import mail
from django.conf import settings
from datetime import *
from tradeschool.models import *
class RegistrationTestCase(TestCase):
    """
    Tests the process of registering and unregistering
    to a course using the frontend forms.
    """
    # fixture data loaded before each test: email templates, teacher
    # info, and sample branches/courses
    fixtures = ['email_initial_data.json',
                'teacher-info.json',
                'sample_data.json'
                ]

    def setUp(self):
        """Prepare a branch, a course, valid form data, and the
        registration URL used by the tests."""
        # test in english so we count html strings correctly
        settings.LANGUAGE_CODE = 'en'
        # change the language to english for language-based assertations
        self.branch = Branch.objects.all()[0]
        self.branch.language = 'en'
        self.branch.save()
        self.course = ApprovedCourse.objects.filter(branch=self.branch)[0]
        # minimal valid student form data (prefixed per form)
        self.valid_data = {
            'student-fullname': 'test student',
            'student-email': 'test123!@email.com',
            'student-phone': '',
        }
        self.url = reverse('course-register', kwargs={
            'branch_slug': self.branch.slug,
            'course_slug': self.course.slug
        })

    def compare_registration_to_data(self, registration_obj):
        """ Asserts that the objects that were created after a successful
        registration submission match the data that was used in the forms.
        """
        # get ApprovedCourse instance to verify against
        approved_course = ApprovedCourse.objects.get(
            pk=registration_obj.course.pk)
        self.assertEqual(approved_course, self.course)
        self.assertEqual(
            registration_obj.student.fullname,
            self.valid_data['student-fullname']
        )
        self.assertEqual(registration_obj.registration_status, 'registered')
        self.assertTrue(self.branch in registration_obj.student.branches.all())
        # each registered barter item should match the submitted item pk
        for registered_item in registration_obj.items.all():
            self.assertEqual(
                registered_item.pk,
                int(self.valid_data['item-items'][0])
            )

    def do_register(self):
        """ Register to a given course.

        Picks the course's first barter item, posts the valid form data,
        and returns the (followed) response.
        """
        item = self.course.barteritem_set.all()[0]
        self.valid_data['item-items'] = [item.pk, ]
        # post a valid form
        response = self.client.post(
            self.url, data=self.valid_data, follow=True)
        return response

    def test_view_is_loading(self):
        """
        Tests that the course-register view loads with the correct template.
        """
        response = self.client.get(self.url)
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(self.branch.slug + '/course_register.html')

    def test_registration_empty_form(self):
        """
        Test that an empty submitted registration form returns the expected
        number of errors, for fullname, email, and at least one checked item.
        """
        data = {}
        # post an empty form
        response = self.client.post(self.url, data=data, follow=True)
        # an empty form should return 3 errors for the required fields
        # (each rendered error message starts with 'Please')
        self.assertContains(response, 'Please', count=3)

    def test_registration_valid_form(self):
        """
        Tests that a submission of valid data
        results in a successful registration.
        """
        response = self.do_register()
        self.assertTemplateUsed(self.branch.slug + '/course_registered.html')
        # check that the registration got saved correctly
        self.compare_registration_to_data(response.context['registration'])

    def test_student_confirmation_email(self):
        """
        Tests that the StudentConfirmation is sent
        after a course is approved.
        """
        # register to a course
        self.do_register()
        # test that one message was sent.
        self.assertEqual(len(mail.outbox), 1)
        email = self.course.studentconfirmation
        # verify that the subject of the message is correct.
        self.assertEqual(mail.outbox[0].subject, email.subject)

    def test_register_again(self):
        """ Tests that a student who is already registered to a scheduled class
        can't register to it again.
        """
        # register
        response = self.do_register()
        # register again
        response = self.do_register()
        # make sure the same template is used (didn't redirect)
        self.assertTemplateUsed(self.branch.slug + '/course_registered.html')
        # check that the error message is in the page
        self.assertContains(
            response, 'You are already registered to this class')

    def test_capacity(self):
        """
        Tests that the Join button is only visible
        if there are empty seats in the course.
        This should also test that a POST request
        can't be made to a course in full capacity.
        """
        response = self.client.get(self.url)
        # the course has not registrations,
        # so the join button should be in the HTML
        self.assertContains(response, 'value="Join"')
        # add registrations to fill the course
        for i in range(self.course.max_students):
            # first create a student to register to the scheduled class
            student_fullname = "student-%i" % i
            student_email = "%i@email.com" % i
            student = Person.objects.create_user(
                fullname=student_fullname,
                email=student_email,
                slug=student_fullname
            )
            student.save()
            student.branches.add(self.branch)
            # then create the registration itself
            registration = Registration(
                course=self.course,
                student=student
            )
            registration.save()
        # visit the page again
        response = self.client.get(self.url)
        # the course should be full,
        # so the join button should NOT be in the HTML
        self.assertNotContains(response, 'value="Join"')

    def test_unregistration(self):
        """
        Tests that the course-unregister view loads with the
        correct template, that unregistering changes the status in
        the Registration object, and that it is not possible to
        unregister more than once.
        """
        # register
        response = self.do_register()
        registration = response.context['registration']
        # construct unregister url from branch, course,
        # and saved registration
        url = reverse('course-unregister', kwargs={
            'branch_slug': self.branch.slug,
            'course_slug': self.course.slug,
            'student_slug': registration.student.slug
        })
        # go to the url
        response = self.client.get(url)
        # check that the correct template is loading
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(self.branch.slug + '/course_register.html')
        # unregister
        response = self.client.post(url, data={}, follow=True)
        # check that the page was redirected
        self.assertRedirects(
            response,
            response.redirect_chain[0][0],
            response.redirect_chain[0][1]
        )
        self.assertTemplateUsed(self.branch.slug + '/course_list.html')
        # get registration again after it was saved in the view function
        registration = Registration.objects.get(pk=registration.pk)
        # check that the registration status was changed
        self.assertEqual(registration.registration_status, 'unregistered')
        # try unregistering again
        response = self.client.get(url)
        # make sure it's not possible
        self.assertContains(response, 'already unregistered')

    def tearDown(self):
        """ Delete branch files in case something went wrong
        and the files weren't deleted.
        """
        # delete branches' files
        for branch in Branch.objects.all():
            branch.delete_files()
|
orzubalsky/tradeschool
|
ts/apps/tradeschool/tests/registration.py
|
Python
|
gpl-3.0
| 8,207
|
[
"VisIt"
] |
1d3ff50e4a43f52b203f2e1b11dc60093b4c693ccf0428aa83059c28a768bce1
|
# Copyright 2013, 2014, 2015, 2016, 2017, 2018, 2019, 2020 Kevin Reid and the ShinySDR contributors
#
# This file is part of ShinySDR.
#
# ShinySDR is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ShinySDR is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ShinySDR. If not, see <http://www.gnu.org/licenses/>.
"""Code defining the API that is actually exposed over HTTP."""
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import six
from six.moves import urllib
from six.moves.urllib.parse import urljoin
from twisted.application.service import Service
from twisted.internet import defer
from twisted.internet import endpoints
from twisted.plugin import getPlugins
from twisted.logger import Logger
from twisted.web import static
from twisted.web.resource import Resource
from twisted.web.util import Redirect
import txws
import shinysdr.i.db
from shinysdr.i.json import serialize
from shinysdr.i.modes import get_modes
from shinysdr.i.network.base import CAP_OBJECT_PATH_ELEMENT, IWebEntryPoint, SiteWithDefaultHeaders, SlashedResource, UNIQUE_PUBLIC_CAP, WebServiceCommon, deps_path, static_resource_path, endpoint_string_to_url
from shinysdr.i.network.export_http import CapAccessResource
from shinysdr.i.network.export_ws import WebSocketDispatcherProtocol
from shinysdr.i.poller import the_poller
from shinysdr.i.pycompat import defaultstr
from shinysdr.i.shared_test_objects import SHARED_TEST_OBJECTS_CAP
from shinysdr.interfaces import _IClientResourceDef
from shinysdr.twisted_ext import FactoryWithArgs
from shinysdr.values import SubscriptionContext
def _make_static_resource(pathname):
    """Build a twisted.web static.File resource for *pathname* with the
    content-type defaults this server wants."""
    # str() because if we happen to pass unicode as the pathname then
    # directory listings break (discovered with Twisted 16.4.1).
    resource = static.File(
        str(pathname),
        defaultType=b'text/plain',
        ignoredExts=[b'.html'])
    resource.contentTypes[b'.csv'] = b'text/csv'
    resource.indexNames = [b'index.html']
    return resource
class WebService(Service):
    """Twisted service that exposes the cap table over HTTP and a
    WebSocket endpoint, managing both listening ports together."""
    __log = Logger()

    def __init__(self,
                 reactor,
                 cap_table,
                 http_endpoint,
                 ws_endpoint,
                 http_base_url,
                 ws_base_url,
                 root_cap,
                 title):
        # Constants
        self.__http_endpoint_string = str(http_endpoint)
        self.__http_endpoint = endpoints.serverFromString(reactor, self.__http_endpoint_string)
        self.__ws_endpoint = endpoints.serverFromString(reactor, str(ws_endpoint))
        # host-relative URL for the root capability
        self.__visit_path = _make_cap_url(root_cap)
        self.__http_base_url_if_explicit = http_base_url
        wcommon = WebServiceCommon(
            reactor=reactor,
            title=title,
            ws_endpoint_string=ws_endpoint,
            ws_base_url=ws_base_url)
        # TODO: Create poller actually for the given reactor w/o redundancy -- perhaps there should be a one-poller-per-reactor map
        subscription_context = SubscriptionContext(reactor=reactor, poller=the_poller)

        def resource_factory(entry_point):
            # TODO: If not an IWebEntryPoint, return a generic result
            return IWebEntryPoint(entry_point).get_entry_point_resource(wcommon=wcommon)  # pylint: disable=redundant-keyword-arg

        server_root = CapAccessResource(cap_table=cap_table, resource_factory=resource_factory)
        _put_root_static(wcommon, server_root)
        if UNIQUE_PUBLIC_CAP in cap_table:
            # TODO: consider factoring out "generate URL for cap"
            server_root.putChild('', Redirect(_make_cap_url(UNIQUE_PUBLIC_CAP)))
        self.__ws_protocol = txws.WebSocketFactory(
            FactoryWithArgs.forProtocol(WebSocketDispatcherProtocol, cap_table, subscription_context))
        self.__site = SiteWithDefaultHeaders(server_root)
        # listening-port objects, populated by startService
        self.__ws_port_obj = None
        self.__http_port_obj = None

    @defer.inlineCallbacks
    def startService(self):
        """Start listening on both the HTTP and WebSocket endpoints."""
        Service.startService(self)
        if self.__ws_port_obj is not None:
            raise Exception('Already started')
        self.__http_port_obj = yield self.__http_endpoint.listen(self.__site)
        self.__ws_port_obj = yield self.__ws_endpoint.listen(self.__ws_protocol)

    def stopService(self):
        """Stop listening; returns a Deferred firing when both ports close."""
        Service.stopService(self)
        if self.__ws_port_obj is None:
            raise Exception('Not started, cannot stop')
        # TODO: Does Twisted already have something to bundle up a bunch of ports for shutdown?
        return defer.DeferredList([
            self.__http_port_obj.stopListening(),
            self.__ws_port_obj.stopListening()])

    def get_host_relative_url(self):
        """Get the host-relative URL of the service.
        This method exists primarily for testing purposes."""
        return self.__visit_path

    def get_url(self):
        """Get the absolute URL of the service. Cannot be used before startService is called.
        This method exists primarily for testing purposes."""
        # TODO: This logic is duplicated with wcommon.make_websocket_url
        if self.__http_base_url_if_explicit is not None:
            return urljoin(self.__http_base_url_if_explicit, self.get_host_relative_url())
        else:
            # TODO: need to know canonical domain name (endpoint_string_to_url defaults to localhost); can we extract the information from the certificate when applicable?
            return endpoint_string_to_url(
                self.__http_endpoint_string,
                listening_port=self.__http_port_obj,
                path=self.get_host_relative_url())

    def announce(self, open_client):
        """interface used by shinysdr.main

        If open_client is true, also open the URL in the user's browser.
        """
        url = self.get_url()
        if open_client:
            self.__log.info('Opening {url}', url=url)
            import webbrowser  # lazy load
            webbrowser.open(url, new=1, autoraise=True)
        else:
            self.__log.info('Visit {url}', url=url)
def _put_root_static(wcommon, container_resource):
    """Place all the simple resources, that are not necessarily sourced from files but at least are unchanging and public."""
    # Top-level static directories; the empty name maps to the index page.
    for page in ['', 'client', 'test', 'manual', 'tools']:
        target = page if page != '' else 'index.html'
        container_resource.putChild(page, _make_static_resource(os.path.join(static_resource_path, target)))

    # Serve bundled client-side dependencies under /client/.
    client = container_resource.children['client']
    for dep in ['require.js', 'text.js']:
        client.putChild(dep, _make_static_resource(os.path.join(deps_path, dep)))
    for dep in ['measviz.js', 'measviz.css']:
        client.putChild(dep, _make_static_resource(os.path.join(deps_path, 'measviz/src', dep)))

    # Serve the Jasmine test framework under /test/jasmine/.
    test = container_resource.children['test']
    jasmine = SlashedResource()
    test.putChild('jasmine', jasmine)
    for dep in ['jasmine.css', 'jasmine.js', 'jasmine-html.js']:
        jasmine.putChild(dep, _make_static_resource(os.path.join(
            deps_path, 'jasmine/lib/jasmine-core/', dep)))

    # Special resources that are not plain directory contents.
    container_resource.putChild('favicon.ico',
        _make_static_resource(os.path.join(static_resource_path, 'client/icon/icon-32.png')))
    client.putChild('web-app-manifest.json',
        WebAppManifestResource(wcommon))
    _put_plugin_resources(wcommon, client)
def _put_plugin_resources(wcommon, client_resource):
    """Plugin-defined resources and client-configuration.

    Mounts each _IClientResourceDef plugin's resource under
    /client/plugins/<key>/ and collects its declared CSS/JS entry points
    plus the mode table into the client-configuration.json resource.
    """
    load_list_css = []
    load_list_js = []
    mode_table = {}
    plugin_resources = Resource()
    client_resource.putChild('plugins', plugin_resources)
    for resource_def in getPlugins(_IClientResourceDef, shinysdr.plugins):
        # Add the plugin's resource to static serving
        plugin_resources.putChild(resource_def.key, resource_def.resource)
        plugin_resource_url = '/client/plugins/' + urllib.parse.quote(resource_def.key, safe='') + '/'
        # Tell the client to load the plugins
        # TODO constrain path values to be relative (not on a different origin, to not leak urls)
        if resource_def.load_css_path is not None:
            # BUG FIX: previously read resource_def.load_cs_path (typo), which
            # raised AttributeError for any plugin declaring a CSS entry point.
            load_list_css.append(plugin_resource_url + resource_def.load_css_path)
        if resource_def.load_js_path is not None:
            # TODO constrain value to be in the directory
            load_list_js.append(plugin_resource_url + resource_def.load_js_path)
    for mode_def in get_modes():
        mode_table[mode_def.mode] = {
            u'info_enum_row': mode_def.info.to_json(),
            u'can_transmit': mode_def.mod_class is not None
        }
    plugin_index = {
        'css': load_list_css,
        'js': load_list_js,
        'modes': mode_table,
    }
    client_resource.putChild('client-configuration.json', ClientConfigurationResource(wcommon, plugin_index))
class ClientConfigurationResource(Resource):
    """Info about plugins and other not-strictly-static data, served as JSON."""
    isLeaf = True

    def __init__(self, wcommon, plugin_index):
        Resource.__init__(self)
        self.__wcommon = wcommon
        self.__plugin_index = plugin_index

    def render_GET(self, request):
        request.setHeader(b'Content-Type', b'application/json')
        # TODO: oughta be a shorter path to this -- normally websocket url is constructed from a http url gotten from the request so we don't have this exact path
        shared_test_url = self.__wcommon.make_websocket_url(
            request,
            _make_cap_url(SHARED_TEST_OBJECTS_CAP) + CAP_OBJECT_PATH_ELEMENT)
        configuration = {
            'plugins': self.__plugin_index,
            'shared_test_objects_url': shared_test_url,
        }
        return serialize(configuration).encode('utf-8')
class WebAppManifestResource(Resource):
    """
    Per https://www.w3.org/TR/appmanifest/
    """
    isLeaf = True

    def __init__(self, wcommon):
        Resource.__init__(self)
        self.__title = wcommon.title

    def render_GET(self, request):
        request.setHeader(b'Content-Type', b'application/manifest+json')
        title = self.__title
        # short_name is meant for space-constrained contexts; fall back to the
        # product name when the configured title is too long.
        short_name = title if len(title) <= 12 else 'ShinySDR'
        icons = [
            {
                'src': '/client/icon/icon-32.png',
                'type': 'image/png',
                'sizes': '32x32',
            },
            {
                'src': '/client/icon/icon.svg',
                'type': 'image/svg',
                'sizes': 'any',
            },
        ]
        manifest = {
            'lang': 'en-US',
            'name': title,
            'short_name': short_name,
            'scope': '/',
            'icons': icons,
            'display': 'minimal-ui',
            'orientation': 'any',
            'theme_color': '#B9B9B9',  # same as gray.css --shinysdr-theme-column-bgcolor
            'background_color': '#2F2F2F',  # note this is our loading screen color
        }
        return serialize(manifest).encode('utf-8')
def _make_cap_url(cap):
    """Return the host-relative URL path ('/<percent-encoded cap>/') for a capability string."""
    assert isinstance(cap, six.text_type)
    quoted = urllib.parse.quote(cap.encode('utf-8'), safe='')
    return defaultstr('/' + quoted + '/')
|
kpreid/shinysdr
|
shinysdr/i/network/webapp.py
|
Python
|
gpl-3.0
| 11,530
|
[
"VisIt"
] |
a97b67159814ccb67c6bdb8287e4467c5c8f50222b5854fa3a8935746b3250ca
|
# -*- coding: utf-8 -*-
"""
CPP数据结构定义
"""
from cppvisitattr import CppVisit
class CppData(object):
    """A C++ data-member declaration, rendered to source text by build()."""

    def __init__(self, ty, name, ptr=False, static=False, visit=CppVisit(CppVisit.NONE), init=None):
        # NOTE(review): the CppVisit default is evaluated once at def time and
        # shared across instances; fine as long as CppVisit is immutable.
        self.__type = ty
        self.__name = name
        self.__ptr = ptr
        self.__static = static
        self.__visit = visit
        self.__init = init

    def build(self, indent=0):
        """Render the declaration indented by `indent` spaces; the visibility
        specifier (if any) is emitted at indent - 4."""
        pieces = [self.__visit.build(indent - 4), ' ' * indent]
        if self.__static:
            pieces.append('static ')
        pieces.append(self.__type)
        if self.__ptr:
            pieces.append(' *')
        pieces.append(' ' + self.__name)
        if self.__init:
            pieces.append(' = {0}'.format(self.__init))
        pieces.append(';\n')
        return ''.join(pieces)
|
lailongwei/llbc
|
tools/building_script/cpputils/cppdata.py
|
Python
|
mit
| 745
|
[
"VisIt"
] |
64f9441a94644e5788c371812ce6d55900921049f746e8500749658ac2327b96
|
#
# Copyright (C) 2013,2014,2015,2016 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Tests particle property setters/getters
from __future__ import print_function
import unittest as ut
import espressomd
import numpy as np
from espressomd.interactions import FeneBond
from espressomd.pair_criteria import *
class PairCriteria(ut.TestCase):
    """Tests interface and implementation of pair criteria."""
    # Shared fixtures, built once at class-definition time: a unit box with
    # two particles 0.91 apart along x and two registered FENE bond types.
    es = espressomd.System(box_l=[1.,1.,1.])
    f1 = FeneBond(k=1, d_r_max=0.05)
    es.bonded_inter.add(f1)
    f2 = FeneBond(k=1, d_r_max=0.05)
    es.bonded_inter.add(f2)
    es.part.add(id=0,pos=(0,0,0))
    es.part.add(id=1,pos=(0.91,0,0))
    p1=es.part[0]
    p2=es.part[1]
    # Tolerance for floating-point parameter comparisons.
    epsilon=1E-8

    def test_distance_crit_periodic(self):
        # Parameter round-trip plus the in-range decision in a periodic box.
        dc=DistanceCriterion(cut_off=0.1)
        # Interface
        self.assertEqual(list(dc.get_params().keys()),["cut_off",])
        self.assertTrue(abs(dc.get_params()["cut_off"]-0.1)<self.epsilon)
        # Decisions
        # Periodic system. Particles in range via minimum image convention
        # (0.91 apart directly, but only 0.09 apart across the boundary).
        self.es.periodicity =(1,1,1)
        self.assertTrue(dc.decide(self.p1,self.p2))
        # decide() accepts particle handles and particle ids alike.
        self.assertTrue(dc.decide(self.p1.id,self.p2.id))

    @ut.skipIf(not espressomd.has_features("PARTIAL_PERIODIC"),"skiped for lack of PARTIAL_PERIODIC")
    def test_distance_crit_non_periodic(self):
        # Same criterion, but without periodicity the pair is out of range.
        dc=DistanceCriterion(cut_off=0.1)
        # Non-periodic system. Particles out of range
        self.es.periodicity =(0,0,0)
        self.assertTrue(not dc.decide(self.p1,self.p2))
        self.assertTrue(not dc.decide(self.p1.id,self.p2.id))

    @ut.skipIf(not espressomd.has_features("LENNARD_JONES"), "skipped due to missing lj potential")
    def test_energy_crit(self):
        # Setup purely repulsive lj
        self.es.non_bonded_inter[0,0].lennard_jones.set_params(sigma=0.11,epsilon=1,cutoff=2**(1./6.)*0.11,shift="auto")
        ec=EnergyCriterion(cut_off=0.001)
        # Interface
        self.assertEqual(list(ec.get_params().keys()),["cut_off",])
        self.assertTrue(abs(ec.get_params()["cut_off"]-0.001)<self.epsilon)
        # Decisions
        # Periodic system. Particles in range via minimum image convention
        self.es.periodicity =(1,1,1)
        self.assertTrue(ec.decide(self.p1,self.p2))
        self.assertTrue(ec.decide(self.p1.id,self.p2.id))

    @ut.skipIf(not espressomd.has_features("LENNARD_JONES") or
               not espressomd.has_features("PARTIAL_PERIODIC"),
               "skipped due to missing lj potential")
    def test_energy_crit_non_periodic(self):
        # Setup purely repulsive lj
        self.es.non_bonded_inter[0,0].lennard_jones.set_params(sigma=0.11,epsilon=1,cutoff=2**(1./6.)*0.11,shift="auto")
        ec=EnergyCriterion(cut_off=0.001)
        # Interface
        self.assertEqual(list(ec.get_params().keys()),["cut_off",])
        self.assertTrue(abs(ec.get_params()["cut_off"]-0.001)<self.epsilon)
        # Non-periodic system. Particles out of range
        self.es.periodicity =(0,0,0)
        self.assertTrue(not ec.decide(self.p1,self.p2))
        self.assertTrue(not ec.decide(self.p1.id,self.p2.id))

    def test_bond_crit(self):
        # Criterion matches iff a bond of the given type links the pair,
        # regardless of which particle carries the bond record.
        bc=BondCriterion(bond_type=0)
        # Interface
        self.assertEqual(list(bc.get_params().keys()),["bond_type",])
        self.assertEqual(bc.get_params()["bond_type"],0)
        # Decisions
        # No bond yet. Should return false
        self.assertTrue(not bc.decide(self.p1,self.p2))
        self.assertTrue(not bc.decide(self.p1.id,self.p2.id))
        # Add bond. Then the criterion should match
        self.es.part[0].bonds=((0,1),)
        self.assertTrue(bc.decide(self.p1,self.p2))
        self.assertTrue(bc.decide(self.p1.id,self.p2.id))
        # Place bond on the 2nd particle. The criterion should still match
        self.es.part[0].bonds=()
        self.es.part[1].bonds=((0,0),)
        self.assertTrue(bc.decide(self.p1,self.p2))
        self.assertTrue(bc.decide(self.p1.id,self.p2.id))
# Run the test suite when executed directly.
if __name__ == "__main__":
    #print("Features: ", espressomd.features())
    ut.main()
|
KonradBreitsprecher/espresso
|
testsuite/pair_criteria.py
|
Python
|
gpl-3.0
| 4,765
|
[
"ESPResSo"
] |
f051f131f8c1f71590f7d7a24ef232a3229bc0da5ef7bb991fac6080c6cf6746
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# pymaid documentation build configuration file, created by
# sphinx-quickstart on Sun Jul 9 22:17:00 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import re
import numpydoc
import sphinx_bootstrap_theme
import json
import matplotlib as mpl
mpl.use("Agg")
import matplotlib.sphinxext.plot_directive
import matplotlib.pyplot as plt
import sys
sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('..'))
sys.path.insert(0, os.path.abspath('../..'))
import mock
# Modules unavailable at doc-build time are replaced with mocks below so that
# autodoc can import pymaid without its optional dependencies installed.
# BUG FIX: a missing comma after 'rpy2.robjects.packages.importr' caused
# implicit string concatenation with the next entry, so neither
# 'rpy2.robjects.packages.importr' nor 'rpy2.robjects.pandas2ri' was mocked.
MOCK_MODULES = ['py2cytoscape.data.cyrest_client.CyRestClient',
                'rpy2.robjects.packages.importr',
                'rpy2.robjects.pandas2ri',
                'rpy2.robjects.numpy2ri',
                'rpy2.robjects.conversion.localconverter']
for mod_name in MOCK_MODULES:
sys.modules[mod_name] = mock.Mock()
import pymaid
#from pymaid import cytoscape
from subprocess import check_call as sh
def convert_nb(nbname, execute=False):
    """Convert a notebook to .rst for Sphinx, stripping tagged cells.

    If `execute` is True the notebook is run in place first and its
    outputs are cleared again afterwards.
    """
    if execute:
        # Execute the notebook in place so outputs are fresh.
        sh(["jupyter", "nbconvert", "--to", "notebook",
            "--execute", "--inplace", nbname])

    # Convert to .rst, dropping cells/inputs/outputs tagged for hiding.
    strip_args = [
        "--TagRemovePreprocessor.remove_cell_tags={'hide'}",
        "--TagRemovePreprocessor.remove_input_tags={'hide-input'}",
        "--TagRemovePreprocessor.remove_all_outputs_tags={'hide-output'}",
    ]
    sh(["jupyter", "nbconvert", "--to", "rst", nbname] + strip_args)

    if execute:
        # Clear notebook output so the repository stays clean.
        sh(["jupyter", "nbconvert", "--to", "notebook", "--inplace",
            "--ClearOutputPreprocessor.enabled=True", nbname])

    # Touch the .rst file so it has a later modify time than the source
    sh(["touch", nbname.replace('.ipynb', '') + ".rst"])
def remove_hbox(filepath):
    """Drop leftover ipywidgets 'HBox(children=...' progress-bar lines from a .rst file.

    nbconvert renders live progress widgets as a ``.. parsed-literal::``
    block whose repr ends in ``HTML(value='')))``; such blocks are removed
    in place. Note '.' does not match newlines here, so the widget repr is
    expected on a single line after the blank line.
    """
    with open(filepath, 'r') as f:
        s = f.read()
    if 'HBox(children' in s:
        # Raw string fixes the invalid escape sequences \( and \) that raise
        # DeprecationWarning (and eventually SyntaxError) in modern Python.
        s = re.sub(r".. parsed-literal::\n\n.*?HTML\(value=''\)\)\)", '', s)
        with open(filepath, 'w') as f:
            f.write(s)
# -- Make execution numbers in Jupyter notebooks ascending -------------------
source_path = os.path.dirname(os.path.abspath(__file__)) + '/source'

# Collect every notebook under docs/source, skipping checkpoint copies.
all_nb = list()
for (dirpath, dirnames, filenames) in os.walk(source_path):
    # Skip checkpoints
    if 'checkpoint' in dirpath:
        continue
    all_nb += [os.path.join(dirpath, file) for file in filenames if file.endswith('.ipynb')]

# Convert each notebook to .rst (without re-executing) and strip widget noise.
# NOTE(review): this runs at import time of conf.py, i.e. on every docs build.
for nb in all_nb:
    convert_nb(nb)
    remove_hbox(nb.replace('.ipynb', '.rst'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'nbsphinx',
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.viewcode', # This will add links to source code to autodoc
#'sphinx.ext.linkcode', # This is similar to viewcode but links to external source -> need to define a function for this
'sphinx.ext.napoleon',
#'sphinx.ext.mathjax', # mathjax is interactive and configurable but can also misbehave when rendering - switched to imgmath instead
'sphinx.ext.imgmath',
'matplotlib.sphinxext.plot_directive',
'sphinx.ext.intersphinx'
#'numpydoc'
]
intersphinx_mapping = {'navis': ('https://navis.readthedocs.io/en/latest/', None)}
# Include the example source for plots in API docs
plot_include_source = True
plot_formats = [("png", 90)]
plot_html_show_formats = False
plot_html_show_source_link = False
# generate autosummary pages
autosummary_generate = True
autoclass_content = 'both'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'pymaid'
copyright = '2017, Philipp Schlegel'
author = 'Philipp Schlegel'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = pymaid.__version__
# The full version, including alpha/beta/rc tags.
release = pymaid.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', '**.ipynb_checkpoints']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'bootstrap'
#html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
html_theme_options = {
'source_link_position': "footer",
'bootswatch_theme': "paper",
'navbar_sidebarrel': False,
'bootstrap_version': "3",
'navbar_links': [
("Install", "source/install"),
("Tutorial", "source/intro"),
("Examples", "source/example_gallery"),
("API", "source/api"),
],
}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = 'pymaid v0.8'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
html_logo = '_static/favicon.png'
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
html_favicon = '_static/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'pymaiddoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'pymaid.tex', 'pymaid Documentation',
'Philipp Schlegel', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# It false, will not define \strong, \code, itleref, \crossref ... but only
# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added
# packages.
#
# latex_keep_old_macro_names = True
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'pymaid', 'pymaid Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'pymaid', 'pymaid Documentation',
author, 'pymaid', 'Neuron analysis toolbox for CATMAID data.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
def setup(app):
    """Sphinx extension hook: register the project's custom stylesheet."""
    # NOTE(review): app.add_stylesheet was renamed add_css_file in Sphinx 1.8
    # and removed in Sphinx 4.0 -- confirm the pinned Sphinx version before
    # upgrading.
    app.add_stylesheet('style.css')
|
schlegelp/pymaid
|
docs/conf.py
|
Python
|
gpl-3.0
| 13,592
|
[
"Cytoscape",
"NEURON"
] |
1bd34f3d04aa81d65e4796bded6d8c7e1c6ce86b5a66ebb5c1e5f4cc004a142c
|
#!/usr/bin/env python
"""
Artificial Intelligence for Humans
Volume 2: Nature-Inspired Algorithms
Python Version
http://www.aifh.org
http://www.jeffheaton.com
Code repository:
https://github.com/jeffheaton/aifh
Copyright 2014 by Jeff Heaton
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
For more information on Heaton Research copyrights, licenses
and trademarks visit:
http://www.heatonresearch.com/copyright
============================================================================================================
This example takes awhile to execute. It uses a genetic algorithm to fit an RBF network to the iris data set.
You can see the output from the example here. As you can see, it took 58 iterations to train to 0.05.
You can see that it is able to classify many of the iris species correctly, but not all.
This example uses one-of-n encoding for the iris species. Equilateral could have also been used.
Generaton #1, Score=0.199843346838, stagnant=0
Generaton #2, Score=0.199843346838, stagnant=0
Generaton #3, Score=0.193606061977, stagnant=1
Generaton #4, Score=0.182932591913, stagnant=0
Generaton #5, Score=0.165157776619, stagnant=0
Generaton #6, Score=0.15796529294, stagnant=0
Generaton #7, Score=0.157826592807, stagnant=0
Generaton #8, Score=0.149478480898, stagnant=1
Generaton #9, Score=0.142609733514, stagnant=0
Generaton #10, Score=0.141267076301, stagnant=0
Generaton #11, Score=0.13387570015, stagnant=0
Generaton #12, Score=0.131977908763, stagnant=0
Generaton #13, Score=0.126539359115, stagnant=0
Generaton #14, Score=0.122389808687, stagnant=0
Generaton #15, Score=0.121392668139, stagnant=0
Generaton #16, Score=0.11318352856, stagnant=1
Generaton #17, Score=0.111552631929, stagnant=0
Generaton #18, Score=0.104332331742, stagnant=0
Generaton #19, Score=0.103101332438, stagnant=0
Generaton #20, Score=0.100584671844, stagnant=0
Generaton #21, Score=0.0974004283988, stagnant=0
Generaton #22, Score=0.094533902446, stagnant=0
Generaton #23, Score=0.0910003821609, stagnant=0
Generaton #24, Score=0.0910003821609, stagnant=0
Generaton #25, Score=0.0905620576106, stagnant=1
Generaton #26, Score=0.0866654176526, stagnant=2
Generaton #27, Score=0.0826733880209, stagnant=0
Generaton #28, Score=0.0816455270936, stagnant=0
Generaton #29, Score=0.0799649368276, stagnant=0
Generaton #30, Score=0.0797301141794, stagnant=0
Generaton #31, Score=0.0774793573792, stagnant=1
Generaton #32, Score=0.0767527501314, stagnant=0
Generaton #33, Score=0.0764559059563, stagnant=1
Generaton #34, Score=0.0749918540669, stagnant=2
Generaton #35, Score=0.0723100319898, stagnant=0
Generaton #36, Score=0.071279017377, stagnant=0
Generaton #37, Score=0.0692806352376, stagnant=0
Generaton #38, Score=0.0687199631007, stagnant=0
Generaton #39, Score=0.0671800095714, stagnant=1
Generaton #40, Score=0.0651154796387, stagnant=0
Generaton #41, Score=0.0640848760543, stagnant=0
Generaton #42, Score=0.062768548122, stagnant=0
Generaton #43, Score=0.0623897612924, stagnant=0
Generaton #44, Score=0.0613174410677, stagnant=1
Generaton #45, Score=0.0600323016682, stagnant=0
Generaton #46, Score=0.0590140769361, stagnant=0
Generaton #47, Score=0.0579662753868, stagnant=0
Generaton #48, Score=0.0563771595186, stagnant=0
Generaton #49, Score=0.0557091224927, stagnant=0
Generaton #50, Score=0.0557091224927, stagnant=1
Generaton #51, Score=0.0556228207268, stagnant=2
Generaton #52, Score=0.0547559332724, stagnant=3
Generaton #53, Score=0.0547559332724, stagnant=4
Generaton #54, Score=0.0544944263627, stagnant=5
Generaton #55, Score=0.0539352236468, stagnant=6
Generaton #56, Score=0.0535581096618, stagnant=7
Generaton #57, Score=0.0527253713172, stagnant=8
Generaton #58, Score=0.0525153691128, stagnant=9
[ 0.22222222 0.625 0.06779661 0.04166667] -> Iris-setosa, Ideal: Iris-setosa
[ 0.16666667 0.41666667 0.06779661 0.04166667] -> Iris-setosa, Ideal: Iris-setosa
[ 0.11111111 0.5 0.05084746 0.04166667] -> Iris-setosa, Ideal: Iris-setosa
[ 0.08333333 0.45833333 0.08474576 0.04166667] -> Iris-setosa, Ideal: Iris-setosa
[ 0.19444444 0.66666667 0.06779661 0.04166667] -> Iris-setosa, Ideal: Iris-setosa
[ 0.30555556 0.79166667 0.11864407 0.125 ] -> Iris-setosa, Ideal: Iris-setosa
[ 0.08333333 0.58333333 0.06779661 0.08333333] -> Iris-setosa, Ideal: Iris-setosa
[ 0.19444444 0.58333333 0.08474576 0.04166667] -> Iris-setosa, Ideal: Iris-setosa
[ 0.02777778 0.375 0.06779661 0.04166667] -> Iris-setosa, Ideal: Iris-setosa
[ 0.16666667 0.45833333 0.08474576 0. ] -> Iris-setosa, Ideal: Iris-setosa
[ 0.30555556 0.70833333 0.08474576 0.04166667] -> Iris-setosa, Ideal: Iris-setosa
[ 0.13888889 0.58333333 0.10169492 0.04166667] -> Iris-setosa, Ideal: Iris-setosa
[ 0.13888889 0.41666667 0.06779661 0. ] -> Iris-setosa, Ideal: Iris-setosa
[ 0. 0.41666667 0.01694915 0. ] -> Iris-setosa, Ideal: Iris-setosa
[ 0.41666667 0.83333333 0.03389831 0.04166667] -> Iris-setosa, Ideal: Iris-setosa
[ 0.38888889 1. 0.08474576 0.125 ] -> Iris-setosa, Ideal: Iris-setosa
[ 0.30555556 0.79166667 0.05084746 0.125 ] -> Iris-setosa, Ideal: Iris-setosa
[ 0.22222222 0.625 0.06779661 0.08333333] -> Iris-setosa, Ideal: Iris-setosa
[ 0.38888889 0.75 0.11864407 0.08333333] -> Iris-setosa, Ideal: Iris-setosa
[ 0.22222222 0.75 0.08474576 0.08333333] -> Iris-setosa, Ideal: Iris-setosa
[ 0.30555556 0.58333333 0.11864407 0.04166667] -> Iris-setosa, Ideal: Iris-setosa
[ 0.22222222 0.70833333 0.08474576 0.125 ] -> Iris-setosa, Ideal: Iris-setosa
[ 0.08333333 0.66666667 0. 0.04166667] -> Iris-setosa, Ideal: Iris-setosa
[ 0.22222222 0.54166667 0.11864407 0.16666667] -> Iris-setosa, Ideal: Iris-setosa
[ 0.13888889 0.58333333 0.15254237 0.04166667] -> Iris-setosa, Ideal: Iris-setosa
[ 0.19444444 0.41666667 0.10169492 0.04166667] -> Iris-setosa, Ideal: Iris-setosa
[ 0.19444444 0.58333333 0.10169492 0.125 ] -> Iris-setosa, Ideal: Iris-setosa
[ 0.25 0.625 0.08474576 0.04166667] -> Iris-setosa, Ideal: Iris-setosa
[ 0.25 0.58333333 0.06779661 0.04166667] -> Iris-setosa, Ideal: Iris-setosa
[ 0.11111111 0.5 0.10169492 0.04166667] -> Iris-setosa, Ideal: Iris-setosa
[ 0.13888889 0.45833333 0.10169492 0.04166667] -> Iris-setosa, Ideal: Iris-setosa
[ 0.30555556 0.58333333 0.08474576 0.125 ] -> Iris-setosa, Ideal: Iris-setosa
[ 0.25 0.875 0.08474576 0. ] -> Iris-setosa, Ideal: Iris-setosa
[ 0.33333333 0.91666667 0.06779661 0.04166667] -> Iris-setosa, Ideal: Iris-setosa
[ 0.16666667 0.45833333 0.08474576 0. ] -> Iris-setosa, Ideal: Iris-setosa
[ 0.19444444 0.5 0.03389831 0.04166667] -> Iris-setosa, Ideal: Iris-setosa
[ 0.33333333 0.625 0.05084746 0.04166667] -> Iris-setosa, Ideal: Iris-setosa
[ 0.16666667 0.45833333 0.08474576 0. ] -> Iris-setosa, Ideal: Iris-setosa
[ 0.02777778 0.41666667 0.05084746 0.04166667] -> Iris-setosa, Ideal: Iris-setosa
[ 0.22222222 0.58333333 0.08474576 0.04166667] -> Iris-setosa, Ideal: Iris-setosa
[ 0.19444444 0.625 0.05084746 0.08333333] -> Iris-setosa, Ideal: Iris-setosa
[ 0.05555556 0.125 0.05084746 0.08333333] -> Iris-setosa, Ideal: Iris-setosa
[ 0.02777778 0.5 0.05084746 0.04166667] -> Iris-setosa, Ideal: Iris-setosa
[ 0.19444444 0.625 0.10169492 0.20833333] -> Iris-setosa, Ideal: Iris-setosa
[ 0.22222222 0.75 0.15254237 0.125 ] -> Iris-setosa, Ideal: Iris-setosa
[ 0.13888889 0.41666667 0.06779661 0.08333333] -> Iris-setosa, Ideal: Iris-setosa
[ 0.22222222 0.75 0.10169492 0.04166667] -> Iris-setosa, Ideal: Iris-setosa
[ 0.08333333 0.5 0.06779661 0.04166667] -> Iris-setosa, Ideal: Iris-setosa
[ 0.27777778 0.70833333 0.08474576 0.04166667] -> Iris-setosa, Ideal: Iris-setosa
[ 0.19444444 0.54166667 0.06779661 0.04166667] -> Iris-setosa, Ideal: Iris-setosa
[ 0.75 0.5 0.62711864 0.54166667] -> Iris-versicolor, Ideal: Iris-versicolor
[ 0.58333333 0.5 0.59322034 0.58333333] -> Iris-versicolor, Ideal: Iris-versicolor
[ 0.72222222 0.45833333 0.66101695 0.58333333] -> Iris-versicolor, Ideal: Iris-versicolor
[ 0.33333333 0.125 0.50847458 0.5 ] -> Iris-versicolor, Ideal: Iris-versicolor
[ 0.61111111 0.33333333 0.61016949 0.58333333] -> Iris-versicolor, Ideal: Iris-versicolor
[ 0.38888889 0.33333333 0.59322034 0.5 ] -> Iris-versicolor, Ideal: Iris-versicolor
[ 0.55555556 0.54166667 0.62711864 0.625 ] -> Iris-virginica, Ideal: Iris-versicolor
[ 0.16666667 0.16666667 0.38983051 0.375 ] -> Iris-versicolor, Ideal: Iris-versicolor
[ 0.63888889 0.375 0.61016949 0.5 ] -> Iris-versicolor, Ideal: Iris-versicolor
[ 0.25 0.29166667 0.49152542 0.54166667] -> Iris-versicolor, Ideal: Iris-versicolor
[ 0.19444444 0. 0.42372881 0.375 ] -> Iris-versicolor, Ideal: Iris-versicolor
[ 0.44444444 0.41666667 0.54237288 0.58333333] -> Iris-versicolor, Ideal: Iris-versicolor
[ 0.47222222 0.08333333 0.50847458 0.375 ] -> Iris-versicolor, Ideal: Iris-versicolor
[ 0.5 0.375 0.62711864 0.54166667] -> Iris-versicolor, Ideal: Iris-versicolor
[ 0.36111111 0.375 0.44067797 0.5 ] -> Iris-versicolor, Ideal: Iris-versicolor
[ 0.66666667 0.45833333 0.57627119 0.54166667] -> Iris-versicolor, Ideal: Iris-versicolor
[ 0.36111111 0.41666667 0.59322034 0.58333333] -> Iris-versicolor, Ideal: Iris-versicolor
[ 0.41666667 0.29166667 0.52542373 0.375 ] -> Iris-versicolor, Ideal: Iris-versicolor
[ 0.52777778 0.08333333 0.59322034 0.58333333] -> Iris-versicolor, Ideal: Iris-versicolor
[ 0.36111111 0.20833333 0.49152542 0.41666667] -> Iris-versicolor, Ideal: Iris-versicolor
[ 0.44444444 0.5 0.6440678 0.70833333] -> Iris-virginica, Ideal: Iris-versicolor
[ 0.5 0.33333333 0.50847458 0.5 ] -> Iris-versicolor, Ideal: Iris-versicolor
[ 0.55555556 0.20833333 0.66101695 0.58333333] -> Iris-versicolor, Ideal: Iris-versicolor
[ 0.5 0.33333333 0.62711864 0.45833333] -> Iris-versicolor, Ideal: Iris-versicolor
[ 0.58333333 0.375 0.55932203 0.5 ] -> Iris-versicolor, Ideal: Iris-versicolor
[ 0.63888889 0.41666667 0.57627119 0.54166667] -> Iris-versicolor, Ideal: Iris-versicolor
[ 0.69444444 0.33333333 0.6440678 0.54166667] -> Iris-versicolor, Ideal: Iris-versicolor
[ 0.66666667 0.41666667 0.6779661 0.66666667] -> Iris-virginica, Ideal: Iris-versicolor
[ 0.47222222 0.375 0.59322034 0.58333333] -> Iris-versicolor, Ideal: Iris-versicolor
[ 0.38888889 0.25 0.42372881 0.375 ] -> Iris-versicolor, Ideal: Iris-versicolor
[ 0.33333333 0.16666667 0.47457627 0.41666667] -> Iris-versicolor, Ideal: Iris-versicolor
[ 0.33333333 0.16666667 0.45762712 0.375 ] -> Iris-versicolor, Ideal: Iris-versicolor
[ 0.41666667 0.29166667 0.49152542 0.45833333] -> Iris-versicolor, Ideal: Iris-versicolor
[ 0.47222222 0.29166667 0.69491525 0.625 ] -> Iris-versicolor, Ideal: Iris-versicolor
[ 0.30555556 0.41666667 0.59322034 0.58333333] -> Iris-versicolor, Ideal: Iris-versicolor
[ 0.47222222 0.58333333 0.59322034 0.625 ] -> Iris-virginica, Ideal: Iris-versicolor
[ 0.66666667 0.45833333 0.62711864 0.58333333] -> Iris-versicolor, Ideal: Iris-versicolor
[ 0.55555556 0.125 0.57627119 0.5 ] -> Iris-versicolor, Ideal: Iris-versicolor
[ 0.36111111 0.41666667 0.52542373 0.5 ] -> Iris-versicolor, Ideal: Iris-versicolor
[ 0.33333333 0.20833333 0.50847458 0.5 ] -> Iris-versicolor, Ideal: Iris-versicolor
[ 0.33333333 0.25 0.57627119 0.45833333] -> Iris-versicolor, Ideal: Iris-versicolor
[ 0.5 0.41666667 0.61016949 0.54166667] -> Iris-versicolor, Ideal: Iris-versicolor
[ 0.41666667 0.25 0.50847458 0.45833333] -> Iris-versicolor, Ideal: Iris-versicolor
[ 0.19444444 0.125 0.38983051 0.375 ] -> Iris-versicolor, Ideal: Iris-versicolor
[ 0.36111111 0.29166667 0.54237288 0.5 ] -> Iris-versicolor, Ideal: Iris-versicolor
[ 0.38888889 0.41666667 0.54237288 0.45833333] -> Iris-versicolor, Ideal: Iris-versicolor
[ 0.38888889 0.375 0.54237288 0.5 ] -> Iris-versicolor, Ideal: Iris-versicolor
[ 0.52777778 0.375 0.55932203 0.5 ] -> Iris-versicolor, Ideal: Iris-versicolor
[ 0.22222222 0.20833333 0.33898305 0.41666667] -> Iris-versicolor, Ideal: Iris-versicolor
[ 0.38888889 0.33333333 0.52542373 0.5 ] -> Iris-versicolor, Ideal: Iris-versicolor
[ 0.55555556 0.54166667 0.84745763 1. ] -> Iris-virginica, Ideal: Iris-virginica
[ 0.41666667 0.29166667 0.69491525 0.75 ] -> Iris-virginica, Ideal: Iris-virginica
[ 0.77777778 0.41666667 0.83050847 0.83333333] -> Iris-virginica, Ideal: Iris-virginica
[ 0.55555556 0.375 0.77966102 0.70833333] -> Iris-virginica, Ideal: Iris-virginica
[ 0.61111111 0.41666667 0.81355932 0.875 ] -> Iris-virginica, Ideal: Iris-virginica
[ 0.91666667 0.41666667 0.94915254 0.83333333] -> Iris-virginica, Ideal: Iris-virginica
[ 0.16666667 0.20833333 0.59322034 0.66666667] -> Iris-versicolor, Ideal: Iris-virginica
[ 0.83333333 0.375 0.89830508 0.70833333] -> Iris-virginica, Ideal: Iris-virginica
[ 0.66666667 0.20833333 0.81355932 0.70833333] -> Iris-virginica, Ideal: Iris-virginica
[ 0.80555556 0.66666667 0.86440678 1. ] -> Iris-virginica, Ideal: Iris-virginica
[ 0.61111111 0.5 0.69491525 0.79166667] -> Iris-virginica, Ideal: Iris-virginica
[ 0.58333333 0.29166667 0.72881356 0.75 ] -> Iris-virginica, Ideal: Iris-virginica
[ 0.69444444 0.41666667 0.76271186 0.83333333] -> Iris-virginica, Ideal: Iris-virginica
[ 0.38888889 0.20833333 0.6779661 0.79166667] -> Iris-virginica, Ideal: Iris-virginica
[ 0.41666667 0.33333333 0.69491525 0.95833333] -> Iris-virginica, Ideal: Iris-virginica
[ 0.58333333 0.5 0.72881356 0.91666667] -> Iris-virginica, Ideal: Iris-virginica
[ 0.61111111 0.41666667 0.76271186 0.70833333] -> Iris-virginica, Ideal: Iris-virginica
[ 0.94444444 0.75 0.96610169 0.875 ] -> Iris-virginica, Ideal: Iris-virginica
[ 0.94444444 0.25 1. 0.91666667] -> Iris-virginica, Ideal: Iris-virginica
[ 0.47222222 0.08333333 0.6779661 0.58333333] -> Iris-versicolor, Ideal: Iris-virginica
[ 0.72222222 0.5 0.79661017 0.91666667] -> Iris-virginica, Ideal: Iris-virginica
[ 0.36111111 0.33333333 0.66101695 0.79166667] -> Iris-virginica, Ideal: Iris-virginica
[ 0.94444444 0.33333333 0.96610169 0.79166667] -> Iris-virginica, Ideal: Iris-virginica
[ 0.55555556 0.29166667 0.66101695 0.70833333] -> Iris-virginica, Ideal: Iris-virginica
[ 0.66666667 0.54166667 0.79661017 0.83333333] -> Iris-virginica, Ideal: Iris-virginica
[ 0.80555556 0.5 0.84745763 0.70833333] -> Iris-virginica, Ideal: Iris-virginica
[ 0.52777778 0.33333333 0.6440678 0.70833333] -> Iris-virginica, Ideal: Iris-virginica
[ 0.5 0.41666667 0.66101695 0.70833333] -> Iris-virginica, Ideal: Iris-virginica
[ 0.58333333 0.33333333 0.77966102 0.83333333] -> Iris-virginica, Ideal: Iris-virginica
[ 0.80555556 0.41666667 0.81355932 0.625 ] -> Iris-virginica, Ideal: Iris-virginica
[ 0.86111111 0.33333333 0.86440678 0.75 ] -> Iris-virginica, Ideal: Iris-virginica
[ 1. 0.75 0.91525424 0.79166667] -> Iris-virginica, Ideal: Iris-virginica
[ 0.58333333 0.33333333 0.77966102 0.875 ] -> Iris-virginica, Ideal: Iris-virginica
[ 0.55555556 0.33333333 0.69491525 0.58333333] -> Iris-versicolor, Ideal: Iris-virginica
[ 0.5 0.25 0.77966102 0.54166667] -> Iris-versicolor, Ideal: Iris-virginica
[ 0.94444444 0.41666667 0.86440678 0.91666667] -> Iris-virginica, Ideal: Iris-virginica
[ 0.55555556 0.58333333 0.77966102 0.95833333] -> Iris-virginica, Ideal: Iris-virginica
[ 0.58333333 0.45833333 0.76271186 0.70833333] -> Iris-virginica, Ideal: Iris-virginica
[ 0.47222222 0.41666667 0.6440678 0.70833333] -> Iris-virginica, Ideal: Iris-virginica
[ 0.72222222 0.45833333 0.74576271 0.83333333] -> Iris-virginica, Ideal: Iris-virginica
[ 0.66666667 0.45833333 0.77966102 0.95833333] -> Iris-virginica, Ideal: Iris-virginica
[ 0.72222222 0.45833333 0.69491525 0.91666667] -> Iris-virginica, Ideal: Iris-virginica
[ 0.41666667 0.29166667 0.69491525 0.75 ] -> Iris-virginica, Ideal: Iris-virginica
[ 0.69444444 0.5 0.83050847 0.91666667] -> Iris-virginica, Ideal: Iris-virginica
[ 0.66666667 0.54166667 0.79661017 1. ] -> Iris-virginica, Ideal: Iris-virginica
[ 0.66666667 0.41666667 0.71186441 0.91666667] -> Iris-virginica, Ideal: Iris-virginica
[ 0.55555556 0.20833333 0.6779661 0.75 ] -> Iris-virginica, Ideal: Iris-virginica
[ 0.61111111 0.41666667 0.71186441 0.79166667] -> Iris-virginica, Ideal: Iris-virginica
[ 0.52777778 0.58333333 0.74576271 0.91666667] -> Iris-virginica, Ideal: Iris-virginica
[ 0.44444444 0.41666667 0.69491525 0.70833333] -> Iris-virginica, Ideal: Iris-virginica
Process finished with exit code 0
"""
__author__ = 'jheaton'
import os
import sys
import numpy as np
# Find the AIFH core files
aifh_dir = os.path.dirname(os.path.abspath(__file__))
aifh_dir = os.path.abspath(aifh_dir + os.sep + ".." + os.sep + "lib" + os.sep + "aifh")
sys.path.append(aifh_dir)
from normalize import Normalize
from rbf_network import RbfNetwork
from error import ErrorCalculation
from genetic import *
from aco import *
# Find the Iris data set relative to this script.
irisFile = os.path.dirname(os.path.realpath(__file__))
# NOTE(review): there is no os.sep before "../..", so the concatenation
# creates a "<dir>.." component that the first ".." then cancels; the path
# happens to resolve to <package>/datasets/iris.csv. Fragile — confirm
# before "fixing" with os.path.join, which would resolve one level higher.
irisFile = os.path.abspath(irisFile + "../../datasets/iris.csv")
# Read the Iris data set.
print('Reading CSV file: ' + irisFile)
norm = Normalize()
iris_work = norm.load_csv(irisFile)
# Extract the original iris species so we can display during the final validation.
ideal_species = [row[4] for row in iris_work]
# Set up the first four fields to "range normalize" between 0 and 1.
for i in range(0, 4):
    norm.make_col_numeric(iris_work, i)
    norm.norm_col_range(iris_work, i, 0, 1)
# Discover all of the classes for column #4, the iris species.
classes = norm.build_class_map(iris_work, 4)
# Reverse lookup: class number -> species name.
inv_classes = {v: k for k, v in classes.items()}
# Normalize iris species using one-of-n encoding.
# We could have used equilateral as well. For an example of equilateral, see the example_nm_iris example.
norm.norm_col_one_of_n(iris_work, 4, classes, 0, 1)
# Prepare training data. Separate into input (4 columns) and ideal (3 columns).
training = np.array(iris_work)
training_input = training[:, 0:4]
training_ideal = training[:, 4:7]
# Create an RBF network with 4 inputs, 4 internal RBF functions and
# 3 outputs (one output neuron per species). You can experiment with
# different numbers of internal RBF functions, but the input and output
# counts must match the data set.
network = RbfNetwork(4, 4, 3)
network.reset()
def score_funct(x):
    """Objective function minimized by the ACO trainer.

    Copies the candidate vector ``x`` into the RBF network's long-term
    memory, runs the whole training set through the network and returns
    the mean squared error against the ideal outputs (lower is better).
    """
    global best_score
    global input_data
    global output_data
    # Load the candidate solution into the network's long-term memory.
    network.copy_memory(x)
    # Predict an output vector for every training row.
    predictions = []
    for input_data in training_input:
        output_data = network.compute_regression(input_data)
        predictions.append(output_data)
    # Score the candidate by MSE against the ideal outputs.
    return ErrorCalculation.mse(np.array(predictions), training_ideal)
# Train the network with continuous ant colony optimization (ACO) over the
# network's long-term memory vector, using a population of 30 ants.
# (The original comment said "annealing"; the trainer here is ACO.)
train = ContinuousACO(network.long_term_memory,score_funct,30)
train.display_iteration = True
train.train()
# Display the final validation. We show all of the iris data as well as the predicted species.
for i in range(0, len(training_input)):
    input_data = training_input[i]
    # Compute the output from the RBF network.
    output_data = network.compute_regression(input_data)
    ideal_data = training_ideal[i]
    # Decode the three output neurons into a class number.
    class_id = norm.denorm_one_of_n(output_data)
    print(str(input_data) + " -> " + inv_classes[class_id] + ", Ideal: " + ideal_species[i])
|
PeterLauris/aifh
|
vol2/vol2-python-examples/examples/example_aco_iris.py
|
Python
|
apache-2.0
| 20,865
|
[
"VisIt"
] |
c645d116c805472547a353b031081af124a6c3876d7c69c4c9e8a755077f1616
|
#!/usr/bin/env python
"""BWA/samtools pileup tools"""
from jip import *
@tool('bwa_index')
class BwaIndex():
    """\
    Run the BWA indexer on a given reference genome
    Usage:
        bwa_index -r <reference>
    Inputs:
        -r, --reference  The reference
    """
    # NOTE: jip parses the docstring above as the tool's CLI specification,
    # so its wording and layout are behaviorally significant.

    def validate(self):
        # Register the .bwt file that `bwa index` produces as this tool's output.
        self.add_output('output', "%s.bwt" % (self.reference))

    def get_command(self):
        # ${reference} is interpolated by jip from the declared option.
        return 'bwa index ${reference}'
@tool(inputs=['input', 'reference'])
def bwa_align(object):
    """\
    Call the BWA aligner
    usage:
        bwa_align -r <reference> -i <input> [-o <output>]
    Options:
        -r, --reference <reference>  The genomic reference index
        -i, --input <input>          The input reads
        -o, --output <output>        The output file
                                     [default: stdout]
    """
    # NOTE: jip parses the docstring above as the CLI spec. The parameter
    # name `object` shadows the builtin, but renaming it would change the
    # function signature jip wraps, so it is left as-is.
    # -I: Illumina 1.3+ quality encoding; -t 8: use 8 threads.
    # ${reference|ext} strips the extension, ${output|arg(">")} becomes
    # a "> file" redirection only when an output file was given.
    return 'bwa aln -I -t 8 ${reference|ext} ${input} ${output|arg(">")}'
@tool(inputs=['input', 'alignment', 'reference'])
def bwa_sam(tool):
    """\
    Convert output of the BWA aligner to SAM
    usage:
        bwa_sam [-p] -r <reference> -i <input> -a <alignment> [-o <output>]
    Options:
        -r, --reference <reference>  The genomic reference file. This has to
                                     be indexed already and the index must be
                                     found next to the given .fa fasta file
        -a, --alignment <alignment>  The BWA alignment
        -i, --input <input>          The input reads
        -o, --output <output>        The output file
                                     [default: stdout]
        -p, --paired                 Paired-end reads
    """
    # NOTE: jip parses the docstring above as the CLI spec.
    # Choose the paired-end (sampe) or single-end (samse) converter.
    if tool.paired:
        return 'bwa sampe ${reference|ext} ${alignment} ${input} ${output|arg(">")}'
    else:
        return 'bwa samse ${reference|ext} ${alignment} ${input} ${output|arg(">")}'
@tool('sam2bam')
class Sam2Bam(object):
    """\
    Convert output of the BWA aligner to SAM
    usage:
        sam2bam -i <input> -o <output>
    Inputs:
        -i, --input <input>    The input reads
    Outputs:
        -o, --output <output>  The output file
    """
    # NOTE(review): despite the docstring summary, this converts SAM to a
    # sorted BAM (`samtools view -bSu | samtools sort`). The docstring is
    # jip's CLI spec, so it is left untouched here.

    def get_command(self):
        # -bSu: BAM output, SAM input, uncompressed (feeds the sort stage).
        return '''\
samtools view -bSu ${input} | samtools sort - ${output}
'''
@tool('duplicates')
class Duplicates(object):
    """\
    Remove duplicates
    usage:
        duplicates -i <input> -o <output>
    Options:
        -i, --input <input>    The input reads
        -o, --output <output>  The output file
    """
    # NOTE: jip parses the docstring above as the CLI spec.

    def validate(self):
        # Picard also writes a metrics file; register it as an extra output
        # so jip tracks (and cleans up) it alongside the BAM.
        self.add_output('output_metrics', self.output + '.metrics')

    def get_command(self):
        # NOTE(review): the absolute Picard jar path is site-specific.
        return '''\
java -Xmx1g -jar /apps/PICARD/1.95/MarkDuplicates.jar \
MAX_FILE_HANDLES_FOR_READ_ENDS_MAP=1000\
METRICS_FILE=${output_metrics}\
REMOVE_DUPLICATES=true \
ASSUME_SORTED=true \
VALIDATION_STRINGENCY=LENIENT \
INPUT=${input} \
OUTPUT=${output}
'''
@tool(
    'bam_index',
    # Input must look like a BAM file; the .bai output path is derived
    # from the input option by the lambda below.
    ensure=[('input', '.*.bam')],
    add_outputs=[('output', lambda s: s.options['input'] + '.bai')],
)
def bam_index(self):
    """\
    Index a bam file
    usage:
        bam_index -i <input>
    Options:
        -i, --input <input>  The input reads
    """
    # NOTE: jip parses the docstring above as the CLI spec.
    return '''samtools index ${input}'''
@tool('mpileup')
class Pileup(object):
    # Run ``samtools mpileup`` on a sorted, indexed BAM file and pipe the
    # result through ``bcftools view`` to produce a BCF variant file.
    # Options are declared programmatically in register() rather than via
    # a docopt-style docstring (which jip would otherwise try to parse).

    def register(self, parser):
        """Declare this tool's command-line options on the jip parser."""
        import sys
        parser.description = "Run samtools mpileup on a sorted index bam file"
        parser.add_argument('-i', '--input',
                            required=True,
                            help="Bam file index ending in .bam.bai")
        parser.add_argument('-r', '--reference',
                            required=True,
                            help="The genomic reference")
        parser.add_argument('-o', '--output',
                            default=sys.stdout,
                            help="Output file")

    def validate(self):
        """Check that the input is a .bam.bai index and the reference exists."""
        # Raw string: the original '.*\.bam\.bai$' relied on Python keeping
        # unknown escapes like '\.' verbatim (a DeprecationWarning on
        # Python 3). r'...' yields the identical pattern, explicitly.
        self.ensure('input', r'.*\.bam\.bai$', "Please specify a .bai index")
        self.check_file('reference')

    def get_command(self):
        """Return the mpileup | bcftools command template."""
        # ${input|ext} strips the .bai so samtools sees the .bam file.
        return '''
samtools mpileup -uf ${reference} ${input|ext} | \
bcftools view -bvcg - ${output|arg(">")}
'''
@pipeline('pileup')
class PileupPipeline(object):
    """\
    Run BWA and samtools to align reads and create a pileup
    usage:
        pileup -r <reference> -i <input> -o <output>
    Options:
        -r, --reference <reference>  The genomic reference file. This has to
                                     be indexed already and the index must be
                                     found next to the given .fa fasta file
        -i, --input <input>          The input reads
        -o, --output <output>        The output file
    """
    # NOTE: jip parses the docstring above as the CLI spec.

    def pipeline(self):
        """Wire the individual tools into a jip Pipeline: index the
        reference, align, convert to SAM then sorted BAM, remove
        duplicates, index the BAM and finally run the mpileup step."""
        out = self.output
        p = Pipeline()
        ref = p.run('bwa_index', reference=self.reference)
        align = p.run('bwa_align', input=self.input,
                      reference=ref, output="${out}.sai")
        sam = p.run('bwa_sam', input=self.input,
                    reference=ref,
                    alignment=align,
                    output="${out}.sam")
        bam = p.run('sam2bam', input=sam, output="${out}.bam")
        dups = p.run('duplicates', input=bam, output="${out}.dedup.bam")
        index = p.run('bam_index', input=dups)
        pile = p.run('mpileup', input=index, reference="${ref|ext}",
                     output=out)
        # Expose the local names (out, ref, ...) to jip's template engine
        # so placeholders like ${out} above can be resolved.
        p.context(locals())
        return p
|
thasso/pyjip
|
examples/bwa/pileup.py
|
Python
|
bsd-3-clause
| 5,658
|
[
"BWA"
] |
fc6168132d65b0a0da7b5d514985bed465f3a0c71dffdce8769b535d0decb12b
|
import numpy as np
from ase import Atoms
from gpaw import GPAW
from gpaw.xc.sic import SIC
from gpaw.test import equal
# Cubic cell size in Angstrom for both calculations.
a = 7.0
# Spin-polarized nitrogen atom (magnetic moment 3) and the N2 dimer at a
# bond length of 1.14 Angstrom, each placed in an a**3 cubic cell.
atom = Atoms('N', magmoms=[3], cell=(a, a, a))
molecule = Atoms('N2', positions=[(0, 0, 0), (0, 0, 1.14)], cell=(a, a, a))
atom.center()
molecule.center()
# GPAW calculator using the self-interaction-corrected (SIC) functional
# and HGH pseudopotential setups; log goes to the txt file.
calc = GPAW(xc=SIC(),
            txt='n2.sic.new3b.txt',
            setups='hgh')
atom.set_calculator(calc)
e1 = atom.get_potential_energy()
# NOTE(review): the same calculator instance is reused for the molecule.
molecule.set_calculator(calc)
e2 = molecule.get_potential_energy()
F_ac = molecule.get_forces()
# Atomization energy 2*E(N) - E(N2) and the forces (Python 2 print syntax).
print 2 * e1 - e2
print F_ac
|
qsnake/gpaw
|
gpaw/test/scfsic_n2.py
|
Python
|
gpl-3.0
| 553
|
[
"ASE",
"GPAW"
] |
c9fda28a6e65e3817773930d0d8071bf601e84b1e5a3bfee817f175ce4488ca8
|
# gridDataFormats --- python modules to read and write gridded data
# Copyright (c) 2009-2014 Oliver Beckstein <orbeckst@gmail.com>
# Released under the GNU Lesser General Public License, version 3 or later.
# See the files COPYING and COPYING.LESSER for details.
"""
:mod:`gridData` -- Handling grids of data
=========================================
Overview
--------
This module contains classes that allow importing and exporting of
simple gridded data. A grid is an N-dimensional array that represents
a discrete mesh over a region of space. The array axes are taken to be
parallel to the cartesian axes of this space. Together with this array
we also store the edges, which are (essentially) the cartesian
coordinates of the intersections of the grid (mesh) lines on the
axes. In this way the grid is anchored in space.
The :class:`~gridData.core.Grid` object can be resampled at arbitrary resolution (by
interpolating the data). Standard algebraic operations are defined for
grids on a point-wise basis (same as for :class:`numpy.ndarray`).
Description
-----------
The package reads grid data from files, makes them available as a
:class:`~gridData.core.Grid` object, and allows one to write out the data again.
A :class:`~gridData.core.Grid` consists of a rectangular, regular, N-dimensional
array of data. It contains
(1) The position of the array cell edges.
(2) The array data itself.
This is equivalent to knowing
(1) The origin of the coordinate system (i.e. which data cell
corresponds to (0,0,...,0)
(2) The spacing of the grid in each dimension.
(3) The data on a grid.
:class:`~gridData.core.Grid` objects have some convenient properties:
* The data is represented as a :class:`numpy.ndarray` and thus shares
all the advantages coming with this sophisticated and powerful
library.
* They can be manipulated arithmetically, e.g. one can simply add or
subtract two of them and get another one, or multiply by a
constant. Note that all operations are defined point-wise (see the
:mod:`numpy` documentation for details) and that only grids defined
on the same cell edges can be combined.
* A :class:`~gridData.core.Grid` object can also be created from within python code
e.g. from the output of the :func:`numpy.histogramdd` function.
* The representation of the data is abstracted from the format that
the files are saved in. This makes it straightforward to add
additional readers for new formats.
* The data can be written out again in formats that are understood by
other programs such as VMD or PyMOL.
Reading grid data files
-----------------------
Some Formats_ can be read directly from a file on disk::
g = Grid(filename)
*filename* could be, for instance, "density.dx".
Constructing a Grid
-------------------
Data from an n-dimensional array can be packaged as a :class:`~gridData.core.Grid`
for convenient handling (especially export to other formats). The
:class:`~gridData.core.Grid` class acts as a universal constructor::
g = Grid(ndarray, edges=edges) # from histogramdd
g = Grid(ndarray, origin=origin, delta=delta) # from arbitrary data
g.export(filename, format)                    # export to the desired format
See the doc string for :class:`~gridData.core.Grid` for details.
Formats
-------
The following formats are available (:ref:`supported-file-formats`):
:mod:`~gridData.OpenDX`
IBM's Data Explorer, http://www.opendx.org/
:mod:`~gridData.gOpenMol`
http://www.csc.fi/gopenmol/
pickle
python pickle file (:mod:`pickle`)
Exceptions
----------
.. autoexception:: gridDataWarning
Examples
========
In most cases, only one class is important, the
:class:`~gridData.core.Grid`, so we just load this right away::
from gridData import Grid
Loading data
------------
From a OpenDX file::
g = Grid("density.dx")
From a gOpenMol PLT file::
g = Grid("density.plt")
From the output of :func:`numpy.histogramdd`::
import numpy
r = numpy.random.randn(100,3)
H, edges = np.histogramdd(r, bins = (5, 8, 4))
g = Grid(H, edges=edges)
For other ways to load data, see the docs for :class:`~gridData.core.Grid`.
Subtracting two densities
-------------------------
Assuming one has two densities that were generated on the same grid
positions, stored in files ``A.dx`` and ``B.dx``, one first reads the
data into two :class:`~gridData.core.Grid` objects::
A = Grid('A.dx')
B = Grid('B.dx')
Subtract A from B::
C = B - A
and write out as a dx file::
C.export('C.dx')
The resulting file ``C.dx`` can be visualized with any OpenDX-capable
viewer, or later read-in again.
Resampling
----------
Load data::
A = Grid('A.dx')
Interpolate with a cubic spline to twice the sample density::
A2 = A.resample_factor(2)
Downsample to half of the bins in each dimension::
Ahalf = A.resample_factor(0.5)
Resample to the grid of another density, B::
B = Grid('B.dx')
A_on_B = A.resample(B.edges)
or even simpler ::
A_on_B = A.resample(B)
.. Note:: The cubic spline generates region with values that did not
occur in the original data; in particular if the original data's
lowest value was 0 then the spline interpolation will probably
produce some values <0 near regions where the density changed
abruptly.
"""
# Names exported by ``from gridData import *``.
__all__ = ['Grid', 'OpenDX','gOpenMol']
__version__ = '0.2.5'


class gridDataWarning(Warning):
    """Warns of a problem specific to the gridData module."""
    pass


# Re-export here so ``from gridData import Grid`` works directly.
from .core import Grid
|
holocronweaver/GridDataFormats
|
gridData/__init__.py
|
Python
|
gpl-3.0
| 5,484
|
[
"PyMOL",
"VMD"
] |
e4a0662cfa2460fdd68739fa8916b4e818d0611df507ae0e6d313593d9019a69
|
###########################################################
# File: OpenStackEndpoint.py
# Author: A.T.
###########################################################
"""
OpenStackEndpoint is Endpoint base class implementation for the OpenStack cloud service.
"""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
__RCSID__ = '$Id$'
import requests
import json
import base64
# DIRAC
from DIRAC import gLogger, S_OK, S_ERROR
from VMDIRAC.Resources.Cloud.Endpoint import Endpoint
from VMDIRAC.Resources.Cloud.KeystoneClient import KeystoneClient
from DIRAC.Core.Utilities.File import makeGuid
DEBUG = False
class OpenStackEndpoint(Endpoint):
""" OpenStack implementation of the Cloud Endpoint interface
"""
def __init__(self, parameters=None, bootstrapParameters=None):
    """Construct the endpoint and immediately attempt initialization.

    :param dict parameters: endpoint configuration parameters
    :param dict bootstrapParameters: bootstrap configuration parameters
    """
    super(OpenStackEndpoint, self).__init__(parameters=parameters,
                                            bootstrapParameters=bootstrapParameters)
    # Dedicated sub-logger for this endpoint implementation.
    self.log = gLogger.getSubLogger('OpenStackEndpoint')
    # Keystone client and service interface URLs, filled by initialize().
    self.ks = None
    self.computeURL = None
    self.imageURL = None
    self.networkURL = None
    # Caches of resources discovered on the cloud service.
    self.flavors = {}
    self.images = {}
    self.networks = {}
    # Project / network selection.
    self.network = None
    self.project = None
    self.projectID = None
    # Per-VM bookkeeping (e.g. cached port IDs).
    self.vmInfo = {}
    self.initialized = False
    # Perform the initial handshake with the cloud service right away.
    outcome = self.initialize()
    if outcome['OK']:
        self.log.debug('OpenStackEndpoint created and validated')
    else:
        self.log.error(outcome['Message'])
def initialize(self):
    """Authenticate with Keystone and discover the service interfaces,
    then pre-populate the flavor, image and network caches.

    Sets ``self.valid`` to True after a successful token request, and
    back to False if flavor or image discovery fails.

    NOTE(review): ``self.initialized`` is never set to True here, so
    every caller guarded by ``if not self.initialized`` re-runs this
    method (which also refreshes the Keystone token) — confirm whether
    that repeated re-initialization is intentional.

    :return: S_OK | S_ERROR — the result of the *image* discovery step;
        the network discovery outcome is not propagated.
    """
    self.caPath = self.parameters.get('CAPath', True)
    self.network = self.parameters.get("Network")
    self.project = self.parameters.get("Project")
    keyStoneURL = self.parameters.get("AuthURL")
    result = self.getProxyFileLocation()
    if result['OK']:
        self.parameters['Proxy'] = result['Value']
    self.ks = KeystoneClient(keyStoneURL, self.parameters)
    result = self.ks.getToken()
    if not result['OK']:
        return result
    self.valid = True
    self.token = result['Value']
    # Service interfaces as discovered by the Keystone client.
    self.computeURL = self.ks.computeURL
    self.imageURL = self.ks.imageURL
    self.networkURL = self.ks.networkURL
    self.projectID = self.ks.projectID
    self.log.verbose("Service interfaces:\ncompute %s,\nimage %s,\nnetwork %s" %
                     (self.computeURL, self.imageURL, self.networkURL))
    # Pre-populate the resource caches; failures invalidate the endpoint.
    result = self.getFlavors()
    if not result['OK']:
        self.valid = False
    result = self.getImages()
    if not result['OK']:
        self.valid = False
    # Network discovery failure is tolerated (not all sites expose it).
    self.getNetworks()
    return result
def getFlavors(self):
    """Retrieve the details of all flavors defined for the project.

    Populates the ``self.flavors`` cache, keyed by flavor name, with the
    flavor ID, RAM and vCPU count.

    :return: S_OK(flavors dict) | S_ERROR
    """
    if not self.computeURL or not self.token:
        return S_ERROR('The endpoint object is not initialized')
    url = "%s/flavors/detail" % self.computeURL
    self.log.verbose("Getting flavors details on %s" % url)
    # Network or JSON failures previously propagated as raw exceptions;
    # wrap them in S_ERROR, consistent with getNetworks().
    try:
        result = requests.get(url,
                              headers={"X-Auth-Token": self.token},
                              verify=self.caPath)
        output = json.loads(result.text)
    except Exception as exc:
        return S_ERROR('Cannot get flavors: %s' % str(exc))
    for flavor in output['flavors']:
        self.flavors[flavor["name"]] = {"FlavorID": flavor['id'],
                                        "RAM": flavor['ram'],
                                        "NumberOfProcessors": flavor['vcpus']}
    return S_OK(self.flavors)
def getImages(self):
    """Retrieve the details of all images visible to the project.

    Populates the ``self.images`` cache, keyed by image name.

    :return: S_OK(images dict) | S_ERROR
    """
    if not self.imageURL or not self.token:
        return S_ERROR('The endpoint object is not initialized')
    # Network or JSON failures previously propagated as raw exceptions;
    # wrap them in S_ERROR, consistent with getNetworks().
    try:
        result = requests.get("%s/v2/images" % self.imageURL,
                              headers={"X-Auth-Token": self.token},
                              verify=self.caPath)
        output = json.loads(result.text)
    except Exception as exc:
        return S_ERROR('Cannot get images: %s' % str(exc))
    for image in output['images']:
        self.images[image['name']] = {'id': image['id']}
    return S_OK(self.images)
def getNetworks(self):
    """Discover the networks belonging to this endpoint's project.

    Populates the ``self.networks`` cache, keyed by network name.

    :return: S_OK(networks dict) | S_ERROR
    """
    try:
        resp = requests.get("%s/v2.0/networks" % self.networkURL,
                            headers={"X-Auth-Token": self.token},
                            verify=self.caPath)
        data = json.loads(resp.text)
    except Exception as exc:
        return S_ERROR('Cannot get networks: %s' % str(exc))
    # Keep only the networks owned by our project.
    for net in data['networks']:
        if net['project_id'] != self.projectID:
            continue
        self.networks[net["name"]] = {"NetworkID": net["id"]}
    return S_OK(self.networks)
def createInstances(self, vmsToSubmit):
    """Submit up to *vmsToSubmit* VM instances, stopping at the first failure.

    :param int vmsToSubmit: number of VMs to attempt to create
    :return: S_OK({nodeID: {'InstanceID', 'NumberOfProcessors'}}) | S_ERROR
    """
    submitted = {}
    for _ in range(vmsToSubmit):
        instanceID = makeGuid()[:8]
        result = self.createInstance(instanceID)
        # Stop on the first submission failure.
        if not result['OK']:
            break
        nodeID = result['Value']
        self.log.debug('Created VM instance %s/%s' % (nodeID, instanceID))
        submitted[nodeID] = {
            'InstanceID': instanceID,
            'NumberOfProcessors': self.parameters["NumberOfProcessors"],
        }
    # We failed submission utterly
    if not submitted:
        return S_ERROR('No VM submitted')
    return S_OK(submitted)
def createInstance(self, instanceID=''):
    """Create a single VM instance on the OpenStack endpoint.

    Resolves the image, flavor and (optionally) network from the endpoint
    parameters — looking names up on the service when only names are
    configured — generates the user-data contextualization script and
    posts the server-creation request.

    The node name has the following format: ``DIRAC_<instanceID>``.

    :param str instanceID: unique short ID used to name the VM
    :return: S_OK(nodeID) | S_ERROR
    """
    if not self.initialized:
        self.initialize()

    # Resolve the image ID, looking it up by name if necessary.
    imageID = self.parameters.get('ImageID')
    if not imageID:
        imageName = self.parameters.get('Image')
        if not imageName:
            return S_ERROR('No image name or ID is specified')
        if not self.images:
            result = self.getImages()
            if not result['OK']:
                return result
        # Guard against an unknown image name: .get() returns None and
        # subscripting it directly would raise TypeError instead of
        # reaching the clean S_ERROR below.
        imageData = self.images.get(imageName)
        imageID = imageData['id'] if imageData else None
        if not imageID:
            return S_ERROR('Can not get ID for the image: %s' % imageName)
        self.parameters['ImageID'] = imageID
    # If only the ID was configured, back-fill the image name.
    if "Image" not in self.parameters:
        for image in self.images:
            if self.images[image]['id'] == imageID:
                self.parameters['Image'] = image

    # Resolve the flavor ID, looking it up by name if necessary.
    flavorID = self.parameters.get('FlavorID')
    if not flavorID:
        flavor = self.parameters.get('FlavorName')
        if not flavor:
            return S_ERROR('No flavor name or ID is specified')
        if not self.flavors:
            result = self.getFlavors()
            if not result['OK']:
                return result
        # Same guard as for images: avoid TypeError on unknown names.
        flavorData = self.flavors.get(flavor)
        if not flavorData or not flavorData.get("FlavorID"):
            return S_ERROR('Can not get ID for the flavor: %s' % flavor)
        flavorID = flavorData["FlavorID"]
        numberOfProcessors = flavorData["NumberOfProcessors"]
        self.parameters['FlavorID'] = flavorID
        if "NumberOfProcessors" not in self.parameters:
            self.parameters["NumberOfProcessors"] = numberOfProcessors

    # Resolve the network ID if a network service is available.
    networkID = self.parameters.get('NetworkID')
    if not networkID:
        network = self.parameters.get('Network')
        if not self.networks and self.networkURL:
            result = self.getNetworks()
            if not result['OK']:
                return result
        if network:
            if network in self.networks:
                networkID = self.networks[network]["NetworkID"]
        elif self.networks:
            # No network requested: pick an arbitrary known one. list()
            # keeps this working on Python 3, where dict.keys() is a
            # view and does not support indexing.
            randomNW = list(self.networks)[0]
            networkID = self.networks[randomNW]["NetworkID"]
    if not networkID:
        self.log.warn("Failed to get ID of the network interface")

    # Generate and encode the user-data contextualization script.
    self.parameters['VMUUID'] = instanceID
    self.parameters['VMType'] = self.parameters.get('CEType', 'OpenStack')
    result = self._createUserDataScript()
    if not result['OK']:
        return result
    userDataCrude = str(result['Value'])
    # NOTE(review): on Python 3, b64encode() requires bytes, so this line
    # would need userDataCrude.encode() — confirm the target interpreter.
    userData = base64.b64encode(userDataCrude)

    headers = {"X-Auth-Token": self.token}
    requestDict = {"server": {"user_data": userData,
                              "name": "DIRAC_%s" % instanceID,
                              "imageRef": imageID,
                              "flavorRef": flavorID}
                   }
    # Some cloud sites do not expose network service interface, but some do
    if networkID:
        requestDict["server"]["networks"] = [{"uuid": networkID}]
    # Allow the use of pre-uploaded SSH keys
    osSSHKey = self.parameters.get('OSKeyName')
    if osSSHKey:
        requestDict["server"]["key_name"] = osSSHKey

    try:
        result = requests.post("%s/servers" % self.computeURL,
                               json=requestDict,
                               headers=headers,
                               verify=self.caPath)
    except Exception as exc:
        return S_ERROR('Exception creating VM: %s' % str(exc))
    if result.status_code in [200, 201, 202, 203, 204]:
        output = json.loads(result.text)
        nodeID = output["server"]["id"]
        return S_OK(nodeID)
    else:
        return S_ERROR('Error creating VM: %s' % result.text)
def getVMIDs(self):
    """Get all the VM IDs known to the compute endpoint.

    :return: S_OK(list of server IDs) | S_ERROR
    """
    if not self.initialized:
        result = self.initialize()
        if not result['OK']:
            return result
    try:
        response = requests.get("%s/servers" % self.computeURL,
                                headers={"X-Auth-Token": self.token},
                                verify=self.caPath)
    except Exception as e:
        return S_ERROR('Cannot connect to ' + str(self.computeURL) + ' (' + str(e) + ')')
    # Extract just the server IDs from the API response.
    servers = json.loads(response.text)["servers"]
    return S_OK([srv['id'] for srv in servers])
def getVMStatus(self, vmID):
    """Get the status information of the VM with the given ID.

    Returns the raw ``"server"`` dictionary from the OpenStack API
    response (as obtained via :meth:`getVMInfo`); its ``"status"`` field
    carries the OpenStack state string.

    NOTE(review): an earlier docstring here described a libcloud
    status-mapping scheme that this implementation does not perform.

    :param str vmID: OpenStack server ID
    :return: S_OK(server info dict) | S_ERROR
    """
    if not self.initialized:
        self.initialize()
    result = self.getVMInfo(vmID)
    if not result['OK']:
        return result
    output = result['Value']
    return S_OK(output["server"])
def stopVM(self, nodeID):
    """Delete (terminate) the VM with the given ID.

    A 404 response is treated as success, since it means the VM is
    already gone.

    :param str nodeID: OpenStack server ID
    :return: S_OK(response text) | S_ERROR
    """
    if not self.initialized:
        self.initialize()
    try:
        response = requests.delete("%s/servers/%s" % (self.computeURL, nodeID),
                                   headers={"X-Auth-Token": self.token},
                                   verify=self.caPath)
    except Exception as e:
        # Fixed message: this is a delete request, not a details lookup.
        return S_ERROR('Cannot delete node %s (%s)' % (nodeID, str(e)))
    # 204: deleted now; 404: already gone — both count as success.
    if response.status_code in (204, 404):
        return S_OK(response.text)
    return S_ERROR(response.text)
def __getVMPortID(self, nodeID):
    """Get the network port ID associated with the given VM.

    The port ID is cached in self.vmInfo once found.

    :param str nodeID: VM ID
    :return: S_OK( port ID ) | S_ERROR
    """
    cached = self.vmInfo.get(nodeID, {})
    if 'portID' in cached:
        return S_OK(cached['portID'])

    # Query all ports and look for the one attached to our VM
    portID = None
    try:
        result = requests.get("%s/v2.0/ports" % self.networkURL,
                              headers={"X-Auth-Token": self.token},
                              verify=self.caPath)
        for port in json.loads(result.text)['ports']:
            if port['device_id'] == nodeID:
                portID = port['id']
                self.vmInfo.setdefault(nodeID, {})
                self.vmInfo[nodeID]['portID'] = portID
    except Exception as exc:
        return S_ERROR('Cannot get ports: %s' % str(exc))
    return S_OK(portID)
def assignFloatingIP(self, nodeID):
    """Assign a floating IP to the given VM.

    Finds an unattached floating IP on the network endpoint and associates
    it with the VM's port. The floating IP ID is cached per node so that
    deleteFloatingIP() can later find it.

    :param str nodeID: VM ID
    :return: S_OK( public ip address ) | S_ERROR
    """
    if not self.initialized:
        # Fix: the initialization result was previously ignored
        result = self.initialize()
        if not result['OK']:
            return result

    # Nothing to do if the VM already has a floating IP
    result = self.getVMFloatingIP(nodeID)
    if result['OK'] and result['Value']:
        return S_OK(result['Value'])

    # Get the port of my VM
    result = self.__getVMPortID(nodeID)
    if not result['OK']:
        return result
    portID = result['Value']

    # Find an available (unattached) floating IP
    try:
        result = requests.get("%s/v2.0/floatingips" % self.networkURL,
                              headers={"X-Auth-Token": self.token},
                              verify=self.caPath)
        output = json.loads(result.text)
    except Exception as e:
        # Fix: include the actual failure reason in the error
        return S_ERROR('Cannot get floatingips: %s' % str(e))

    fipID = None
    for fip in output['floatingips']:
        if fip['fixed_ip_address'] is None:
            fipID = fip['id']
            break
    if fipID is None:
        return S_ERROR('No floating IP available')

    # Attach the floating IP to the VM's port
    data = {"floatingip": {"port_id": portID}}
    try:
        result = requests.put("%s/v2.0/floatingips/%s" % (self.networkURL, fipID),
                              data=json.dumps(data),
                              headers={"X-Auth-Token": self.token},
                              verify=self.caPath)
    except Exception as e:
        return S_ERROR('Cannot assign floating IP: %s' % str(e))

    output = json.loads(result.text)
    # Fix: this block was duplicated verbatim in the original, and the
    # floating IP ID was cached under the top-level 'floatingID' key
    # instead of under the node entry, so deleteFloatingIP() could never
    # find it in the cache.
    self.vmInfo.setdefault(nodeID, {})
    self.vmInfo[nodeID]['floatingID'] = output['floatingip']['id']
    return S_OK(output['floatingip']['floating_ip_address'])
def getVMInfo(self, vmID):
    """Retrieve the full server description of a VM.

    On success the image and flavor IDs are cached in self.vmInfo.

    :param str vmID: VM ID
    :return: S_OK( server description dict ) | S_ERROR
    """
    try:
        response = requests.get("%s/servers/%s" % (self.computeURL, vmID),
                                headers={"X-Auth-Token": self.token},
                                verify=self.caPath)
    except Exception as e:
        return S_ERROR('Cannot get node details for %s (' % vmID + str(e) + ')')

    status = response.status_code
    if status == 404:
        return S_ERROR("VM ID %s not found" % vmID)

    output = json.loads(response.text)
    if status == 403:
        if 'forbidden' in output:
            return S_ERROR("Cannot get VM info: %s" % output['forbidden'].get('message'))
        return S_ERROR("Cannot get VM info: access forbidden")

    if status == 200:
        # Cache some info
        info = self.vmInfo.setdefault(vmID, {})
        info['imageID'] = output['server']['image']['id']
        info['flavorID'] = output['server']['flavor']['id']
    return S_OK(output)
def getVMFloatingIP(self, nodeID):
    """Return the floating IP of a VM, if any.

    :param str nodeID: VM ID
    :return: S_OK( floating ip address or None ) | S_ERROR
    """
    result = self.getVMInfo(nodeID)
    if not result['OK']:
        return result

    floatingIP = None
    output = result['Value']
    for addressList in output['server']['addresses'].values():
        for address in addressList:
            # Fix: use .get() — an address entry without the
            # OS-EXT-IPS:type extension attribute previously raised KeyError
            if address.get('OS-EXT-IPS:type') == "floating":
                floatingIP = address['addr']
    return S_OK(floatingIP)
def deleteFloatingIP(self, nodeID, floatingIP=None):
    """Disassociate the floating IP from the given VM.

    :param str nodeID: VM ID
    :param str floatingIP: unused, kept for interface compatibility
    :return: S_OK( floating IP ID ) | S_ERROR
    """
    fipID = None
    if nodeID in self.vmInfo and "floatingID" in self.vmInfo[nodeID]:
        # Fix: the cached floating IP ID was looked up here but then
        # unconditionally overwritten by the search below; now the lookup
        # is only done when the cache has no entry.
        fipID = self.vmInfo[nodeID]["floatingID"]
    else:
        result = self.getVMFloatingIP(nodeID)
        if not result['OK']:
            return result
        if result['Value'] is None:
            # No floating IP attached: nothing to disassociate
            return S_OK()

        result = self.__getVMPortID(nodeID)
        if not result['OK']:
            return result
        portID = result['Value']

        # Find the floating IP attached to our port
        try:
            result = requests.get("%s/v2.0/floatingips" % self.networkURL,
                                  headers={"X-Auth-Token": self.token},
                                  verify=self.caPath)
            output = json.loads(result.text)
        except Exception as exc:
            return S_ERROR('Cannot get floatingips: %s' % str(exc))
        for fip in output['floatingips']:
            if fip['port_id'] == portID:
                fipID = fip['id']
                break

    if not fipID:
        return S_ERROR('Can not get the floating IP ID')

    # Detach the floating IP by clearing its port association
    data = {"floatingip": {"port_id": None}}
    try:
        result = requests.put("%s/v2.0/floatingips/%s" % (self.networkURL, fipID),
                              data=json.dumps(data),
                              headers={"X-Auth-Token": self.token},
                              verify=self.caPath)
    except Exception as exc:
        return S_ERROR('Cannot disassociate floating IP: %s' % str(exc))
    if result.status_code == 200:
        return S_OK(fipID)
    return S_ERROR("Cannot disassociate floating IP: %s" % result.text)
|
DIRACGrid/VMDIRAC
|
VMDIRAC/Resources/Cloud/OpenStackEndpoint.py
|
Python
|
gpl-3.0
| 17,476
|
[
"DIRAC"
] |
d0f3f2574a7a4f51ca523018ea17a9ec9fdac434f63211954722368c1defa147
|
# -*- coding: utf-8 -*-
"""
/***************************************************************************
ORStools
A QGIS plugin
QGIS client to query openrouteservice
-------------------
begin : 2017-02-01
git sha : $Format:%H$
copyright : (C) 2021 by HeiGIT gGmbH
email : support@openrouteservice.heigit.org
***************************************************************************/
This plugin provides access to openrouteservice API functionalities
(https://openrouteservice.org), developed and
maintained by the openrouteservice team of HeiGIT gGmbH, Germany. By using
this plugin you agree to the ORS terms of service
(https://openrouteservice.org/terms-of-service/).
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
import json
import os
import processing
import webbrowser
from qgis.core import (QgsProject,
QgsVectorLayer,
QgsTextAnnotation,
QgsMapLayerProxyModel)
from qgis.gui import QgsMapCanvasAnnotationItem
from PyQt5.QtCore import QSizeF, QPointF
from PyQt5.QtGui import QIcon, QTextDocument
from PyQt5.QtWidgets import (QAction,
QDialog,
QApplication,
QMenu,
QMessageBox,
QDialogButtonBox)
from ORStools import RESOURCE_PREFIX, PLUGIN_NAME, DEFAULT_COLOR, __version__, __email__, __web__, __help__
from ORStools.common import (client,
directions_core,
PROFILES,
PREFERENCES, )
from ORStools.gui import directions_gui
from ORStools.utils import exceptions, maptools, logger, configmanager, transform
from .ORStoolsDialogConfig import ORStoolsDialogConfigMain
from .ORStoolsDialogUI import Ui_ORStoolsDialogBase
from . import resources_rc
def on_config_click(parent):
    """Pop up the provider configuration window.

    Lives outside of classes because it is accessed by multiple dialogs.

    :param parent: Sets parent window for modality.
    :type parent: QDialog
    """
    dlg = ORStoolsDialogConfigMain(parent=parent)
    dlg.exec_()
def on_help_click():
    """Open the plugin's help URL in the default web browser."""
    url = __help__
    webbrowser.open(url)
def on_about_click(parent):
    """Slot for click event of About button/menu entry.

    Shows an information box with author, contact and version details.

    :param parent: parent window for modality.
    """
    # Assemble the HTML body from its fragments; joining with '' yields
    # exactly the same string as the original literal concatenation.
    fragments = (
        '<b>ORS Tools</b> provides access to <a href="https://openrouteservice.org"',
        f' style="color: {DEFAULT_COLOR}">openrouteservice</a> routing functionalities.',
        '<br><br>',
        '<center>',
        '<a href="https://heigit.org/de/willkommen"><img src=":/plugins/ORStools/img/logo_heigit_300.png"/>',
        '</a><br><br>',
        '</center>',
        'Author: HeiGIT gGmbH<br>',
        f'Email: <a href="mailto:Openrouteservice <{__email__}>">{__email__}</a><br>',
        f'Web: <a href="{__web__}">{__web__}</a><br>',
        'Repo: <a href="https://github.com/GIScience/orstools-qgis-plugin">',
        'github.com/GIScience/orstools-qgis-plugin</a><br>',
        f'Version: {__version__}',
    )
    info = ''.join(fragments)

    QMessageBox.information(
        parent,
        f'About {PLUGIN_NAME}',
        info
    )
class ORStoolsDialogMain:
    """Defines all mandatory QGIS things about dialog.

    Owns the plugin's toolbar icon, Web-menu entries and the main dialog
    instance, and drives the request/response cycle against the ORS API.
    """

    def __init__(self, iface):
        """
        :param iface: the current QGIS interface
        :type iface: Qgis.Interface
        """
        self.iface = iface
        self.project = QgsProject.instance()

        # True until the main dialog has been built once in this session
        self.first_start = True
        # Dialogs
        self.dlg = None
        self.menu = None
        self.actions = None

    # noinspection PyUnresolvedReferences
    def initGui(self):
        """Called when plugin is activated (on QGIS startup or when activated in Plugin Manager)."""

        def create_icon(f):
            """
            internal function to create action icons

            :param f: file name of icon.
            :type f: str

            :returns: icon object to insert to QAction
            :rtype: QIcon
            """
            return QIcon(RESOURCE_PREFIX + f)

        icon_plugin = create_icon('icon_orstools.png')

        # Order matters: index 0 = main dialog, 1 = settings, 2 = about, 3 = help
        self.actions = [
            QAction(
                icon_plugin,
                PLUGIN_NAME,  # tr text
                self.iface.mainWindow()  # parent
            ),
            # Config dialog
            QAction(
                create_icon('icon_settings.png'),
                'Provider Settings',
                self.iface.mainWindow()
            ),
            # About dialog
            QAction(
                create_icon('icon_about.png'),
                'About',
                self.iface.mainWindow()
            ),
            # Help page
            QAction(
                create_icon('icon_help.png'),
                'Help',
                self.iface.mainWindow()
            )
        ]

        # Create menu
        self.menu = QMenu(PLUGIN_NAME)
        self.menu.setIcon(icon_plugin)
        self.menu.addActions(self.actions)

        # Add menu to Web menu and make sure it exists and add icon to toolbar.
        # The temporary "_tmp" entry forces QGIS to create the Web menu first.
        self.iface.addPluginToWebMenu("_tmp", self.actions[2])
        self.iface.webMenu().addMenu(self.menu)
        self.iface.removePluginWebMenu("_tmp", self.actions[2])
        self.iface.addWebToolBarIcon(self.actions[0])

        # Connect slots to events
        self.actions[0].triggered.connect(self._init_gui_control)
        self.actions[1].triggered.connect(lambda: on_config_click(parent=self.iface.mainWindow()))
        self.actions[2].triggered.connect(lambda: on_about_click(parent=self.iface.mainWindow()))
        self.actions[3].triggered.connect(on_help_click)

    def unload(self):
        """Called when QGIS closes or plugin is deactivated in Plugin Manager"""
        self.iface.webMenu().removeAction(self.menu.menuAction())
        self.iface.removeWebToolBarIcon(self.actions[0])
        QApplication.restoreOverrideCursor()
        del self.dlg

    # @staticmethod
    # def get_quota(provider):
    #     """
    #     Update remaining quota from env variables.
    #
    #     :returns: remaining quota text to be displayed in GUI.
    #     :rtype: str
    #     """
    #
    #     # Dirty hack out of laziness.. Prone to errors
    #     text = []
    #     for var in sorted(provider['ENV_VARS'].keys(), reverse=True):
    #         text.append(os.environ[var])
    #     return '/'.join(text)

    def _init_gui_control(self):
        """Slot for main plugin button. Initializes the GUI and shows it."""

        # Only populate GUI if it's the first start of the plugin within the QGIS session
        # If not checked, GUI would be rebuilt every time!
        if self.first_start:
            self.first_start = False
            self.dlg = ORStoolsDialog(self.iface, self.iface.mainWindow())  # setting parent enables modal view
            # Make sure plugin window stays open when OK is clicked by reconnecting the accepted() signal
            self.dlg.global_buttons.accepted.disconnect(self.dlg.accept)
            self.dlg.global_buttons.accepted.connect(self.run_gui_control)
            self.dlg.avoidpolygon_dropdown.setFilters(QgsMapLayerProxyModel.PolygonLayer)

        # Populate provider box on window startup, since can be changed from multiple menus/buttons
        providers = configmanager.read_config()['providers']
        self.dlg.provider_combo.clear()
        for provider in providers:
            self.dlg.provider_combo.addItem(provider['name'], provider)

        self.dlg.show()

    def run_gui_control(self):
        """Slot function for OK button of main dialog.

        Validates the dialog input, sends the routing/optimization request
        and adds the resulting feature as a new memory layer to the project.
        """

        # In-memory layer that will receive the computed route
        layer_out = QgsVectorLayer("LineString?crs=EPSG:4326", "Route_ORS", "memory")
        layer_out.dataProvider().addAttributes(directions_core.get_fields())
        layer_out.updateFields()

        # Associate annotations with map layer, so they get deleted when layer is deleted
        for annotation in self.dlg.annotations:
            # Has the potential to be pretty cool: instead of deleting, associate with mapLayer
            # , you can change order after optimization
            # Then in theory, when the layer is remove, the annotation is removed as well
            # Doesn't work though, the annotations are still there when project is re-opened
            # annotation.setMapLayer(layer_out)
            self.project.annotationManager().removeAnnotation(annotation)
        self.dlg.annotations = []

        provider_id = self.dlg.provider_combo.currentIndex()
        provider = configmanager.read_config()['providers'][provider_id]

        # if there are no coordinates, throw an error message
        if not self.dlg.routing_fromline_list.count():
            QMessageBox.critical(
                self.dlg,
                "Missing Waypoints",
                """
                Did you forget to set routing waypoints?<br><br>
                Use the 'Add Waypoint' button to add up to 50 waypoints.
                """
            )
            return

        # if no API key is present, when ORS is selected, throw an error message
        if not provider['key'] and provider['base_url'].startswith('https://api.openrouteservice.org'):
            QMessageBox.critical(
                self.dlg,
                "Missing API key",
                """
                Did you forget to set an <b>API key</b> for openrouteservice?<br><br>
                If you don't have an API key, please visit https://openrouteservice.org/sign-up to get one. <br><br>
                Then enter the API key for openrouteservice provider in Web ► ORS Tools ► Provider Settings or the
                settings symbol in the main ORS Tools GUI, next to the provider dropdown."""
            )
            return

        clnt = client.Client(provider)
        clnt_msg = ''

        directions = directions_gui.Directions(self.dlg)
        params = None
        try:
            params = directions.get_parameters()
            if self.dlg.optimization_group.isChecked():
                if len(params['jobs']) <= 1:  # Start/end locations don't count as job
                    QMessageBox.critical(
                        self.dlg,
                        "Wrong number of waypoints",
                        """At least 3 or 4 waypoints are needed to perform routing optimization.
                        Remember, the first and last location are not part of the optimization.
                        """
                    )
                    return
                response = clnt.request('/optimization', {}, post_json=params)
                feat = directions_core.get_output_features_optimization(response, params['vehicles'][0]['profile'])
            else:
                params['coordinates'] = directions.get_request_line_feature()
                profile = self.dlg.routing_travel_combo.currentText()
                # abort on empty avoid polygons layer
                if 'options' in params and 'avoid_polygons' in params['options']\
                        and params['options']['avoid_polygons'] == {}:
                    QMessageBox.warning(
                        self.dlg,
                        "Empty layer",
                        """
                        The specified avoid polygon(s) layer does not contain any features.
                        Please add polygons to the layer or uncheck avoid polygons.
                        """
                    )
                    msg = "The request has been aborted!"
                    logger.log(msg, 0)
                    self.dlg.debug_text.setText(msg)
                    return
                response = clnt.request('/v2/directions/' + profile + '/geojson', {}, post_json=params)
                feat = directions_core.get_output_feature_directions(
                    response,
                    profile,
                    params['preference'],
                    directions.options
                )
            layer_out.dataProvider().addFeature(feat)
            layer_out.updateExtents()
            self.project.addMapLayer(layer_out)

            # Update quota; handled in client module after successful request
            # if provider.get('ENV_VARS'):
            #     self.dlg.quota_text.setText(self.get_quota(provider) + ' calls')
        except exceptions.Timeout:
            msg = "The connection has timed out!"
            logger.log(msg, 2)
            self.dlg.debug_text.setText(msg)
            return
        except (exceptions.ApiError,
                exceptions.InvalidKey,
                exceptions.GenericServerError) as e:
            logger.log(f"{e.__class__.__name__}: {str(e)}", 2)
            clnt_msg += f"<b>{e.__class__.__name__}</b>: ({str(e)})<br>"
            raise
        except Exception as e:
            logger.log(f"{e.__class__.__name__}: {str(e)}", 2)
            clnt_msg += f"<b>{e.__class__.__name__}</b>: {str(e)}<br>"
            raise
        finally:
            # Set URL in debug window
            if params:
                clnt_msg += f'<a href="{clnt.url}">{clnt.url}</a><br>Parameters:<br>{json.dumps(params, indent=2)}'
            self.dlg.debug_text.setHtml(clnt_msg)
class ORStoolsDialog(QDialog, Ui_ORStoolsDialogBase):
    """Define the custom behaviour of Dialog.

    Wires up the widgets generated by Ui_ORStoolsDialogBase and manages the
    waypoint line tool plus its map-canvas annotations.
    """

    def __init__(self, iface, parent=None):
        """
        :param iface: QGIS interface
        :type iface: QgisInterface

        :param parent: parent window for modality.
        :type parent: QDialog/QApplication
        """
        QDialog.__init__(self, parent)
        self.setupUi(self)
        self._iface = iface
        self.project = QgsProject.instance()  # invoke a QgsProject instance
        self.map_crs = self._iface.mapCanvas().mapSettings().destinationCrs()

        # Set things around the custom map tool
        self.line_tool = None
        self.last_maptool = self._iface.mapCanvas().mapTool()
        self.annotations = []

        # Set up env variables for remaining quota
        os.environ["ORS_QUOTA"] = "None"
        os.environ["ORS_REMAINING"] = "None"

        # Populate combo boxes
        self.routing_travel_combo.addItems(PROFILES)
        self.routing_preference_combo.addItems(PREFERENCES)

        # Change OK and Cancel button names
        self.global_buttons.button(QDialogButtonBox.Ok).setText('Apply')
        self.global_buttons.button(QDialogButtonBox.Cancel).setText('Close')

        # Set up signals/slots

        # Config/Help dialogs
        self.provider_config.clicked.connect(lambda: on_config_click(self))
        self.help_button.clicked.connect(on_help_click)
        self.about_button.clicked.connect(lambda: on_about_click(parent=self._iface.mainWindow()))
        self.provider_refresh.clicked.connect(self._on_prov_refresh_click)

        # Routing tab
        self.routing_fromline_map.clicked.connect(self._on_linetool_init)
        self.routing_fromline_clear.clicked.connect(self._on_clear_listwidget_click)

        # Batch: each button opens the matching processing algorithm dialog
        self.batch_routing_points.clicked.connect(lambda: processing.execAlgorithmDialog(
            f'{PLUGIN_NAME}:directions_from_points_2_layers'))
        self.batch_routing_point.clicked.connect(lambda: processing.execAlgorithmDialog(
            f'{PLUGIN_NAME}:directions_from_points_1_layer'))
        self.batch_routing_line.clicked.connect(lambda: processing.execAlgorithmDialog(
            f'{PLUGIN_NAME}:directions_from_polylines_layer'))
        self.batch_iso_point.clicked.connect(lambda: processing.execAlgorithmDialog(
            f'{PLUGIN_NAME}:isochrones_from_point'))
        self.batch_iso_layer.clicked.connect(lambda: processing.execAlgorithmDialog(
            f'{PLUGIN_NAME}:isochrones_from_layer'))
        self.batch_matrix.clicked.connect(lambda: processing.execAlgorithmDialog(f'{PLUGIN_NAME}:matrix_from_layers'))

    def _on_prov_refresh_click(self):
        """Populates provider dropdown with fresh list from config.yml"""
        providers = configmanager.read_config()['providers']
        self.provider_combo.clear()
        for provider in providers:
            self.provider_combo.addItem(provider['name'], provider)

    def _on_clear_listwidget_click(self):
        """Clears the contents of the QgsListWidget and the annotations."""
        items = self.routing_fromline_list.selectedItems()
        if items:
            # if items are selected, only clear those
            for item in items:
                row = self.routing_fromline_list.row(item)
                self.routing_fromline_list.takeItem(row)
                if self.annotations:
                    # annotations are kept in list order, so the same row
                    # index addresses the matching annotation
                    self.project.annotationManager().removeAnnotation(self.annotations.pop(row))
        else:
            # else clear all items and annotations
            self.routing_fromline_list.clear()
            self._clear_annotations()

    def _linetool_annotate_point(self, point, idx):
        """Create a small map annotation showing the waypoint index.

        :param point: map position of the waypoint (in self.map_crs)
        :param idx: waypoint index to display
        :returns: annotation registered with the map canvas
        """
        annotation = QgsTextAnnotation()

        c = QTextDocument()
        html = "<strong>" + str(idx) + "</strong>"
        c.setHtml(html)

        annotation.setDocument(c)

        annotation.setFrameSizeMm(QSizeF(7, 5))
        annotation.setFrameOffsetFromReferencePointMm(QPointF(1.3, 1.3))
        annotation.setMapPosition(point)
        annotation.setMapPositionCrs(self.map_crs)

        return QgsMapCanvasAnnotationItem(annotation, self._iface.mapCanvas()).annotation()

    def _clear_annotations(self):
        """Clears annotations"""
        for annotation in self.annotations:
            if annotation in self.project.annotationManager().annotations():
                self.project.annotationManager().removeAnnotation(annotation)
        self.annotations = []

    def _on_linetool_init(self):
        """Hides GUI dialog, inits line maptool and add items to line list box."""
        self.hide()
        self.routing_fromline_list.clear()
        # Remove all annotations which were added (if any)
        self._clear_annotations()

        self.line_tool = maptools.LineTool(self._iface.mapCanvas())
        self._iface.mapCanvas().setMapTool(self.line_tool)
        self.line_tool.pointDrawn.connect(lambda point, idx: self._on_linetool_map_click(point, idx))
        self.line_tool.doubleClicked.connect(self._on_linetool_map_doubleclick)

    def _on_linetool_map_click(self, point, idx):
        """Adds an item to QgsListWidget and annotates the point in the map canvas"""
        # Display coordinates in WGS84 regardless of the project CRS
        transformer = transform.transformToWGS(self.map_crs)
        point_wgs = transformer.transform(point)
        self.routing_fromline_list.addItem(f"Point {idx}: {point_wgs.x():.6f}, {point_wgs.y():.6f}")

        annotation = self._linetool_annotate_point(point, idx)
        self.annotations.append(annotation)
        self.project.annotationManager().addAnnotation(annotation)

    def _on_linetool_map_doubleclick(self):
        """
        Populate line list widget with coordinates, end line drawing and show dialog again.
        """
        self.line_tool.pointDrawn.disconnect()
        self.line_tool.doubleClicked.disconnect()
        QApplication.restoreOverrideCursor()
        self._iface.mapCanvas().setMapTool(self.last_maptool)
        self.show()
|
nilsnolde/ORStools
|
ORStools/gui/ORStoolsDialog.py
|
Python
|
mit
| 19,930
|
[
"VisIt"
] |
cf6dc4f52be0a77dfa358346f220ecbbe8189d20f0452f3c2aefb3063c559829
|
"""
K-means clustering and vector quantization (:mod:`scipy.cluster.vq`)
====================================================================
Provides routines for k-means clustering, generating code books
from k-means models and quantizing vectors by comparing them with
centroids in a code book.
.. autosummary::
:toctree: generated/
whiten -- Normalize a group of observations so each feature has unit variance
vq -- Calculate code book membership of a set of observation vectors
kmeans -- Perform k-means on a set of observation vectors forming k clusters
kmeans2 -- A different implementation of k-means with more methods
-- for initializing centroids
Background information
----------------------
The k-means algorithm takes as input the number of clusters to
generate, k, and a set of observation vectors to cluster. It
returns a set of centroids, one for each of the k clusters. An
observation vector is classified with the cluster number or
centroid index of the centroid closest to it.
A vector v belongs to cluster i if it is closer to centroid i than
any other centroid. If v belongs to i, we say centroid i is the
dominating centroid of v. The k-means algorithm tries to
minimize distortion, which is defined as the sum of the squared distances
between each observation vector and its dominating centroid.
The minimization is achieved by iteratively reclassifying
the observations into clusters and recalculating the centroids until
a configuration is reached in which the centroids are stable. One can
also define a maximum number of iterations.
Since vector quantization is a natural application for k-means,
information theory terminology is often used. The centroid index
or cluster index is also referred to as a "code" and the table
mapping codes to centroids and, vice versa, is often referred to as a
"code book". The result of k-means, a set of centroids, can be
used to quantize vectors. Quantization aims to find an encoding of
vectors that reduces the expected distortion.
All routines expect obs to be an M by N array, where the rows are
the observation vectors. The codebook is a k by N array, where the
ith row is the centroid of code word i. The observation vectors
and centroids have the same feature dimension.
As an example, suppose we wish to compress a 24-bit color image
(each pixel is represented by one byte for red, one for blue, and
one for green) before sending it over the web. By using a smaller
8-bit encoding, we can reduce the amount of data by two
thirds. Ideally, the colors for each of the 256 possible 8-bit
encoding values should be chosen to minimize distortion of the
color. Running k-means with k=256 generates a code book of 256
codes, which fills up all possible 8-bit sequences. Instead of
sending a 3-byte value for each pixel, the 8-bit centroid index
(or code word) of the dominating centroid is transmitted. The code
book is also sent over the wire so each 8-bit code can be
translated back to a 24-bit pixel value representation. If the
image of interest was of an ocean, we would expect many 24-bit
blues to be represented by 8-bit codes. If it was an image of a
human face, more flesh-tone colors would be represented in the
code book.
"""
from __future__ import division, print_function, absolute_import
import warnings
import numpy as np
from collections import deque
from scipy._lib._util import _asarray_validated
from scipy._lib.six import xrange
from scipy.spatial.distance import cdist
from . import _vq
__docformat__ = 'restructuredtext'
__all__ = ['whiten', 'vq', 'kmeans', 'kmeans2']
class ClusterError(Exception):
    """Exception raised by the clustering routines in this module.

    NOTE(review): no raiser is visible in this chunk; presumably raised by
    the k-means routines defined further down the file — confirm there.
    """
    pass
def whiten(obs, check_finite=True):
    """
    Normalize a group of observations on a per feature basis.

    Each feature (column) of `obs` is divided by its standard deviation
    across all observations, giving every feature unit variance. This is
    the recommended preprocessing step before running k-means.

    Parameters
    ----------
    obs : ndarray
        Each row of the array is an observation; the columns are the
        features seen during each observation.
    check_finite : bool, optional
        Whether to check that the input matrix contains only finite
        numbers. Disabling may give a performance gain, but may result in
        problems (crashes, non-termination) if the input does contain
        infinities or NaNs. Default: True

    Returns
    -------
    result : ndarray
        Contains the values in `obs` scaled by the standard deviation
        of each column.
    """
    obs = _asarray_validated(obs, check_finite=check_finite)
    scale = obs.std(axis=0)
    constant_cols = scale == 0
    if constant_cols.any():
        # Leave zero-variance features untouched rather than dividing by zero
        scale[constant_cols] = 1.0
        warnings.warn("Some columns have standard deviation zero. "
                      "The values of these columns will not change.",
                      RuntimeWarning)
    return obs / scale
def vq(obs, code_book, check_finite=True):
    """
    Assign codes from a code book to observations.

    Each observation vector in the 'M' by 'N' `obs` array is compared with
    the centroids in the code book and assigned the code of the closest
    centroid. The features in `obs` should have unit variance (see
    `whiten`).

    Parameters
    ----------
    obs : ndarray
        Each row of the 'M' x 'N' array is an observation; the columns are
        the "features". Should be whitened first.
    code_book : ndarray
        Each row holds a different code; the columns are the features of
        the code. Usually generated by k-means.
    check_finite : bool, optional
        Whether to check that the input matrices contain only finite
        numbers. Disabling may give a performance gain, but may result in
        problems (crashes, non-termination) on non-finite input.
        Default: True

    Returns
    -------
    code : ndarray
        A length M array holding the code book index for each observation.
    dist : ndarray
        The distortion (distance) between each observation and its nearest
        code.
    """
    obs = _asarray_validated(obs, check_finite=check_finite)
    code_book = _asarray_validated(code_book, check_finite=check_finite)
    common = np.common_type(obs, code_book)
    obs_c = obs.astype(common, copy=False)
    book_c = code_book.astype(common, copy=False)
    # The compiled implementation only supports single/double precision;
    # everything else goes through the pure-Python fallback.
    if np.issubdtype(common, np.float64) or np.issubdtype(common, np.float32):
        return _vq.vq(obs_c, book_c)
    return py_vq(obs, code_book, check_finite=False)
def py_vq(obs, code_book, check_finite=True):
    """Python version of vq algorithm.

    Computes the Euclidean distance between each observation and every
    entry in `code_book`, returning the closest entry per observation.

    Parameters
    ----------
    obs : ndarray
        Rank-2 array; each row is one observation.
    code_book : ndarray
        Code book to use. Same format as `obs`; must have the same number
        of features (columns) as `obs`.
    check_finite : bool, optional
        Whether to check that the input matrices contain only finite
        numbers. Default: True

    Returns
    -------
    code : ndarray
        code[i] gives the label of the ith observation; its code is
        code_book[code[i]].
    mind_dist : ndarray
        min_dist[i] gives the distance between the ith observation and its
        corresponding code.

    Notes
    -----
    Slower than the compiled version (roughly 20x) but works for all input
    types; used as the fallback when the C implementation cannot handle
    the dtype.
    """
    obs = _asarray_validated(obs, check_finite=check_finite)
    code_book = _asarray_validated(code_book, check_finite=check_finite)

    if obs.ndim != code_book.ndim:
        raise ValueError("Observation and code_book should have the same rank")
    if obs.ndim == 1:
        # Promote 1-D inputs to a single-feature 2-D layout
        obs = obs[:, np.newaxis]
        code_book = code_book[:, np.newaxis]

    all_dists = cdist(obs, code_book)
    labels = all_dists.argmin(axis=1)
    return labels, all_dists[np.arange(len(labels)), labels]
# py_vq2 was equivalent to py_vq; it is kept only as a deprecated alias
# that warns callers to switch to py_vq.
py_vq2 = np.deprecate(py_vq, old_name='py_vq2', new_name='py_vq')
def _kmeans(obs, guess, thresh=1e-5):
""" "raw" version of k-means.
Returns
-------
code_book
The lowest distortion codebook found.
avg_dist
The average distance a observation is from a code in the book.
Lower means the code_book matches the data better.
See Also
--------
kmeans : wrapper around k-means
Examples
--------
Note: not whitened in this example.
>>> from numpy import array
>>> from scipy.cluster.vq import _kmeans
>>> features = array([[ 1.9,2.3],
... [ 1.5,2.5],
... [ 0.8,0.6],
... [ 0.4,1.8],
... [ 1.0,1.0]])
>>> book = array((features[0],features[2]))
>>> _kmeans(features,book)
(array([[ 1.7 , 2.4 ],
[ 0.73333333, 1.13333333]]), 0.40563916697728591)
"""
code_book = np.asarray(guess)
diff = np.inf
prev_avg_dists = deque([diff], maxlen=2)
while diff > thresh:
# compute membership and distances between obs and code_book
obs_code, distort = vq(obs, code_book, check_finite=False)
prev_avg_dists.append(distort.mean(axis=-1))
# recalc code_book as centroids of associated obs
code_book, has_members = _vq.update_cluster_means(obs, obs_code,
code_book.shape[0])
code_book = code_book[has_members]
diff = prev_avg_dists[0] - prev_avg_dists[1]
return code_book, prev_avg_dists[1]
def kmeans(obs, k_or_guess, iter=20, thresh=1e-5, check_finite=True):
    """
    Performs k-means on a set of observation vectors forming k clusters.

    The k-means algorithm adjusts the classification of the observations
    into clusters and updates the cluster centroids until the position of
    the centroids is stable over successive iterations.  Stability is
    determined by comparing the absolute value of the change in the
    average Euclidean distance between the observations and their
    corresponding centroids against `thresh`.

    Parameters
    ----------
    obs : ndarray
        Each row of the M by N array is an observation vector.  The
        columns are the features seen during each observation.  The
        features must be whitened first with the `whiten` function.
    k_or_guess : int or ndarray
        The number of centroids to generate.  The initial k centroids are
        chosen by randomly selecting observations from the observation
        matrix.  Alternatively, passing a k by N array specifies the
        initial k centroids.
    iter : int, optional
        The number of times to run k-means, returning the codebook with
        the lowest distortion.  This argument is ignored if initial
        centroids are specified with an array for ``k_or_guess``.  This
        parameter does not represent the number of iterations of the
        k-means algorithm.
    thresh : float, optional
        Terminates the k-means algorithm if the change in distortion
        since the last k-means iteration is less than or equal to
        threshold.
    check_finite : bool, optional
        Whether to check that the input matrices contain only finite
        numbers.  Disabling may give a performance gain, but may result
        in problems (crashes, non-termination) if the inputs do contain
        infinities or NaNs.  Default: True

    Returns
    -------
    codebook : ndarray
        A k by N array of k centroids.  The ith centroid codebook[i] is
        represented with the code i.  The centroids and codes generated
        represent the lowest distortion seen, not necessarily the
        globally minimal distortion.
    distortion : float
        The mean (non-squared) Euclidean distance between the
        observations passed and the centroids generated.  Note the
        difference to the standard definition of distortion in the
        context of the k-means algorithm, which is the sum of the
        squared distances.

    Raises
    ------
    ValueError
        If ``iter`` is smaller than 1, if a scalar ``k_or_guess`` is not
        a positive integer, or if an empty initial codebook is passed.

    See Also
    --------
    kmeans2 : a different implementation of k-means clustering
       with more methods for generating initial centroids but without
       using a distortion change threshold as a stopping criterion.
    whiten : must be called prior to passing an observation matrix
       to kmeans.

    Examples
    --------
    >>> from numpy import array
    >>> from scipy.cluster.vq import vq, kmeans, whiten
    >>> features = array([[1.9, 2.3], [1.5, 2.5], [0.8, 0.6],
    ...                   [0.4, 1.8], [1.0, 1.0]])
    >>> whitened = whiten(features)
    >>> codebook, distortion = kmeans(whitened, 2)
    """
    obs = _asarray_validated(obs, check_finite=check_finite)
    if iter < 1:
        raise ValueError("iter must be at least 1, got %s" % iter)

    # Determine whether a count (scalar) or an initial guess (array) was passed.
    if not np.isscalar(k_or_guess):
        guess = _asarray_validated(k_or_guess, check_finite=check_finite)
        if guess.size < 1:
            raise ValueError("Asked for 0 clusters. Initial book was %s" %
                             guess)
        return _kmeans(obs, guess, thresh=thresh)

    # k_or_guess is a scalar, now verify that it's an integer
    k = int(k_or_guess)
    if k != k_or_guess:
        raise ValueError("If k_or_guess is a scalar, it must be an integer.")
    if k < 1:
        raise ValueError("Asked for %d clusters." % k)

    # initialize best distance value to a large value
    best_dist = np.inf
    # FIX: use the builtin `range` instead of the Python-2-only `xrange`
    # shim, which is unavailable on Python 3.
    for i in range(iter):
        # the initial code book is randomly selected from observations
        guess = _kpoints(obs, k)
        book, dist = _kmeans(obs, guess, thresh=thresh)
        if dist < best_dist:
            best_book = book
            best_dist = dist
    return best_book, best_dist
def _kpoints(data, k):
"""Pick k points at random in data (one row = one observation).
Parameters
----------
data : ndarray
Expect a rank 1 or 2 array. Rank 1 are assumed to describe one
dimensional data, rank 2 multidimensional data, in which case one
row is one observation.
k : int
Number of samples to generate.
Returns
-------
x : ndarray
A 'k' by 'N' containing the initial centroids
"""
idx = np.random.choice(data.shape[0], size=k, replace=False)
return data[idx]
def _krandinit(data, k):
"""Returns k samples of a random variable whose parameters depend on data.
More precisely, it returns k observations sampled from a Gaussian random
variable whose mean and covariances are the ones estimated from the data.
Parameters
----------
data : ndarray
Expect a rank 1 or 2 array. Rank 1 is assumed to describe 1-D
data, rank 2 multidimensional data, in which case one
row is one observation.
k : int
Number of samples to generate.
Returns
-------
x : ndarray
A 'k' by 'N' containing the initial centroids
"""
mu = data.mean(axis=0)
if data.ndim == 1:
cov = np.cov(data)
x = np.random.randn(k)
x *= np.sqrt(cov)
elif data.shape[1] > data.shape[0]:
# initialize when the covariance matrix is rank deficient
_, s, vh = np.linalg.svd(data - mu, full_matrices=False)
x = np.random.randn(k, s.size)
sVh = s[:, None] * vh / np.sqrt(data.shape[0] - 1)
x = x.dot(sVh)
else:
cov = np.atleast_2d(np.cov(data, rowvar=False))
# k rows, d cols (one row = one obs)
# Generate k sample of a random variable ~ Gaussian(mu, cov)
x = np.random.randn(k, mu.size)
x = x.dot(np.linalg.cholesky(cov).T)
x += mu
return x
def _kpp(data, k):
""" Picks k points in the data based on the kmeans++ method.
Parameters
----------
data : ndarray
Expect a rank 1 or 2 array. Rank 1 is assumed to describe 1-D
data, rank 2 multidimensional data, in which case one
row is one observation.
k : int
Number of samples to generate.
Returns
-------
init : ndarray
A 'k' by 'N' containing the initial centroids.
References
----------
.. [1] D. Arthur and S. Vassilvitskii, "k-means++: the advantages of
careful seeding", Proceedings of the Eighteenth Annual ACM-SIAM Symposium
on Discrete Algorithms, 2007.
"""
dims = data.shape[1] if len(data.shape) > 1 else 1
init = np.ndarray((k, dims))
for i in range(k):
if i == 0:
init[i, :] = data[np.random.randint(dims)]
else:
D2 = np.array([min(
[np.inner(init[j]-x, init[j]-x) for j in range(i)]
) for x in data])
probs = D2/D2.sum()
cumprobs = probs.cumsum()
r = np.random.rand()
init[i, :] = data[np.searchsorted(cumprobs, r)]
return init
# Dispatch table mapping the `minit` option of `kmeans2` to its
# centroid-initialization routine.
_valid_init_meth = {'random': _krandinit, 'points': _kpoints, '++': _kpp}
def _missing_warn():
"""Print a warning when called."""
warnings.warn("One of the clusters is empty. "
"Re-run kmeans with a different initialization.")
def _missing_raise():
    """Raise a ClusterError when called."""
    # Abort the run entirely instead of continuing with an empty cluster.
    raise ClusterError("One of the clusters is empty. "
                       "Re-run kmeans with a different initialization.")


# Dispatch table mapping the `missing` option of `kmeans2` to the
# empty-cluster handler.
_valid_miss_meth = {'warn': _missing_warn, 'raise': _missing_raise}
def kmeans2(data, k, iter=10, thresh=1e-5, minit='random',
            missing='warn', check_finite=True):
    """
    Classify a set of observations into k clusters using the k-means
    algorithm.

    The algorithm attempts to minimize the Euclidean distance between
    observations and centroids.  Several initialization methods are
    included.

    Parameters
    ----------
    data : ndarray
        A 'M' by 'N' array of 'M' observations in 'N' dimensions or a
        length 'M' array of 'M' 1-D observations.
    k : int or ndarray
        The number of clusters to form as well as the number of
        centroids to generate.  If `minit` initialization string is
        'matrix', or if a ndarray is given instead, it is interpreted as
        initial cluster to use instead.
    iter : int, optional
        Number of iterations of the k-means algorithm to run.  Note that
        this differs in meaning from the iters parameter to the kmeans
        function.
    thresh : float, optional
        (not used yet)
    minit : str, optional
        Method for initialization.  Available methods are 'random',
        'points', '++' and 'matrix':

        'random': generate k centroids from a Gaussian with mean and
        variance estimated from the data.

        'points': choose k observations (rows) at random from data for
        the initial centroids.

        '++': choose k observations accordingly to the kmeans++ method
        (careful seeding)

        'matrix': interpret the k parameter as a k by M (or length k
        array for 1-D data) array of initial centroids.
    missing : str, optional
        Method to deal with empty clusters.  Available methods are
        'warn' and 'raise':

        'warn': give a warning and continue.

        'raise': raise an ClusterError and terminate the algorithm.
    check_finite : bool, optional
        Whether to check that the input matrices contain only finite
        numbers.  Disabling may give a performance gain, but may result
        in problems (crashes, non-termination) if the inputs do contain
        infinities or NaNs.  Default: True

    Returns
    -------
    centroid : ndarray
        A 'k' by 'N' array of centroids found at the last iteration of
        k-means.
    label : ndarray
        label[i] is the code or index of the centroid the ith
        observation is closest to.

    Raises
    ------
    ValueError
        On invalid ``iter``, ``missing``, ``minit`` or cluster-count
        arguments, on rank > 2 or empty input, or on a ``k`` array that
        does not match the data's rank/dimension.

    See Also
    --------
    kmeans

    References
    ----------
    .. [1] D. Arthur and S. Vassilvitskii, "k-means++: the advantages of
       careful seeding", Proceedings of the Eighteenth Annual ACM-SIAM
       Symposium on Discrete Algorithms, 2007.
    """
    if int(iter) < 1:
        raise ValueError("Invalid iter (%s), "
                         "must be a positive integer." % iter)
    try:
        miss_meth = _valid_miss_meth[missing]
    except KeyError:
        raise ValueError("Unknown missing method %r" % (missing,))

    data = _asarray_validated(data, check_finite=check_finite)
    if data.ndim == 1:
        d = 1
    elif data.ndim == 2:
        d = data.shape[1]
    else:
        raise ValueError("Input of rank > 2 is not supported.")

    if data.size < 1:
        raise ValueError("Empty input is not supported.")

    # If k is not a single value, it should be compatible with data's shape
    if minit == 'matrix' or not np.isscalar(k):
        code_book = np.array(k, copy=True)
        if data.ndim != code_book.ndim:
            raise ValueError("k array doesn't match data rank")
        nc = len(code_book)
        if data.ndim > 1 and code_book.shape[1] != d:
            raise ValueError("k array doesn't match data dimension")
    else:
        nc = int(k)

        if nc < 1:
            raise ValueError("Cannot ask kmeans2 for %d clusters"
                             " (k was %s)" % (nc, k))
        elif nc != k:
            warnings.warn("k was not an integer, was converted.")

        try:
            init_meth = _valid_init_meth[minit]
        except KeyError:
            raise ValueError("Unknown init method %r" % (minit,))
        else:
            code_book = init_meth(data, k)

    # FIX: use the builtin `range` instead of the Python-2-only `xrange`
    # shim, which is unavailable on Python 3.
    for i in range(iter):
        # Compute the nearest neighbor for each obs using the current code book
        label = vq(data, code_book)[0]
        # Update the code book by computing centroids
        new_code_book, has_members = _vq.update_cluster_means(data, label, nc)
        if not has_members.all():
            miss_meth()
            # Set the empty clusters to their previous positions
            new_code_book[~has_members] = code_book[~has_members]
        code_book = new_code_book

    return code_book, label
|
jamestwebber/scipy
|
scipy/cluster/vq.py
|
Python
|
bsd-3-clause
| 27,263
|
[
"Gaussian"
] |
416da765e78c22965d176b05d3a7c1580be3cc9719f0a8985da6e376fff61199
|
#!/usr/bin/env priithon
import os, sys, time, csv
import Chromagnon as ch
import numpy as N
from Priithon.all import F, Mrc
from PriCommon import imgGeo
from imgio import mrcIO
# Registration methods exercised by `compare` (index-aligned with
# ch.alignfuncs.IF_FAILED).
METHODS=['quadrisection', 'logpolar', 'simplex']
#PARM_EXT = 'csv'
# Noise models applied by `makeFiles`.
NOISE_STATS=['Gaussian', 'Poisson']
def repeat(fns, n=10):
    """Run the noise-generation / registration comparison `n` times.

    Each iteration regenerates noisy copies of the input images with
    `makeFiles` and compares them with `compare`, writing one summary CSV
    per iteration.

    Parameters
    ----------
    fns : list of str
        Input image filenames.
    n : int, optional
        Number of repetitions.

    Returns
    -------
    list of str
        The summary CSV filenames, one per iteration.
    """
    summaries = []
    base = os.path.commonprefix(fns)
    for i in range(n):
        out = base + '_summary_%i.csv' % i
        print(out)
        # BUG FIX: the original assigned `outs = makeFiles(fns)` inside the
        # loop, clobbering the accumulator each iteration so only the last
        # iteration's generated files (plus one summary) were returned.
        generated = makeFiles(fns)
        fns2 = fns + generated
        print('iteration %i comparing %i images to make %s' % (i, len(fns2), out))
        out = compare(fns2, out=out)
        summaries.append(out)
    return summaries
def makeFiles(fns, std=10, div_step=50, div_max=800):#1000):
    """
    makes a series of images with added Gaussian and Poisson noise
    std: standard deviation for Gaussian, and mean=std*10 for Poisson
    div_step: the step that the original image is divided while noise is added
    div_max: the maximum value that the original image is divided
    return output filenames
    """
    outs = []
    for fn in fns:
        a = Mrc.bindFile(fn)
        for ns in NOISE_STATS:
            hdr = mrcIO.makeHdr_like(a.Mrc.hdr)
            if ns == NOISE_STATS[0]:
                # Gaussian noise with zero mean
                noise = F.noiseArr(a.shape, stddev=std, mean=0)
            else:
                # Poisson noise; mean chosen as 10x the Gaussian std
                noise = F.poissonArr(a.shape, mean=std*10)
            # Attenuate the original image progressively (lower SNR as the
            # divisor grows) before adding the fixed noise field.
            steps = range(div_step, div_max+div_step, div_step)
            ag = [a/c + noise for c in steps]
            for i, arr in enumerate(ag):
                val = steps[i]
                if hasattr(arr, "Mrc"):
                    # drop the memmap back-reference before saving a copy
                    del arr.Mrc
                if arr.dtype == N.float64:
                    arr = arr.astype(N.float32)
                    hdr.PixelType = 2  # MRC mode 2 = 32-bit float
                out = a.Mrc.path + ns + '%04d' % val
                Mrc.save(arr, out, ifExists='overwrite', hdr=hdr)
                outs.append(out)
    return outs
def compare(fns, methods=METHODS, refwave=0, tz={1:0}, t=0, out=None, imgsize=512, truth=[3,2,-0.5,1.001001,1.002004]):
    # Run each registration method over every input image and record the
    # recovered (ty, tx, r, my, mx) parameters plus their deviation (in nm)
    # from the known ground-truth `truth` into one summary CSV.
    # NOTE(review): `tz={1:0}` is a mutable default argument, and `tz[w]`
    # below raises KeyError for any non-reference wave not in the dict —
    # presumably callers always pass a complete mapping; verify.
    # NOTE(review): `time.clock()` was removed in Python 3.8 — this script
    # appears to target Python 2 / early 3.
    if not out:
        out = os.path.commonprefix(fns) + '_summary.csv'
    truth = N.array(truth)
    # Reference point on the image edge, used to convert a rotation error
    # into a displacement in nm.
    imgSize = N.array((0,imgsize), N.float32)
    o = open(out, 'w')
    cwtr = csv.writer(o)
    cwtr.writerow(['name', 'NoiseModel', 'method', 'wave', 'noise', 'ty(um)', 'tx(um)', 'r', 'my', 'mx', 'dty(nm)', 'dtx(nm)', 'dr(nm)', 'dmy(nm)', 'dmx(nm)', 'total(nm)', 'timeTaken(sec)'])
    for method in methods:
        met = ch.alignfuncs.IF_FAILED[METHODS.index(method)]
        #outs = []
        #OLD_EXT = ch.chromformat.PARM_EXT
        #ch.chromformat.PARM_EXT = PARM_EXT
        old_stdout = sys.stdout
        for fn in fns:
            # Redirect stdout to a per-file, per-method log.
            logfn = os.path.extsep.join((fn + '_' + method, 'log'))
            sys.stdout = open(logfn, 'w')
            #base = os.path.splitext(fn)[0]
            name = os.path.basename(fn)#base)
            # Parse the trailing digits of the filename (up to 4) as the
            # noise divisor encoded by makeFiles.
            if name[-1].isdigit():
                for i in range(4):
                    if name[-(i+1)].isdigit():
                        noise = int(name[-(i+1):])
                    else:
                        break
            else:
                noise = 0
            if 'Gaussian' in fn:
                noiseModel = 'Gaussian'
            elif 'Poisson' in fn:
                noiseModel = 'Poisson'
            else:
                noiseModel = 'None'
            # NOTE(review): `wave` stays unbound when the filename contains
            # neither 'deconB' nor 'deconG' — the writerow below would then
            # raise NameError (or reuse the previous file's value). Confirm
            # input naming guarantees one of the two markers.
            if 'deconB' in fn:
                wave = 442
            elif 'deconG' in fn:
                wave = 525
            an = ch.aligner.Chromagnon(fn)
            an.setIf_failed(met)
            an.setMaxError(0.0000001)
            #an.setParmSuffix('_' + method)
            if refwave is not None:
                an.setReferenceWave(refwave)
                an.fixAlignParmWithCurrRefWave()
            else:
                an.findBestChannel()
            an.setEchofunc(_echo)
            an.setRefImg()
            for w in range(an.img.nw):
                if (w == an.refwave):
                    continue
                if an.img.nz > 1:
                    # 3-D stack: project the z-sections matching the
                    # reference (shifted by tz) to a single 2-D image.
                    img = an.img.get3DArr(w=w, t=t)
                    zs = N.round_(N.array(an.refzs)-tz[w]).astype(N.int)
                    if zs.max() >= an.img.nz:
                        # clip z indices that fall outside the stack
                        zsbool = (zs < an.img.nz)
                        zsinds = N.nonzero(zsbool)[0]
                        zs = zs[zsinds]
                    imgyx = ch.alignfuncs.prep2D(img, zs=zs)
                    del img
                else:
                    imgyx = an.img.getArr(w=w, t=t, z=0)
                # initial guess: zero translation/rotation, unit magnification
                initguess = N.zeros((5,), N.float32)
                #initguess[:3] = ret[w,1:4] # ty,tx,r
                initguess[3:] = 1#ret[w,5:7] # my, mx
                clk0 = time.clock()
                try:
                    ty,tx,r,my,mx = ch.alignfuncs.iteration(imgyx, an.refyx, maxErr=an.maxErrYX, niter=an.niter, phaseContrast=an.phaseContrast, initguess=initguess, echofunc=an.echofunc, max_shift_pxl=an.max_shift_pxl, if_failed=an.if_failed)
                except ZeroDivisionError:
                    # retry without phase contrast if the first attempt
                    # divides by zero
                    if an.phaseContrast:
                        ty,tx,r,my,mx = ch.alignfuncs.iteration(imgyx, an.refyx, maxErr=an.maxErrYX, niter=an.niter, phaseContrast=False, initguess=initguess, echofunc=an.echofunc, max_shift_pxl=an.max_shift_pxl, if_failed=an.if_failed)
                clk1 = time.clock()
                tt = clk1-clk0
                # Per-parameter deviation from ground truth, converted to nm.
                diff = N.abs(truth - (ty,tx,r,my,mx))
                dy,dx = diff[:2] * (an.pxlsiz[1:] * 1000)
                dr = (imgGeo.euclideanDist(imgGeo.rotate(imgSize, N.radians(diff[2])), imgSize))*(N.mean(an.pxlsiz[1:])*1000)
                dmy = diff[3] * imgsize * an.pxlsiz[-2] * 1000
                dmx = diff[4] * imgsize * an.pxlsiz[-1] * 1000
                total = N.sqrt(dy**2 + dx**2 + dr**2 + dmy**2 + dmx**2)
                cwtr.writerow([name, noiseModel, method, wave, noise, ty*an.pxlsiz[1], tx*an.pxlsiz[2], r, my, mx, dy, dx, dr, dmy, dmx, total, tt])
            an.close()
            sys.stdout.close()
            del imgyx
            del an
    #ch.chromformat.PARM_EXT = OLD_EXT
    #sys.stdout = old_stdout
    sys.stdout = sys.__stdout__
    o.close()
    return out#s
# Attach the docstring at runtime so it can interpolate the METHODS list.
compare.__doc__ = """
method: %s
refwave: None or channel index
return output files""" % str(METHODS)
def _echo(msg, skip_notify=False):
pass
def summarize(csvs, out=None):
    """Merge per-run alignment CSV files into a single summary CSV.

    Parameters
    ----------
    csvs : list of str
        Per-run CSV filenames produced by `compare`.
    out : str, optional
        Output filename; defaults to the common prefix of `csvs`.

    Returns
    -------
    str
        The output filename.
    """
    if not out:
        # BUG FIX: `os.path.commomprefix` was a typo that raised
        # AttributeError at runtime.
        out = os.path.commonprefix(csvs)
    o = open(out, 'w')
    cwtr = csv.writer(o)
    cwtr.writerow(['name', 'method', 'time', 'wave', 'tz', 'ty', 'tx', 'r', 'mz', 'my', 'mx', 'timeTaken'])
    for fn in csvs:
        base = os.path.splitext(fn)[0]
        method = base.split('_')[-1]
        name = os.path.basename(base[:-(len(method)+1)])
        rd = ch.ChromagnonReader(fn)
        row = rd.alignParms
        # TODO(review): the original never wrote `row` to the output — the
        # per-row writing logic appears unfinished; confirm intent before
        # relying on the summary contents.
    # FIX: close the output file so the written header is flushed.
    o.close()
    return out
if __name__ == '__main__':
    # All command-line arguments are treated as input image filenames.
    print(sys.argv[1:])
    print(repeat(sys.argv[1:]))
|
macronucleus/chromagnon
|
Chromagnon/test_compareReg.py
|
Python
|
mit
| 6,944
|
[
"Gaussian"
] |
4ba9aaa7bfe3ec89000d3ffc7c188d77e37f1bdb57818b1c773afc9ca30a20ac
|
#!/usr/bin/env python
# Copyright 2014-2020 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from functools import reduce
import numpy
import scipy.special
from pyscf import lib, gto, scf, dft, ao2mo, df
from pyscf.solvent import ddcosmo
from pyscf.solvent import _attach_solvent
from pyscf.symm import sph
def make_v_phi(mol, dm, r_vdw, lebedev_order):
    """Reference implementation: electrostatic potential (nuclear minus
    electronic) on each atom's Lebedev cavity grid.

    Returns an (natm, n_grid) array; used to validate ddcosmo.make_phi.
    """
    atom_coords = mol.atom_coords()
    atom_charges = mol.atom_charges()
    natm = mol.natm
    coords_1sph, weights_1sph = ddcosmo.make_grids_one_sphere(lebedev_order)

    pmol = mol.copy()
    v_phi = []
    for ia in range(natm):
        for i,c in enumerate(coords_1sph):
            # Cavity point: atom center displaced by r_vdw along the unit
            # grid vector.
            r = atom_coords[ia] + r_vdw[ia] * c
            dr = atom_coords - r
            # Nuclear contribution: sum of Z / |R - r| over all atoms.
            v_nuc = (atom_charges / numpy.linalg.norm(dr, axis=1)).sum()

            pmol.set_rinv_orig(r)
            # Electronic contribution: 1/|r-r'| integrals contracted with dm.
            v_e = numpy.einsum('ij,ji', pmol.intor('int1e_rinv'), dm)
            v_phi.append(v_nuc - v_e)
    v_phi = numpy.array(v_phi).reshape(natm,-1)
    return v_phi
def make_L(pcmobj, r_vdw, lebedev_order, lmax, eta=0.1):
    """Reference implementation of the ddCOSMO L matrix.

    Builds the (natm*nlm, natm*nlm) coupling matrix between the spherical
    harmonic expansions on the atomic spheres; the weight scaling follows
    JCTC 9, 3637, Eq (16).  Used to validate ddcosmo.make_L.
    """
    mol = pcmobj.mol
    natm = mol.natm
    nlm = (lmax+1)**2

    leb_coords, leb_weights = ddcosmo.make_grids_one_sphere(lebedev_order)
    nleb_grid = leb_weights.size
    atom_coords = mol.atom_coords()
    Ylm_sphere = numpy.vstack(sph.real_sph_vec(leb_coords, lmax, True))
    fi = ddcosmo.make_fi(pcmobj, r_vdw)

    # Diagonal: 4*pi/(2l+1) / R_j for each (atom, lm) pair.
    L_diag = numpy.zeros((natm,nlm))
    p1 = 0
    for l in range(lmax+1):
        p0, p1 = p1, p1 + (l*2+1)
        L_diag[:,p0:p1] = 4*numpy.pi/(l*2+1)
    L_diag /= r_vdw.reshape(-1,1)
    L = numpy.diag(L_diag.ravel()).reshape(natm,nlm,natm,nlm)
    for ja in range(natm):
        for ka in range(natm):
            if ja == ka:
                continue
            # Vector from atom ka to the grid points on atom ja's sphere.
            vjk = r_vdw[ja] * leb_coords + atom_coords[ja] - atom_coords[ka]
            v = lib.norm(vjk, axis=1)
            tjk = v / r_vdw[ka]
            sjk = vjk / v.reshape(-1,1)
            Ys = sph.real_sph_vec(sjk, lmax, True)
            # scale the weight, see JCTC 9, 3637, Eq (16)
            wjk = pcmobj.regularize_xt(tjk, eta, r_vdw[ka])
            wjk[fi[ja]>1] /= fi[ja,fi[ja]>1]
            # tt accumulates tjk**l across the l loop.
            tt = numpy.ones_like(wjk)
            p1 = 0
            for l in range(lmax+1):
                fac = 4*numpy.pi/(l*2+1) / r_vdw[ka]
                p0, p1 = p1, p1 + (l*2+1)
                val = numpy.einsum('n,xn,n,mn->xm', leb_weights, Ylm_sphere, wjk*tt, Ys[l])
                L[ja,:,ka,p0:p1] += -fac * val
                tt *= tjk
    return L.reshape(natm*nlm,natm*nlm)
def make_psi(mol, dm, r_vdw, lmax):
    """Reference implementation of the ddCOSMO psi vector.

    Expands the molecular charge distribution (nuclei + electron density
    of `dm`) in real spherical harmonics on each atomic sphere; returns
    an (natm, (lmax+1)**2) array.  Used to validate make_psi_vmat.
    """
    grids = dft.gen_grid.Grids(mol)
    atom_grids_tab = grids.gen_atomic_grids(mol)
    grids.build()
    ao = dft.numint.eval_ao(mol, grids.coords)
    den = dft.numint.eval_rho(mol, ao, dm)
    den *= grids.weights

    natm = mol.natm
    nlm = (lmax+1)**2
    psi = numpy.empty((natm,nlm))
    i1 = 0
    for ia in range(natm):
        xnj, w = atom_grids_tab[mol.atom_symbol(ia)]
        i0, i1 = i1, i1 + w.size
        r = lib.norm(xnj, axis=1)
        snj = xnj/r.reshape(-1,1)
        Ys = sph.real_sph_vec(snj, lmax, True)
        p1 = 0
        for l in range(lmax+1):
            fac = 4*numpy.pi/(l*2+1)
            p0, p1 = p1, p1 + (l*2+1)
            # piecewise radial factor: r^l/R^(l+1) inside the sphere,
            # R^l/r^(l+1) outside
            rr = numpy.zeros_like(r)
            rr[r<=r_vdw[ia]] = r[r<=r_vdw[ia]]**l / r_vdw[ia]**(l+1)
            rr[r> r_vdw[ia]] = r_vdw[ia]**l / r[r>r_vdw[ia]]**(l+1)
            psi[ia,p0:p1] = -fac * numpy.einsum('n,n,mn->m', den[i0:i1], rr, Ys[l])
        # nuclear point charge contributes only to the monopole (l=0) term
        psi[ia,0] += numpy.sqrt(4*numpy.pi)/r_vdw[ia] * mol.atom_charge(ia)
    return psi
def make_vmat(pcm, r_vdw, lebedev_order, lmax, LX, LS):
    """Reference implementation of the ddCOSMO potential matrix.

    Assembles the AO-basis matrix from the expansion coefficients `LX`
    (density response) and the adjoint solution `LS`.  Used to validate
    the vmat returned by make_psi_vmat.
    """
    mol = pcm.mol
    grids = dft.gen_grid.Grids(mol)
    atom_grids_tab = grids.gen_atomic_grids(mol)
    grids.build()
    coords_1sph, weights_1sph = ddcosmo.make_grids_one_sphere(lebedev_order)
    ao = dft.numint.eval_ao(mol, grids.coords)
    nao = ao.shape[1]
    vmat = numpy.zeros((nao,nao))
    i1 = 0
    for ia in range(mol.natm):
        xnj, w = atom_grids_tab[mol.atom_symbol(ia)]
        i0, i1 = i1, i1 + w.size
        r = lib.norm(xnj, axis=1)
        Ys = sph.real_sph_vec(xnj/r.reshape(-1,1), lmax, True)
        p1 = 0
        for l in range(lmax+1):
            fac = 4*numpy.pi/(l*2+1)
            p0, p1 = p1, p1 + (l*2+1)
            # same piecewise radial factor as in make_psi
            rr = numpy.zeros_like(r)
            rr[r<=r_vdw[ia]] = r[r<=r_vdw[ia]]**l / r_vdw[ia]**(l+1)
            rr[r> r_vdw[ia]] = r_vdw[ia]**l / r[r>r_vdw[ia]]**(l+1)
            eta_nj = fac * numpy.einsum('n,mn,m->n', rr, Ys[l], LX[ia,p0:p1])
            vmat -= numpy.einsum('n,np,nq->pq', grids.weights[i0:i1] * eta_nj,
                                 ao[i0:i1], ao[i0:i1])

    atom_coords = mol.atom_coords()
    Ylm_sphere = numpy.vstack(sph.real_sph_vec(coords_1sph, lmax, True))
    fi = ddcosmo.make_fi(pcm, r_vdw)
    # ui: fraction of each cavity point exposed to the solvent (clipped at 0)
    ui = 1 - fi
    ui[ui<0] = 0
    xi_nj = numpy.einsum('n,jn,xn,jx->jn', weights_1sph, ui, Ylm_sphere, LS)

    pmol = mol.copy()
    for ia in range(mol.natm):
        for i,c in enumerate(coords_1sph):
            r = atom_coords[ia] + r_vdw[ia] * c
            pmol.set_rinv_orig(r)
            vmat += pmol.intor('int1e_rinv') * xi_nj[ia,i]
    return vmat
def make_B(pcmobj, r_vdw, ui, ylm_1sph, cached_pol, L):
    """Reference implementation of the ddCOSMO B tensor.

    Builds the 4-index AO tensor B such that einsum('ijkl,kl->ij', B, dm)
    reproduces the solvent response matrix.  Used by test_B_dot_x.
    """
    mol = pcmobj.mol
    coords_1sph, weights_1sph = ddcosmo.make_grids_one_sphere(pcmobj.lebedev_order)
    ngrid_1sph = coords_1sph.shape[0]
    mol = pcmobj.mol
    natm = mol.natm
    nao  = mol.nao
    lmax = pcmobj.lmax
    nlm = (lmax+1)**2

    atom_coords = mol.atom_coords()
    atom_charges = mol.atom_charges()
    grids = pcmobj.grids

    extern_point_idx = ui > 0
    # All cavity grid points: atom centers displaced along the Lebedev
    # directions by the vdW radii.
    cav_coords = (atom_coords.reshape(natm,1,3)
                  + numpy.einsum('r,gx->rgx', r_vdw, coords_1sph))

    max_memory = pcmobj.max_memory - lib.current_memory()[0]
    blksize = int(max(max_memory*.9e6/8/nao**2, 400))

    # keep only the solvent-exposed cavity points
    cav_coords = cav_coords[extern_point_idx]
    int3c2e = mol._add_suffix('int3c2e')
    cintopt = gto.moleintor.make_cintopt(mol._atm, mol._bas,
                                         mol._env, int3c2e)
    fakemol = gto.fakemol_for_charges(cav_coords)
    v_nj = df.incore.aux_e2(mol, fakemol, intor=int3c2e, aosym='s2ij', cintopt=cintopt)
    nao_pair = v_nj.shape[0]
    v_phi = numpy.zeros((nao_pair, natm, ngrid_1sph))
    v_phi[:,extern_point_idx] += v_nj
    phi = numpy.einsum('n,xn,jn,ijn->ijx', weights_1sph, ylm_1sph, ui, v_phi)

    # Solve the ddCOSMO linear system for every AO pair at once.
    Xvec = numpy.linalg.solve(L.reshape(natm*nlm,-1), phi.reshape(-1,natm*nlm).T)
    Xvec = Xvec.reshape(natm,nlm,nao_pair)

    ao = mol.eval_gto('GTOval', grids.coords)
    aow = numpy.einsum('gi,g->gi', ao, grids.weights)
    aopair = lib.pack_tril(numpy.einsum('gi,gj->gij', ao, aow))

    psi = numpy.zeros((nao_pair, natm, nlm))
    i1 = 0
    for ia in range(natm):
        fak_pol, leak_idx = cached_pol[mol.atom_symbol(ia)]
        i0, i1 = i1, i1 + fak_pol[0].shape[1]
        p1 = 0
        for l in range(lmax+1):
            fac = 4*numpy.pi/(l*2+1)
            p0, p1 = p1, p1 + (l*2+1)
            psi[:,ia,p0:p1] = -fac * numpy.einsum('mn,ni->im', fak_pol[l], aopair[i0:i1])

    B = lib.einsum('pnl,nlq->pq', psi, Xvec)
    B = B + B.T
    # expand the packed-triangular pair indices back to a full 4-index tensor
    B = ao2mo.restore(1, B, nao)
    return B
# Shared water / 3-21G test molecule used by the KnownValues tests below.
mol = gto.Mole()
mol.atom = ''' O 0.00000000 0.00000000 -0.11081188
H -0.00000000 -0.84695236 0.59109389
H -0.00000000 0.89830571 0.52404783 '''
mol.basis = '3-21g'
mol.verbose = 5
# Discard SCF log output during the tests.
mol.output = '/dev/null'
mol.build()
def tearDownModule():
    """Release the module-level molecule after the test run."""
    global mol
    # close the /dev/null output stream opened by mol.build()
    mol.stdout.close()
    del mol
class KnownValues(unittest.TestCase):
    def test_ddcosmo_scf(self):
        """ddCOSMO-RHF total energies for H+ and H2CO against known values."""
        # Single proton: purely nuclear solvation contribution.
        mol = gto.M(atom=''' H 0 0 0 ''', charge=1, basis='sto3g', verbose=7,
                    output='/dev/null')
        pcm = ddcosmo.DDCOSMO(mol)
        pcm.lmax = 10
        pcm.lebedev_order = 29
        mf = ddcosmo.ddcosmo_for_scf(scf.RHF(mol), pcm)
        mf.init_guess = '1e'
        mf.run()
        self.assertAlmostEqual(mf.e_tot, -0.1645636146393864, 9)

        # Formaldehyde with a smaller harmonic expansion.
        mol = gto.M(atom='''
               6        0.000000    0.000000   -0.542500
               8        0.000000    0.000000    0.677500
               1        0.000000    0.935307   -1.082500
               1        0.000000   -0.935307   -1.082500
                    ''', basis='sto3g', verbose=7,
                    output='/dev/null')
        pcm = ddcosmo.DDCOSMO(mol)
        pcm.lmax = 6
        pcm.lebedev_order = 17
        mf = ddcosmo.ddcosmo_for_scf(scf.RHF(mol), pcm).run()
        self.assertAlmostEqual(mf.e_tot, -112.35450855007909, 9)
    def test_ddcosmo_scf_with_overwritten_attributes(self):
        """Changing solvent attributes after construction is honored on re-run."""
        mf = ddcosmo.ddcosmo_for_scf(scf.RHF(mol))
        mf.kernel()
        self.assertAlmostEqual(mf.e_tot, -75.57036436805902, 9)

        # Coarser expansion and a modified dielectric constant.
        mf.with_solvent.lebedev_order = 15
        mf.with_solvent.lmax = 5
        mf.with_solvent.eps = .5
        mf.with_solvent.conv_tol = 1e-8
        mf.kernel()
        self.assertAlmostEqual(mf.e_tot, -75.55326109712902, 9)

        # Custom radial scheme and atomic grids for the solvent integration.
        mf.with_solvent.grids.radi_method = dft.mura_knowles
        mf.with_solvent.grids.atom_grid = {"H": (8, 50), "O": (8, 50),}
        mf.kernel()
        self.assertAlmostEqual(mf.e_tot, -75.55216799624262, 9)
    def test_make_ylm(self):
        """pyscf's real spherical harmonics match a scipy.special reference."""
        numpy.random.seed(1)
        lmax = 6
        # random unit vectors
        r = numpy.random.random((100,3)) - numpy.ones(3)*.5
        r = r / lib.norm(r,axis=1).reshape(-1,1)

        # Recover the polar/azimuthal angles from the Cartesian components,
        # clipping rounding noise outside [-1, 1].
        ngrid = r.shape[0]
        cosphi = r[:,2]
        sinphi = (1-cosphi**2)**.5
        costheta = numpy.ones(ngrid)
        sintheta = numpy.zeros(ngrid)
        costheta[sinphi!=0] = r[sinphi!=0,0] / sinphi[sinphi!=0]
        sintheta[sinphi!=0] = r[sinphi!=0,1] / sinphi[sinphi!=0]
        costheta[costheta> 1] = 1
        costheta[costheta<-1] =-1
        sintheta[sintheta> 1] = 1
        sintheta[sintheta<-1] =-1
        varphi = numpy.arccos(cosphi)
        theta = numpy.arccos(costheta)
        theta[sintheta<0] = 2*numpy.pi - theta[sintheta<0]
        ylmref = []
        for l in range(lmax+1):
            ylm = numpy.empty((l*2+1,ngrid))
            ylm[l] = scipy.special.sph_harm(0, l, theta, varphi).real
            for m in range(1, l+1):
                f1 = scipy.special.sph_harm(-m, l, theta, varphi)
                f2 = scipy.special.sph_harm( m, l, theta, varphi)
                # complex to real spherical functions
                if m % 2 == 1:
                    ylm[l-m] = (-f1.imag - f2.imag) / numpy.sqrt(2)
                    ylm[l+m] = ( f1.real - f2.real) / numpy.sqrt(2)
                else:
                    ylm[l-m] = (-f1.imag + f2.imag) / numpy.sqrt(2)
                    ylm[l+m] = ( f1.real + f2.real) / numpy.sqrt(2)
            if l == 1:
                # pyscf orders the three l=1 components differently
                ylm = ylm[[2,0,1]]
            ylmref.append(ylm)
        ylmref = numpy.vstack(ylmref)
        ylm = numpy.vstack(sph.real_sph_vec(r, lmax, True))
        self.assertTrue(abs(ylmref - ylm).max() < 1e-14)
def test_L_x(self):
pcm = ddcosmo.DDCOSMO(mol)
r_vdw = ddcosmo.get_atomic_radii(pcm)
n = mol.natm * (pcm.lmax+1)**2
Lref = make_L(pcm, r_vdw, pcm.lebedev_order, pcm.lmax, pcm.eta).reshape(n,n)
coords_1sph, weights_1sph = ddcosmo.make_grids_one_sphere(pcm.lebedev_order)
ylm_1sph = numpy.vstack(sph.real_sph_vec(coords_1sph, pcm.lmax, True))
fi = ddcosmo.make_fi(pcm, r_vdw)
L = ddcosmo.make_L(pcm, r_vdw, ylm_1sph, fi).reshape(n,n)
numpy.random.seed(1)
x = numpy.random.random(n)
self.assertTrue(abs(Lref.dot(n)-L.dot(n)).max() < 1e-12)
    def test_phi(self):
        """ddcosmo.make_phi matches the brute-force potential make_v_phi."""
        pcm = ddcosmo.DDCOSMO(mol)
        r_vdw = ddcosmo.get_atomic_radii(pcm)
        fi = ddcosmo.make_fi(pcm, r_vdw)
        # ui: exposed fraction of each cavity point, clipped at zero
        ui = 1 - fi
        ui[ui<0] = 0

        # random symmetric "density matrix"
        numpy.random.seed(1)
        nao = mol.nao_nr()
        dm = numpy.random.random((nao,nao))
        dm = dm + dm.T

        v_phi = make_v_phi(mol, dm, r_vdw, pcm.lebedev_order)
        coords_1sph, weights_1sph = ddcosmo.make_grids_one_sphere(pcm.lebedev_order)
        ylm_1sph = numpy.vstack(sph.real_sph_vec(coords_1sph, pcm.lmax, True))
        phi = -numpy.einsum('n,xn,jn,jn->jx', weights_1sph, ylm_1sph, ui, v_phi)
        phi1 = ddcosmo.make_phi(pcm, dm, r_vdw, ui, ylm_1sph)
        self.assertTrue(abs(phi - phi1).max() < 1e-12)
    def test_psi_vmat(self):
        """make_psi_vmat agrees with the reference make_psi and make_vmat."""
        pcm = ddcosmo.DDCOSMO(mol)
        pcm.lmax = 2
        pcm.eps = 0
        r_vdw = ddcosmo.get_atomic_radii(pcm)
        fi = ddcosmo.make_fi(pcm, r_vdw)
        ui = 1 - fi
        ui[ui<0] = 0
        grids = dft.gen_grid.Grids(mol).build()
        pcm.grids = grids
        coords_1sph, weights_1sph = ddcosmo.make_grids_one_sphere(pcm.lebedev_order)
        ylm_1sph = numpy.vstack(sph.real_sph_vec(coords_1sph, pcm.lmax, True))
        cached_pol = ddcosmo.cache_fake_multipoles(grids, r_vdw, pcm.lmax)

        numpy.random.seed(1)
        nao = mol.nao_nr()
        dm = numpy.random.random((nao,nao))
        dm = dm + dm.T
        natm = mol.natm
        nlm = (pcm.lmax+1)**2
        # random expansion coefficients standing in for the L-solution
        LX = numpy.random.random((natm,nlm))

        L = ddcosmo.make_L(pcm, r_vdw, ylm_1sph, fi)
        psi, vmat = ddcosmo.make_psi_vmat(pcm, dm, r_vdw, ui,
                                          ylm_1sph, cached_pol, LX, L)[:2]
        psi_ref = make_psi(pcm.mol, dm, r_vdw, pcm.lmax)
        self.assertAlmostEqual(abs(psi_ref - psi).max(), 0, 12)

        # Adjoint solution for the reference vmat.
        LS = numpy.linalg.solve(L.reshape(natm*nlm,-1).T,
                                psi_ref.ravel()).reshape(natm,nlm)
        vmat_ref = make_vmat(pcm, r_vdw, pcm.lebedev_order, pcm.lmax, LX, LS)
        self.assertAlmostEqual(abs(vmat_ref - vmat).max(), 0, 12)
    def test_B_dot_x(self):
        """The implicit B·dm product matches contraction with the explicit B
        tensor, and the three equivalent energy expressions agree."""
        pcm = ddcosmo.DDCOSMO(mol)
        pcm.lmax = 2
        pcm.eps = 0
        natm = mol.natm
        nao = mol.nao
        nlm = (pcm.lmax+1)**2
        r_vdw = ddcosmo.get_atomic_radii(pcm)
        fi = ddcosmo.make_fi(pcm, r_vdw)
        ui = 1 - fi
        ui[ui<0] = 0
        grids = dft.gen_grid.Grids(mol).run(level=0)
        pcm.grids = grids
        coords_1sph, weights_1sph = ddcosmo.make_grids_one_sphere(pcm.lebedev_order)
        ylm_1sph = numpy.vstack(sph.real_sph_vec(coords_1sph, pcm.lmax, True))
        cached_pol = ddcosmo.cache_fake_multipoles(grids, r_vdw, pcm.lmax)
        L = ddcosmo.make_L(pcm, r_vdw, ylm_1sph, fi)
        B = make_B(pcm, r_vdw, ui, ylm_1sph, cached_pol, L)

        numpy.random.seed(19)
        dm = numpy.random.random((2,nao,nao))
        # explicit contraction with the reference B tensor
        Bx = numpy.einsum('ijkl,xkl->xij', B, dm)

        phi = ddcosmo.make_phi(pcm, dm, r_vdw, ui, ylm_1sph, with_nuc=False)
        Xvec = numpy.linalg.solve(L.reshape(natm*nlm,-1), phi.reshape(-1,natm*nlm).T)
        Xvec = Xvec.reshape(natm,nlm,-1).transpose(2,0,1)
        psi, vref, LS = ddcosmo.make_psi_vmat(pcm, dm, r_vdw, ui, ylm_1sph,
                                              cached_pol, Xvec, L, with_nuc=False)
        self.assertAlmostEqual(abs(Bx - vref).max(), 0, 12)
        # Energy via psi·X, phi·LS and dm·v/2 must coincide.
        e1 = numpy.einsum('nij,nij->n', psi, Xvec)
        e2 = numpy.einsum('nij,nij->n', phi, LS)
        e3 = numpy.einsum('nij,nij->n', dm, vref) * .5
        self.assertAlmostEqual(abs(e1-e2).max(), 0, 12)
        self.assertAlmostEqual(abs(e1-e3).max(), 0, 12)

        vmat = pcm._B_dot_x(dm)
        self.assertEqual(vmat.shape, (2,nao,nao))
        self.assertAlmostEqual(abs(vmat-vref*.5).max(), 0, 12)
        self.assertAlmostEqual(lib.fp(vmat), -17.383712106418606, 12)
    def test_vmat(self):
        """The analytic solvent potential matrix matches a finite-difference
        derivative of the solvation energy w.r.t. the density matrix."""
        mol = gto.M(atom='H 0 0 0; H 0 1 1.2; H 1. .1 0; H .5 .5 1', verbose=0)
        pcmobj = ddcosmo.DDCOSMO(mol)
        f = pcmobj.as_solver()
        nao = mol.nao_nr()
        numpy.random.seed(1)
        dm1 = numpy.random.random((nao,nao))
        dm1 = dm1 + dm1.T
        e0, vmat0 = f(dm1)
        dx = 0.0001
        vmat1 = numpy.zeros_like(dm1)
        # central-free finite difference over every symmetric dm element
        for i in range(nao):
            for j in range(i):
                dm1[i,j] += dx
                dm1[j,i] += dx
                e1 = f(dm1)[0]
                # off-diagonal perturbations touch two elements -> /2
                vmat1[i,j] = vmat1[j,i] = (e1 - e0) / (dx*2)
                dm1[i,j] -= dx
                dm1[j,i] -= dx
            dm1[i,i] += dx
            e1 = f(dm1)[0]
            vmat1[i,i] = (e1 - e0) / dx
            dm1[i,i] -= dx
        self.assertAlmostEqual(abs(vmat0-vmat1).max(), 0, 4)
def test_as_scanner(self):
    # A ddCOSMO-SCF scanner must rebuild the solvation grids for each new
    # geometry it is applied to, and its energy must match a from-scratch
    # calculation on the same geometry.
    mol = gto.M(atom='''
6 0.000000 0.000000 -0.542500
8 0.000000 0.000000 0.677500
1 0.000000 0.935307 -1.082500
1 0.000000 -0.935307 -1.082500
''', basis='sto3g', verbose=7,
        output='/dev/null')
    mf_scanner = ddcosmo.ddcosmo_for_scf(scf.RHF(mol)).as_scanner()
    mf_scanner(mol)
    # Grid size reflects the molecule currently loaded in the scanner.
    self.assertEqual(mf_scanner.with_solvent.grids.coords.shape, (48212, 3))
    mf_scanner('H 0. 0. 0.; H 0. 0. .9')
    self.assertEqual(mf_scanner.with_solvent.grids.coords.shape, (20048, 3))
    h2 = gto.M(atom='H 0. 0. 0.; H 0. 0. .9', basis='sto3g', verbose=7,
               output='/dev/null')
    mf_h2 = ddcosmo.ddcosmo_for_scf(scf.RHF(h2)).run()
    # Scanner energy equals a direct calculation on the same molecule.
    self.assertAlmostEqual(mf_h2.e_tot, mf_scanner.e_tot, 9)
def test_newton_rohf(self):
    """Second-order (Newton) ROHF and plain RHF with ddCOSMO must give the
    same energy for this closed-shell system."""
    rohf = mol.ROHF(max_memory=0).ddCOSMO()
    rohf = rohf.newton()
    e_rohf = rohf.kernel()
    self.assertAlmostEqual(e_rohf, -75.570364368046086, 9)
    rhf = mol.RHF().ddCOSMO()
    e_rhf = rhf.kernel()
    self.assertAlmostEqual(e_rhf, -75.570364368046086, 9)
def test_convert_scf(self):
    """Converting a solvated RHF object to UHF must preserve the solvation
    mixin on the converted object."""
    converted = mol.RHF().ddCOSMO().to_uhf()
    self.assertTrue(isinstance(converted, scf.uhf.UHF))
    self.assertTrue(isinstance(converted, _attach_solvent._Solvation))
def test_reset(self):
    """reset(mol1) must propagate the new molecule to every nested object:
    density fitting, solvent model, and the underlying SCF of the SOSCF
    wrapper."""
    new_mol = gto.M(atom='H 0 0 0; H 0 0 .9', basis='cc-pvdz')
    mf = scf.RHF(mol).density_fit().ddCOSMO().newton()
    mf.reset(new_mol)
    for obj in (mf, mf.with_df, mf.with_solvent,
                mf._scf.with_df, mf._scf.with_solvent):
        self.assertTrue(obj.mol is new_mol)
def test_rhf_tda(self):
    """TDA excitation energies with and without equilibrium solvation."""
    # Equilibrium solvation: the solvent relaxes with the excited state.
    ground = mol.RHF().ddCOSMO().run()
    td_eq = ground.TDA().ddCOSMO().run(equilibrium_solvation=True)
    ref_eq = numpy.array([0.3014315117408341, 0.358844688787903, 0.3951664712235241])
    self.assertAlmostEqual(abs(ref_eq - td_eq.e).max(), 0, 8)
    # Non-equilibrium (fast) solvation is the default.
    ground = mol.RHF().ddCOSMO().run()
    td_neq = ground.TDA().ddCOSMO().run()
    ref_neq = numpy.array([0.3016104587222408, 0.358896882513815, 0.4004977667270891])
    self.assertAlmostEqual(abs(ref_neq - td_neq.e).max(), 0, 8)
# TODO: add tests for direct-scf, ROHF, ROKS, .newton(), and their mixes
if __name__ == "__main__":
    # Run the full suite when executed as a script.
    print("Full Tests for ddcosmo")
    unittest.main()
|
gkc1000/pyscf
|
pyscf/solvent/test/test_ddcosmo.py
|
Python
|
apache-2.0
| 19,467
|
[
"PySCF"
] |
0f4f56a7cbbff5958c485c6de368efc3cd13932f0f0f93e0c1dc4ec559d18f57
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module defines the Cp2k output parser along with a few other functions for parsing cp2k-related
outputs.
"""
import glob
import logging
import os
import re
import warnings
import numpy as np
import pandas as pd
from monty.io import zopen
from monty.json import jsanitize
from monty.re import regrep
from pymatgen.core.structure import Structure
from pymatgen.electronic_structure.core import Orbital, Spin
from pymatgen.electronic_structure.dos import CompleteDos, Dos, add_densities
from pymatgen.io.cp2k.sets import Cp2kInput
from pymatgen.io.cp2k.utils import _postprocessor, natural_keys
from pymatgen.io.xyz import XYZ
__author__ = "Nicholas Winner"
__version__ = "0.3"
__status__ = "Development"
# Module-level logger for this parser.
logger = logging.getLogger(__name__)
# Conversion factor from Hartree to eV.
_hartree_to_ev_ = 2.72113838565563e01
# GLOBAL|Run_type values that correspond to single-point (static) runs.
_static_run_names_ = [
    "ENERGY",
    "ENERGY_FORCE",
    "WAVEFUNCTION_OPTIMIZATION",
    "WFN_OPT",
]
class Cp2kOutput:
"""
Class for parsing output file from CP2K. The CP2K output file is very flexible in the way that it is returned.
This class will automatically parse parameters that should always be present, but other parsing features may be
called depending on the run type.
"""
def __init__(self, filename, verbose=False, auto_load=False):
    """
    Initialize the Cp2kOutput object.
    Args:
        filename: (str) Name of the CP2K output file to parse
        verbose: (bool) Whether or not to parse with verbosity (will parse lots of data that may not be useful)
        auto_load (bool): Whether or not to automatically load basic info like energies and structures.
    """
    # IO Info
    self.filename = filename
    self.dir = os.path.dirname(filename)
    # Categories of auxiliary files (PDOS, trajectory, cubes, ...) located
    # next to the main output file; filled by parse_files().
    self.filenames = {}
    self.parse_files()
    # All raw parsed quantities, keyed by name.
    self.data = {}
    # Material properties/results
    self.input = None
    self.initial_structure = None
    self.lattice = None
    self.final_structure = None
    self.composition = None
    self.efermi = None
    self.vbm = None
    self.cbm = None
    self.band_gap = None
    self.structures = []
    self.ionic_steps = []
    # parse the basic run parameters always
    self.parse_cp2k_params()
    self.parse_input()  # parse the input file
    self.parse_global_params()  # Always present, parse the global parameters, most important is what run type
    self.parse_dft_params()  # Present so long as a DFT calculation was performed
    self.parse_scf_params()
    self.parse_atomic_kind_info()
    # Auto-load will load the most crucial data into the data attribute
    if auto_load:
        self.ran_successfully()  # Only if job completed. No info about convergence etc.
        self.convergence()  # Checks to see if job converged
        self.parse_initial_structure()  # Get the initial structure by parsing lattice and then parsing coords
        self.parse_structures()  # collect all structures from the run
        self.parse_energies()  # get total energy for each ionic step
        self.parse_forces()  # get forces on all atoms (in order), if available
        self.parse_stresses()  # get stress tensor and total stress at each ionic step, if available
        self.parse_ionic_steps()  # collect energy, forces, and total stress into ionic steps variable
        self.parse_dos()
        self.parse_mo_eigenvalues()  # Get the eigenvalues of the MOs (for finding gaps, VBM, CBM)
        self.parse_homo_lumo()  # Get the HOMO LUMO gap as printed after the mo eigenvalues
        self.parse_timing()  # Get timing info (includes total CPU time consumed, but also much more)
    # TODO: Is this the best way to implement? Should there just be the option to select each individually?
    if verbose:
        self.parse_scf_opt()
        self.parse_opt_steps()
        self.parse_total_numbers()
        self.parse_mulliken()
        self.parse_hirshfeld()
@property
def cp2k_version(self):
    """The cp2k version used in the calculation."""
    # dict.get already defaults to None when the key is absent.
    return self.data.get("cp2k_version")
@property
def completed(self):
    """Did the calculation complete."""
    # read_pattern stores matches as a list of capture-group lists; the
    # first capture of the first match carries the boolean-postprocessed flag.
    matches = self.data.get("completed", False)
    return matches[0][0] if matches else matches
@property
def num_warnings(self):
    """How many warnings showed up during the run (0 when not parsed)."""
    return self.data.get("num_warnings", 0)
@property
def run_type(self):
    """What type of run (Energy, MD, etc.) was performed."""
    # "global" is populated by parse_global_params(), which always runs in
    # __init__.
    return self.data.get("global").get("Run_type")
@property
def project_name(self):
    """What project name was used for this calculation."""
    return self.data.get("global").get("project_name")
@property
def spin_polarized(self):
    """
    Was the calculation spin polarized.

    Bug fix: the original expression ``("UKS" or "UNRESTRICTED_KOHN_SHAM"
    or ...) in values`` only ever tested "UKS", because ``or`` returns its
    first truthy operand. Each alias must be tested for membership
    individually.
    """
    keywords = ("UKS", "UNRESTRICTED_KOHN_SHAM", "LSD", "SPIN_POLARIZED")
    return any(kw in self.data["dft"].values() for kw in keywords)
@property
def is_metal(self):
    """Was a band gap found? i.e. is it a metal."""
    # No gap detected, or a non-positive gap, is treated as metallic.
    return self.band_gap is None or self.band_gap <= 0
def parse_files(self):
    """
    Identify files present in the directory with the cp2k output file.
    Looks for trajectories, dos, and cubes.
    """
    # Projected DOS files: "list" in the basename marks an LDOS file.
    self.filenames["PDOS"] = []
    self.filenames["LDOS"] = []
    for path in glob.glob(os.path.join(self.dir, "*pdos*")):
        category = "LDOS" if "list" in path.split("/")[-1] else "PDOS"
        self.filenames[category].append(path)
    # One glob pattern per simple category.
    simple_patterns = {
        "trajectory": "*pos*.xyz*",
        "forces": "*frc*.xyz*",
        "stress": "*stress*",
        "cell": "*.cell*",
        "electron_density": "*ELECTRON_DENSITY*.cube*",
        "spin_density": "*SPIN_DENSITY*.cube*",
        "v_hartree": "*hartree*.cube*",
    }
    for category, pat in simple_patterns.items():
        self.filenames[category] = glob.glob(os.path.join(self.dir, pat))
    self.filenames["v_hartree"].sort(key=natural_keys)
    # Restart and wavefunction files: "bak" copies are collected separately;
    # the non-backup file (if any) is stored as a single path.
    self.filenames["restart.bak"] = []
    for path in glob.glob(os.path.join(self.dir, "*restart*")):
        if "bak" in path.split("/")[-1]:
            self.filenames["restart.bak"].append(path)
        else:
            self.filenames["restart"] = path
    self.filenames["wfn.bak"] = []
    for path in glob.glob(os.path.join(self.dir, "*wfn*")):
        if "bak" in path.split("/")[-1]:
            self.filenames["wfn.bak"].append(path)
        else:
            self.filenames["wfn"] = path
def parse_structures(self, trajectory_file=None, lattice_file=None):
    """
    Parses the structures from a cp2k calculation. Static calculations simply use the initial structure.
    For calculations with ionic motion, the function will look for the appropriate trajectory and lattice
    files based on naming convention. If no file is given, and no file is found, it is assumed
    that the lattice/structure remained constant, and the initial lattice/structure is used.
    Cp2k does not output the trajectory in the main output file by default, so non static calculations have to
    reference the trajectory file.
    """
    if lattice_file is None:
        if len(self.filenames["cell"]) == 0:
            # No .cell file found: use the (constant) lattice from the main
            # output file.
            lattice = self.parse_cell_params()
        elif len(self.filenames["cell"]) == 1:
            # Columns 2..10 of each .cell row hold the 3x3 lattice matrix,
            # one row per ionic step.
            latfile = np.loadtxt(self.filenames["cell"][0])
            lattice = (
                [l[2:11].reshape(3, 3) for l in latfile] if len(latfile.shape) > 1 else latfile[2:11].reshape(3, 3)
            )
            # NOTE(review): in the single-row case `lattice` is an ndarray
            # and .append would raise AttributeError — verify with a
            # one-step .cell file.
            lattice.append(lattice[-1])  # TODO is this always needed? from re-eval at minimum
        else:
            raise FileNotFoundError("Unable to automatically determine lattice file. More than one exist.")
    else:
        latfile = np.loadtxt(lattice_file)
        lattice = [l[2:].reshape(3, 3) for l in latfile]
    if trajectory_file is None:
        if len(self.filenames["trajectory"]) == 0:
            # Static run: the only structure is the initial one.
            self.structures = []
            self.structures.append(self.parse_initial_structure())
            self.final_structure = self.structures[-1]
        elif len(self.filenames["trajectory"]) == 1:
            # Pair each XYZ frame with the lattice of the same ionic step.
            mols = XYZ.from_file(self.filenames["trajectory"][0]).all_molecules
            self.structures = []
            for m, l in zip(mols, lattice):
                self.structures.append(
                    Structure(
                        lattice=l,
                        coords=[s.coords for s in m.sites],
                        species=[s.specie for s in m.sites],
                        coords_are_cartesian=True,
                    )
                )
            self.final_structure = self.structures[-1]
        else:
            raise FileNotFoundError("Unable to automatically determine trajectory file. More than one exist.")
    else:
        mols = XYZ.from_file(trajectory_file).all_molecules
        self.structures = []
        for m, l in zip(mols, lattice):
            self.structures.append(
                Structure(
                    lattice=l,
                    coords=[s.coords for s in m.sites],
                    species=[s.specie for s in m.sites],
                    coords_are_cartesian=True,
                )
            )
        self.final_structure = self.structures[-1]
    # Carry the total charge over from the initial structure.
    self.final_structure.set_charge(self.initial_structure.charge)
def parse_initial_structure(self):
    """
    Parse the initial structure from the main cp2k output file.

    Returns:
        Structure built from the atomic coordinate table printed near the
        top of the output. Also sets self.initial_structure and
        self.composition, and tags ghost atoms (kinds with no
        pseudopotential) via the "ghost" site property.
    """
    pattern = re.compile(r"- Atoms:\s+(\d+)")
    patterns = {"num_atoms": pattern}
    self.read_pattern(
        patterns=patterns,
        reverse=False,
        terminate_on_match=True,
        postprocess=int,
    )
    coord_table = []
    with zopen(self.filename, "rt") as f:
        while True:
            line = f.readline()
            if "Atom Kind Element X Y Z Z(eff) Mass" in line:
                # Skip the separator line, then read one row per atom.
                f.readline()
                for i in range(self.data["num_atoms"][0][0]):
                    coord_table.append(f.readline().split())
                break
    lattice = self.parse_cell_params()
    # Kind numbers whose pseudopotential is "NONE" are ghost atoms.
    gs = {}
    for k in self.data["atomic_kind_info"].values():
        if k["pseudo_potential"].upper() == "NONE":
            gs[k["kind_number"]] = True
        else:
            gs[k["kind_number"]] = False
    self.initial_structure = Structure(
        lattice[0],
        species=[i[2] for i in coord_table],
        coords=[[float(i[4]), float(i[5]), float(i[6])] for i in coord_table],
        coords_are_cartesian=True,
        site_properties={"ghost": [gs.get(int(i[1])) for i in coord_table]},
    )
    self.initial_structure.set_charge(self.input["FORCE_EVAL"]["DFT"].get("CHARGE", [0])[0])
    self.composition = self.initial_structure.composition
    return self.initial_structure
def ran_successfully(self):
    """
    Sanity checks that the program ran successfully. Looks at the bottom of
    the CP2K output file for the "PROGRAM ENDED" line, which is printed when
    successfully ran. Also grabs the number of warnings issued.

    Raises:
        ValueError: if the "PROGRAM ENDED" banner is absent.
    """
    # Search from the end of the file: both banners live in the footer.
    self.read_pattern(
        patterns={"completed": re.compile(r"PROGRAM ENDED AT\s+(\w+)")},
        reverse=True,
        terminate_on_match=True,
        postprocess=bool,
    )
    self.read_pattern(
        patterns={"num_warnings": re.compile(r"The number of warnings for this run is : (\d+)")},
        reverse=True,
        terminate_on_match=True,
        postprocess=int,
    )
    if not self.completed:
        raise ValueError("The provided CP2K job did not finish running! Cannot parse the file reliably.")
def convergence(self):
    """
    Check whether or not the SCF and geometry optimization cycles converged.
    Emits a UserWarning for unconverged SCF cycles and for an unconverged
    geometry optimization.
    """
    # SCF Loops
    self.read_pattern(
        patterns={
            "uncoverged_inner_loop": re.compile(r"(Leaving inner SCF loop)"),
            "scf_converged": re.compile(r"(SCF run converged)|(SCF run NOT converged)"),
        },
        reverse=True,
        terminate_on_match=False,
        postprocess=bool,
    )
    # The first capture group is truthy only for the "converged" alternative.
    self.data["scf_converged"] = [bool(match[0]) for match in self.data["scf_converged"]]
    # GEO_OPT
    self.read_pattern(
        patterns={
            "geo_opt_converged": re.compile(r"(GEOMETRY OPTIMIZATION COMPLETED)"),
            "geo_opt_not_converged": re.compile(r"(MAXIMUM NUMBER OF OPTIMIZATION STEPS REACHED)"),
        },
        reverse=True,
        terminate_on_match=True,
        postprocess=bool,
    )
    if not all(self.data["scf_converged"]):
        warnings.warn(
            "There is at least one unconverged SCF cycle in the provided cp2k calculation",
            UserWarning,
        )
    if any(self.data["geo_opt_not_converged"]):
        warnings.warn("Geometry optimization did not converge", UserWarning)
def parse_energies(self):
    """
    Get the total energy from the output file.

    Populates self.data["total_energy"] (eV, one entry per ionic step) and
    self.final_energy (eV, last step). Bug fixes: the decimal point in the
    regex is now escaped (an unescaped '.' matches any character), and an
    output with no energies produces a warning and final_energy = None
    instead of an IndexError.
    """
    toten_pattern = re.compile(r"Total FORCE_EVAL.*\s(-?\d+\.\d+)")
    self.read_pattern(
        {"total_energy": toten_pattern},
        terminate_on_match=False,
        postprocess=float,
        reverse=False,
    )
    # Convert Hartree -> eV in one vectorized pass.
    energies = np.multiply(self.data.get("total_energy", []), _hartree_to_ev_)
    self.data["total_energy"] = energies
    if len(energies):
        self.final_energy = energies[-1][-1]
    else:
        self.final_energy = None
        warnings.warn("No total energies identified! Check output file")
def parse_forces(self):
    """
    Get the forces from the output file.

    Prefers the dedicated *frc*.xyz trajectory file when exactly one is
    present; otherwise falls back to the ATOMIC FORCES tables in the main
    output. Stores one list of [fx, fy, fz] per atom per ionic step in
    self.data["forces"].
    """
    if len(self.filenames["forces"]) == 1:
        self.data["forces"] = [
            [list(atom.coords) for atom in step]
            for step in XYZ.from_file(self.filenames["forces"][0]).all_molecules
        ]
    else:
        header_pattern = r"ATOMIC FORCES.+Z"
        row_pattern = r"\s+\d+\s+\d+\s+\w+\s+(-?\d+\.\d+)\s+(-?\d+\.\d+)\s+(-?\d+\.\d+)"
        footer_pattern = r"SUM OF ATOMIC FORCES"
        self.data["forces"] = self.read_table_pattern(
            header_pattern=header_pattern,
            row_pattern=row_pattern,
            footer_pattern=footer_pattern,
            postprocess=_postprocessor,
            last_one_only=False,
        )
def parse_stresses(self):
    """
    Get the stresses from the output file.

    Prefers the dedicated *stress* file when exactly one exists; otherwise
    falls back to the STRESS TENSOR tables in the main output. Also collects
    the trace of the stress tensor into self.data["stress"].
    """
    if len(self.filenames["stress"]) == 1:
        # Columns 2..10 of each row form the 3x3 stress tensor for one step.
        dat = np.loadtxt(self.filenames["stress"][0], skiprows=1)
        self.data["stress_tensor"] = [[list(d[2:5]), list(d[5:8]), list(d[8:11])] for d in dat]
    else:
        header_pattern = r"STRESS TENSOR.+Z"
        row_pattern = r"\s+\w+\s+(-?\d+\.\d+)\s+(-?\d+\.\d+)\s+(-?\d+\.\d+)"
        footer_pattern = r"^$"
        self.data["stress_tensor"] = self.read_table_pattern(
            header_pattern=header_pattern,
            row_pattern=row_pattern,
            footer_pattern=footer_pattern,
            postprocess=_postprocessor,
            last_one_only=False,
        )
    trace_pattern = re.compile(r"Trace\(stress tensor.+(-?\d+\.\d+E?-?\d+)")
    self.read_pattern(
        {"stress": trace_pattern},
        terminate_on_match=False,
        postprocess=float,
        reverse=False,
    )
def parse_ionic_steps(self):
    """
    Parse the ionic step info.

    Assembles self.ionic_steps from previously parsed data: one dict per
    step with keys "E" (total energy, eV), "forces", "stress_tensor", and
    "structure" where available.
    """
    self.ionic_steps = []
    # TODO: find a better workaround. Currently when optimization is done there
    # is an extra scf step before the optimization starts causing size difference
    if len(self.structures) + 1 == len(self.data["total_energy"]):
        self.data["total_energy"] = self.data["total_energy"][1:]
    for i in range(len(self.data["total_energy"])):
        self.ionic_steps.append({})
        try:
            self.ionic_steps[i]["E"] = self.data["total_energy"][i][0]
        except (TypeError, IndexError):
            warnings.warn("No total energies identified! Check output file")
        try:
            self.ionic_steps[i]["forces"] = self.data["forces"][i]
        except (TypeError, IndexError):
            # Forces are optional; skip silently when absent.
            pass
        try:
            self.ionic_steps[i]["stress_tensor"] = self.data["stress_tensor"][i][0]
        except (TypeError, IndexError):
            # Stresses are optional; skip silently when absent.
            pass
        try:
            self.ionic_steps[i]["structure"] = self.structures[i]
        except (TypeError, IndexError):
            warnings.warn("Structure corresponding to this ionic step was not found!")
def parse_cp2k_params(self):
    """
    Parse the CP2K general parameters from CP2K output file into a dictionary.
    Stores "cp2k_version" and "input_filename" in self.data.
    """
    # Both banners appear once near the top of the file, so stop at the
    # first match of each.
    self.read_pattern(
        {
            "cp2k_version": re.compile(r"\s+CP2K\|.+(\d\.\d)"),
            "input_filename": re.compile(r"\s+CP2K\|\s+Input file name\s+(.+)$"),
        },
        terminate_on_match=True,
        reverse=False,
        postprocess=_postprocessor,
    )
def parse_input(self):
    """
    Load in the input set from the input file (if it can be found).
    """
    if len(self.data["input_filename"]) == 0:
        return
    input_filename = self.data["input_filename"][0][0]
    # The input file may have been compressed after the run finished.
    for ext in ["", ".gz", ".GZ", ".z", ".Z", ".bz2", ".BZ2"]:
        candidate = os.path.join(self.dir, input_filename + ext)
        if os.path.exists(candidate):
            self.input = Cp2kInput.from_file(candidate)
            return
    warnings.warn("Original input file not found. Some info may be lost.")
def parse_global_params(self):
    """
    Parse the GLOBAL section parameters from CP2K output file into a dictionary.
    """
    pat = re.compile(r"\s+GLOBAL\|\s+([\w+\s]*)\s+(\w+)")
    self.read_pattern({"global": pat}, terminate_on_match=False, reverse=False)
    # Normalize keys through the postprocessor and force values to str.
    self.data["global"] = {_postprocessor(k): str(v) for k, v in self.data["global"]}
def parse_dft_params(self):
    """
    Parse the DFT parameters (as well as functional, HF, vdW params).
    All results land in self.data["dft"].
    """
    pat = re.compile(r"\s+DFT\|\s+(\w.*)\s\s\s(.*)$")
    self.read_pattern(
        {"dft": pat},
        terminate_on_match=False,
        postprocess=_postprocessor,
        reverse=False,
    )
    self.data["dft"] = dict(self.data["dft"])
    # Collect the three cutoff entries under a single "cutoffs" sub-dict.
    self.data["dft"]["cutoffs"] = {}
    self.data["dft"]["cutoffs"]["density"] = self.data["dft"].pop("Cutoffs:_density", None)
    self.data["dft"]["cutoffs"]["gradient"] = self.data["dft"].pop("gradient", None)
    self.data["dft"]["cutoffs"]["tau"] = self.data["dft"].pop("tau", None)
    # Functional
    functional = re.compile(r"\s+FUNCTIONAL\|\s+(.+):")
    self.read_pattern(
        {"functional": functional},
        terminate_on_match=False,
        postprocess=_postprocessor,
        reverse=False,
    )
    # Flatten the list of single-capture match lists into a plain list.
    self.data["dft"]["functional"] = [item for sublist in self.data.pop("functional", None) for item in sublist]
    # HF exchange info
    hfx = re.compile(r"\s+HFX_INFO\|\s+(.+):\s+(.*)$")
    self.read_pattern(
        {"hfx": hfx},
        terminate_on_match=False,
        postprocess=_postprocessor,
        reverse=False,
    )
    if len(self.data["hfx"]) > 0:
        self.data["dft"]["hfx"] = dict(self.data.pop("hfx"))
    # Van der waals correction
    vdw = re.compile(r"\s+vdW POTENTIAL\|\s+(DFT-D.)\s")
    self.read_pattern(
        {"vdw": vdw},
        terminate_on_match=False,
        postprocess=_postprocessor,
        reverse=False,
    )
    if len(self.data["vdw"]) > 0:
        self.data["dft"]["vdw"] = self.data.pop("vdw")[0][0]
def parse_scf_params(self):
    """
    Retrieve the most import SCF parameters: the max number of scf cycles (max_scf),
    the convergence cutoff for scf (eps_scf),
    :return:
    """
    max_scf = re.compile(r"max_scf:\s+(\d+)")
    # NOTE(review): eps_scf is normally printed in scientific notation
    # (e.g. 1.00E-07); this pattern captures only a run of digits — verify
    # against a real CP2K output that the intended value is matched.
    eps_scf = re.compile(r"eps_scf:\s+(\d+)")
    self.read_pattern(
        {"max_scf": max_scf, "eps_scf": eps_scf},
        terminate_on_match=True,
        reverse=False,
    )
    # Fall back to None when either banner was not found.
    self.data["scf"] = {}
    self.data["scf"]["max_scf"] = self.data.pop("max_scf")[0][0] if self.data["max_scf"] else None
    self.data["scf"]["eps_scf"] = self.data.pop("eps_scf")[0][0] if self.data["eps_scf"] else None
def parse_cell_params(self):
    """
    Parse the lattice parameters (initial) from the output file.

    Returns:
        list of 3-tuples, each holding the three lattice-vector rows of one
        CELL block.
    """
    self.read_pattern(
        {
            "cell_volume": re.compile(r"\s+CELL\|\sVolume.*\s(\d+\.\d+)"),
            "lattice": re.compile(r"\s+CELL\| Vector.*\s(-?\d+\.\d+)\s+(-?\d+\.\d+)\s+(-?\d+\.\d+)"),
            "angles": re.compile(r"\s+CELL\| Angle.*\s(\d+\.\d+)"),
        },
        terminate_on_match=False,
        postprocess=float,
        reverse=False,
    )
    # Group the flat vector list into (a, b, c) triples: zipping a single
    # iterator with itself three times consumes three entries per tuple.
    rows = iter(self.data["lattice"])
    return list(zip(rows, rows, rows))
def parse_atomic_kind_info(self):
    """
    Parse info on what atomic kinds are present and what basis/pseudopotential
    is describing each of them.

    Populates self.data["atomic_kind_info"]: one dict per kind label with
    basis sets, pseudopotential, electron counts, kind number, and the total
    pseudopotential energy (eV).
    """
    kinds = re.compile(r"Atomic kind: (\w+)")
    orbital_basis_set = re.compile(r"Orbital Basis Set\s+(.+$)")
    potential_information = re.compile(r"(?:Potential information for\s+(.+$))|(?:atomic kind are GHOST atoms)")
    auxiliary_basis_set = re.compile(r"Auxiliary Fit Basis Set\s+(.+$)")
    core_electrons = re.compile(r"Total number of core electrons\s+(\d+)")
    valence_electrons = re.compile(r"Total number of valence electrons\s+(\d+)")
    pseudo_energy = re.compile(r"Total Pseudopotential Energy.+(-?\d+.\d+)")
    self.read_pattern(
        {
            "kinds": kinds,
            "orbital_basis_set": orbital_basis_set,
            "potential_info": potential_information,
            "auxiliary_basis_set": auxiliary_basis_set,
            "core_electrons": core_electrons,
            "valence_electrons": valence_electrons,
            "pseudo_energy": pseudo_energy,
        },
        terminate_on_match=True,
        postprocess=str,
        reverse=False,
    )
    atomic_kind_info = {}
    for i, kind in enumerate(self.data["kinds"]):
        atomic_kind_info[kind[0]] = {
            "orbital_basis_set": self.data.get("orbital_basis_set")[i][0],
            "pseudo_potential": self.data.get("potential_info")[i][0],
            "kind_number": i + 1,
        }
        # Each of the following fields may be absent for a given kind; a
        # failed lookup simply leaves the value as None.
        try:
            atomic_kind_info[kind[0]]["valence_electrons"] = self.data.get("valence_electrons")[i][0]
        except (TypeError, IndexError):
            atomic_kind_info[kind[0]]["valence_electrons"] = None
        try:
            atomic_kind_info[kind[0]]["core_electrons"] = self.data.get("core_electrons")[i][0]
        except (TypeError, IndexError):
            atomic_kind_info[kind[0]]["core_electrons"] = None
        try:
            atomic_kind_info[kind[0]]["auxiliary_basis_set"] = self.data.get("auxiliary_basis_set")[i]
        except (TypeError, IndexError):
            atomic_kind_info[kind[0]]["auxiliary_basis_set"] = None
        try:
            # BUG FIX: matches are stored under "pseudo_energy" (see the
            # pattern dict above); the original read a nonexistent
            # "total_pseudopotential_energy" key, and since postprocess=str
            # the str * float product would have raised TypeError anyway —
            # so this field was silently always None. Look up the right key
            # and convert explicitly.
            atomic_kind_info[kind[0]]["total_pseudopotential_energy"] = (
                float(self.data.get("pseudo_energy")[i][0]) * _hartree_to_ev_
            )
        except (TypeError, IndexError, ValueError):
            atomic_kind_info[kind[0]]["total_pseudopotential_energy"] = None
    self.data["atomic_kind_info"] = atomic_kind_info
def parse_total_numbers(self):
    """
    Parse total numbers (not usually important).
    """
    # One regex per line of CP2K's "TOTAL NUMBERS AND MAXIMUM NUMBERS"
    # summary; the first occurrence of each is kept.
    self.read_pattern(
        {
            "atomic_kinds": r"- Atomic kinds:\s+(\d+)",
            "atoms": r"- Atoms:\s+(\d+)",
            "shell_sets": r"- Shell sets:\s+(\d+)",
            "shells": r"- Shells:\s+(\d+)",
            "primitive_cartesian_functions": r"- Primitive Cartesian functions:\s+(\d+)",
            "cartesian_basis_functions": r"- Cartesian basis functions:\s+(\d+)",
            "spherical_basis_functions": r"- Spherical basis functions:\s+(\d+)",
        },
        terminate_on_match=True,
    )
def parse_scf_opt(self):
    """
    Parse the SCF cycles (not usually important).
    Populates self.data["electronic_steps"], ["convergence"] and
    ["scf_time"], one inner list per SCF cycle.
    """
    header = r"Step\s+Update method\s+Time\s+Convergence\s+Total energy\s+Change" + r"\s+\-+"
    row = (
        r"(\d+)\s+(\S+\s?\S+)\s+(\d+\.\d+E\+\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)?"
        + r"\s+(-?\d+\.\d+)\s+(-?\d+\.\d+E[\+\-]?\d+)"
    )
    footer = r"^$"
    scfs = self.read_table_pattern(
        header_pattern=header,
        row_pattern=row,
        footer_pattern=footer,
        last_one_only=False,
    )
    self.data["electronic_steps"] = []
    self.data["convergence"] = []
    self.data["scf_time"] = []
    # Columns are indexed from the end of the row because one of the
    # convergence captures is optional and may be missing ("None").
    for i in scfs:
        self.data["scf_time"].append([float(j[-4]) for j in i])
        self.data["convergence"].append([float(j[-3]) for j in i if j[-3] != "None"])
        self.data["electronic_steps"].append([float(j[-2]) for j in i])
def parse_timing(self):
    """
    Parse the timing info (how long did the run take).
    Populates self.timing, keyed by subroutine name.
    """
    header = (
        r"SUBROUTINE\s+CALLS\s+ASD\s+SELF TIME\s+TOTAL TIME" + r"\s+MAXIMUM\s+AVERAGE\s+MAXIMUM\s+AVERAGE\s+MAXIMUM"
    )
    row = r"(\w+)\s+(.+)\s+(\d+\.\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)"
    footer = r"\-+"
    rows = self.read_table_pattern(
        header_pattern=header,
        row_pattern=row,
        footer_pattern=footer,
        last_one_only=True,
        postprocess=_postprocessor,
    )
    # Keyed by subroutine name; a later duplicate overwrites an earlier
    # one, matching the behavior of the original per-row loop.
    self.timing = {
        entry[0]: {
            "calls": {"max": entry[1]},
            "asd": entry[2],
            "self_time": {"average": entry[3], "maximum": entry[4]},
            "total_time": {"average": entry[5], "maximum": entry[6]},
        }
        for entry in rows
    }
def parse_opt_steps(self):
    """
    Parse the geometry optimization information.
    Collects the per-step summary blocks ("Informations at step =") into
    self.data: floating-point quantities first, then boolean convergence
    flags.
    """
    # "Informations at step =" Summary block (floating point terms)
    total_energy = re.compile(r"\s+Total Energy\s+=\s+(-?\d+.\d+)")
    real_energy_change = re.compile(r"\s+Real energy change\s+=\s+(-?\d+.\d+)")
    prediced_change_in_energy = re.compile(r"\s+Predicted change in energy\s+=\s+(-?\d+.\d+)")
    scaling_factor = re.compile(r"\s+Scaling factor\s+=\s+(-?\d+.\d+)")
    step_size = re.compile(r"\s+Step size\s+=\s+(-?\d+.\d+)")
    trust_radius = re.compile(r"\s+Trust radius\s+=\s+(-?\d+.\d+)")
    used_time = re.compile(r"\s+Used time\s+=\s+(-?\d+.\d+)")
    # For RUN_TYPE=CELL_OPT
    pressure_deviation = re.compile(r"\s+Pressure Deviation.*=\s+(-?\d+.\d+)")
    pressure_tolerance = re.compile(r"\s+Pressure Tolerance.*=\s+(-?\d+.\d+)")
    self.read_pattern(
        {
            "total_energy": total_energy,
            "real_energy_change": real_energy_change,
            "predicted_change_in_energy": prediced_change_in_energy,
            "scaling_factor": scaling_factor,
            "step_size": step_size,
            "trust_radius": trust_radius,
            "used_time": used_time,
            "pressure_deviation": pressure_deviation,
            "pressure_tolerance": pressure_tolerance,
        },
        terminate_on_match=False,
        postprocess=float,
    )
    # "Informations at step =" Summary block (bool terms)
    decrease_in_energy = re.compile(r"\s+Decrease in energy\s+=\s+(\w+)")
    converged_step_size = re.compile(r"\s+Convergence in step size\s+=\s+(\w+)")
    converged_rms_step = re.compile(r"\s+Convergence in RMS step\s+=\s+(\w+)")
    converged_in_grad = re.compile(r"\s+Conv\. in gradients\s+=\s+(\w+)")
    converged_in_rms_grad = re.compile(r"\s+Conv\. in RMS gradients\s+=\s+(\w+)")
    pressure_converged = re.compile(r"\s+Conv\. for PRESSURE\s+=\s+(\w+)")
    self.read_pattern(
        {
            "decrease_in_energy": decrease_in_energy,
            "converged_step_size": converged_step_size,
            "converged_rms_step": converged_rms_step,
            "converged_in_grad": converged_in_grad,
            "converged_in_rms_grad": converged_in_rms_grad,
            "pressure_converged": pressure_converged,
        },
        terminate_on_match=False,
        postprocess=_postprocessor,
    )
def parse_mulliken(self):
    """
    Parse the mulliken population analysis info for each step.
    NOTE: currently a placeholder — the table is located but the parsed
    data is not stored anywhere yet.
    :return:
    """
    header = r"Mulliken Population Analysis.+Net charge"
    pattern = r"\s+(\d)\s+(\w+)\s+(\d+)\s+(-?\d+\.\d+)\s+(-?\d+\.\d+)"
    footer = r".+Total charge"
    d = self.read_table_pattern(
        header_pattern=header,
        row_pattern=pattern,
        footer_pattern=footer,
        last_one_only=False,
    )
    if d:
        print("Found data, but not yet implemented!")
def parse_hirshfeld(self):
    """
    Parse the hirshfeld population analysis for each step.

    Adds a "hirshfield" (sic — the misspelled key is kept for backward
    compatibility) site property to each structure containing population,
    net charge and, for spin-polarized runs, the spin moment.
    """
    uks = self.spin_polarized
    header = r"Hirshfeld Charges.+Net charge"
    footer = r"^$"
    if not uks:
        # Row groups: atom index, element, kind, ref charge, population,
        # net charge.
        pattern = r"\s+(\d)\s+(\w+)\s+(\d+)\s+(-?\d+\.\d+)\s+(-?\d+\.\d+)\s+(-?\d+\.\d+)"
        d = self.read_table_pattern(
            header_pattern=header,
            row_pattern=pattern,
            footer_pattern=footer,
            last_one_only=False,
        )
        for i, ionic_step in enumerate(d):
            population = []
            net_charge = []
            for site in ionic_step:
                population.append(site[4])
                net_charge.append(site[5])
            hirshfeld = [{"population": population[j], "net_charge": net_charge[j]} for j in range(len(population))]
            self.structures[i].add_site_property("hirshfield", hirshfeld)
    else:
        # Row groups: atom index, element, kind, ref charge, alpha
        # population, beta population, spin moment, net charge.
        pattern = (
            r"\s+(\d)\s+(\w+)\s+(\d+)\s+(-?\d+\.\d+)\s+"
            + r"(-?\d+\.\d+)\s+(-?\d+\.\d+)\s+(-?\d+\.\d+)\s+(-?\d+\.\d+)"
        )
        d = self.read_table_pattern(
            header_pattern=header,
            row_pattern=pattern,
            footer_pattern=footer,
            last_one_only=False,
        )
        for i, ionic_step in enumerate(d):
            population = []
            net_charge = []
            spin_moment = []
            for site in ionic_step:
                # BUG FIX: the spin-resolved population spans TWO columns
                # (alpha at index 4, beta at index 5). The original slice
                # site[4:5] silently dropped the beta channel.
                population.append(tuple(site[4:6]))
                spin_moment.append(site[6])
                net_charge.append(site[7])
            hirshfeld = [
                {
                    "population": population[j],
                    "net_charge": net_charge[j],
                    "spin_moment": spin_moment[j],
                }
                for j in range(len(population))
            ]
            self.structures[i].add_site_property("hirshfield", hirshfeld)
def parse_mo_eigenvalues(self):
    """
    Parse the MO eigenvalues from the cp2k output file. Will get the eigenvalues (and band gap)
    at each ionic step (if more than one exist).
    Everything is decomposed by spin channel. If calculation was performed without spin polarization,
    then only Spin.up will be present, which represents the average of up and down.
    """
    eigenvalues = []
    band_gap = []
    efermi = []
    with zopen(self.filename, "rt") as f:
        lines = iter(f.readlines())
        for line in lines:
            try:
                # Occupied eigenvalue block, spin channel 1 (Spin.up).
                if line.__contains__(" occupied subspace spin"):
                    eigenvalues.append(
                        {
                            "occupied": {Spin.up: [], Spin.down: []},
                            "unoccupied": {Spin.up: [], Spin.down: []},
                        }
                    )
                    efermi.append({Spin.up: None, Spin.down: None})
                    next(lines)
                    while True:
                        line = next(lines)
                        # The block ends with the Fermi energy line.
                        if line.__contains__("Fermi"):
                            efermi[-1][Spin.up] = float(line.split()[-1])
                            break
                        eigenvalues[-1]["occupied"][Spin.up].extend(
                            [_hartree_to_ev_ * float(l) for l in line.split()]
                        )
                    next(lines)
                    line = next(lines)
                    # Optional second occupied block (Spin.down) for
                    # spin-polarized runs.
                    if line.__contains__(" occupied subspace spin"):
                        next(lines)
                        while True:
                            line = next(lines)
                            if line.__contains__("Fermi"):
                                efermi[-1][Spin.down] = float(line.split()[-1])
                                break
                            eigenvalues[-1]["occupied"][Spin.down].extend(
                                [_hartree_to_ev_ * float(l) for l in line.split()]
                            )
                # Unoccupied eigenvalue block, spin channel 1 (Spin.up).
                if line.__contains__(" unoccupied subspace spin"):
                    next(lines)
                    line = next(lines)
                    while True:
                        if line.__contains__("WARNING : did not converge"):
                            warnings.warn(
                                "Convergence of eigenvalues for " "unoccupied subspace spin 1 did NOT converge"
                            )
                            # Skip the warning banner, then read the
                            # (unconverged) values anyway.
                            next(lines)
                            next(lines)
                            next(lines)
                            line = next(lines)
                            eigenvalues[-1]["unoccupied"][Spin.up].extend(
                                [_hartree_to_ev_ * float(l) for l in line.split()]
                            )
                            next(lines)
                            line = next(lines)
                            break
                        line = next(lines)
                        if "Eigenvalues" in line or "HOMO" in line:
                            break
                        eigenvalues[-1]["unoccupied"][Spin.up].extend(
                            [_hartree_to_ev_ * float(l) for l in line.split()]
                        )
                    # Optional second unoccupied block (Spin.down).
                    if line.__contains__(" unoccupied subspace spin"):
                        next(lines)
                        line = next(lines)
                        while True:
                            if line.__contains__("WARNING : did not converge"):
                                warnings.warn(
                                    "Convergence of eigenvalues for " "unoccupied subspace spin 2 did NOT converge"
                                )
                                next(lines)
                                next(lines)
                                next(lines)
                                line = next(lines)
                                eigenvalues[-1]["unoccupied"][Spin.down].extend(
                                    [_hartree_to_ev_ * float(l) for l in line.split()]
                                )
                                break
                            line = next(lines)
                            if line.__contains__("HOMO"):
                                next(lines)
                                break
                            try:
                                eigenvalues[-1]["unoccupied"][Spin.down].extend(
                                    [_hartree_to_ev_ * float(l) for l in line.split()]
                                )
                            except AttributeError:
                                break
            except ValueError:
                # A non-numeric token inside a block means the values could
                # not be read; record a sentinel entry instead.
                eigenvalues = [
                    {
                        "occupied": {Spin.up: None, Spin.down: None},
                        "unoccupied": {Spin.up: None, Spin.down: None},
                    }
                ]
                warnings.warn("Convergence of eigenvalues for one or more subspaces did NOT converge")
    self.data["eigenvalues"] = eigenvalues
    self.data["band_gap"] = band_gap
    if len(eigenvalues) == 0:
        warnings.warn("No MO eigenvalues detected.")
        return
    # self.data will always contained the eigenvalues resolved by spin channel. The average vbm, cbm, gap,
    # and fermi are saved as class attributes, as there is (usually) no assymmetry in these values for
    # common materials
    if self.spin_polarized:
        self.data["vbm"] = {
            Spin.up: np.max(eigenvalues[-1]["occupied"][Spin.up]),
            Spin.down: np.max(eigenvalues[-1]["occupied"][Spin.down]),
        }
        self.data["cbm"] = {
            Spin.up: np.min(eigenvalues[-1]["unoccupied"][Spin.up]),
            Spin.down: np.min(eigenvalues[-1]["unoccupied"][Spin.down]),
        }
        self.vbm = (self.data["vbm"][Spin.up] + self.data["vbm"][Spin.down]) / 2
        self.cbm = (self.data["cbm"][Spin.up] + self.data["cbm"][Spin.down]) / 2
        self.efermi = (efermi[-1][Spin.up] + efermi[-1][Spin.down]) / 2
    else:
        self.data["vbm"] = {
            Spin.up: np.max(eigenvalues[-1]["occupied"][Spin.up]),
            Spin.down: None,
        }
        self.data["cbm"] = {
            Spin.up: np.min(eigenvalues[-1]["unoccupied"][Spin.up]),
            Spin.down: None,
        }
        self.vbm = self.data["vbm"][Spin.up]
        self.cbm = self.data["cbm"][Spin.up]
        self.efermi = efermi[-1][Spin.up]
def parse_homo_lumo(self):
"""
Find the HOMO - LUMO gap in [eV]. Returns the last value. For gaps/eigenvalues decomposed by
spin up/spin down channel and over many ionic steps, see parse_mo_eigenvalues()
"""
pattern = re.compile(r"HOMO.*-.*LUMO.*gap.*\s(-?\d+.\d+)")
self.read_pattern(
patterns={"band_gap": pattern},
reverse=True,
terminate_on_match=False,
postprocess=float,
)
bg = {Spin.up: [], Spin.down: []}
for i in range(len(self.data["band_gap"])):
if self.spin_polarized:
if i % 2:
bg[Spin.up].append(self.data["band_gap"][i][0])
else:
bg[Spin.down].append(self.data["band_gap"][i][0])
else:
bg[Spin.up].append(self.data["band_gap"][i][0])
bg[Spin.down].append(self.data["band_gap"][i][0])
self.data["band_gap"] = bg
self.band_gap = (bg[Spin.up][-1] + bg[Spin.down][-1]) / 2 if bg[Spin.up] and bg[Spin.down] else None
def parse_dos(self, pdos_files=None, ldos_files=None, sigma=0):
"""
Parse the pdos_ALPHA files created by cp2k, and assimilate them into a CompleteDos object.
Either provide a list of PDOS file paths, or use glob to find the .pdos_ALPHA extension in
the calculation directory.
Args:
pdos_files (list): list of pdos file paths, otherwise they will be inferred
ldos_Files (list): list of ldos file paths, otherwise they will be inferred
sigma (float): Gaussian smearing parameter, if desired. Because cp2k is generally
used as a gamma-point only code, this is often needed to get smooth DOS that
are comparable to k-point averaged DOS
"""
if pdos_files is None:
pdos_files = self.filenames["PDOS"]
if ldos_files is None:
ldos_files = self.filenames["LDOS"]
# Parse specie projected dos
tdos, pdoss, ldoss = None, {}, {}
for pdos_file in pdos_files:
_pdos, _tdos = parse_dos(pdos_file, total=True, sigma=sigma)
for k in _pdos:
if k in pdoss:
for orbital in _pdos[k]:
pdoss[k][orbital].densities.update(_pdos[k][orbital].densities)
else:
pdoss.update(_pdos)
if not tdos:
tdos = _tdos
else:
if not all([_tdos.densities.keys() == tdos.densities.keys()]):
tdos.densities.update(_tdos.densities)
else:
tdos.densities = add_densities(density1=_tdos.densities, density2=tdos.densities)
# parse any site-projected dos
for ldos_file in ldos_files:
_pdos = parse_dos(ldos_file, sigma=sigma)
for k in _pdos:
if k in ldoss:
for orbital in _pdos[k]:
ldoss[k][orbital].densities.update(_pdos[k][orbital].densities)
else:
ldoss.update(_pdos)
self.data["pdos"] = jsanitize(pdoss, strict=True)
self.data["ldos"] = jsanitize(ldoss, strict=True)
self.data["tdos"] = jsanitize(tdos, strict=True)
# If number of site-projected dos == number of sites, assume they are bijective
# and create the CompleteDos object
_ldoss = {}
if len(ldoss) == len(self.initial_structure):
for k in self.data["ldos"]:
_ldoss[self.initial_structure[int(k) - 1]] = self.data["ldos"][k]
self.data["cdos"] = CompleteDos(self.final_structure, total_dos=tdos, pdoss=_ldoss)
@staticmethod
def _gauss_smear(densities, energies, npts, width):
if not width:
return densities
"""Return a gaussian smeared DOS"""
d = np.zeros(npts)
e_s = np.linspace(min(energies), max(energies), npts)
for e, _pd in zip(energies, densities):
weight = np.exp(-(((e_s - e) / width) ** 2)) / (np.sqrt(np.pi) * width)
d += _pd * weight
return d
def read_pattern(self, patterns, reverse=False, terminate_on_match=False, postprocess=str):
r"""
This function originally comes from pymatgen.io.vasp.outputs Outcar class
General pattern reading. Uses monty's regrep method. Takes the same
arguments.
Args:
patterns (dict): A dict of patterns, e.g.,
{"energy": r"energy\\(sigma->0\\)\\s+=\\s+([\\d\\-.]+)"}.
reverse (bool): Read files in reverse. Defaults to false. Useful for
large files, esp OUTCARs, especially when used with
terminate_on_match.
terminate_on_match (bool): Whether to terminate when there is at
least one match in each key in pattern.
postprocess (callable): A post processing function to convert all
matches. Defaults to str, i.e., no change.
Renders accessible:
Any attribute in patterns. For example,
{"energy": r"energy\\(sigma->0\\)\\s+=\\s+([\\d\\-.]+)"} will set the
value of self.data["energy"] = [[-1234], [-3453], ...], to the
results from regex and postprocess. Note that the returned values
are lists of lists, because you can grep multiple items on one line.
"""
matches = regrep(
self.filename,
patterns,
reverse=reverse,
terminate_on_match=terminate_on_match,
postprocess=postprocess,
)
for k in patterns.keys():
self.data[k] = [i[0] for i in matches.get(k, [])]
def read_table_pattern(
self,
header_pattern,
row_pattern,
footer_pattern,
postprocess=str,
attribute_name=None,
last_one_only=True,
):
r"""
This function originally comes from pymatgen.io.vasp.outputs Outcar class
Parse table-like data. A table composes of three parts: header,
main body, footer. All the data matches "row pattern" in the main body
will be returned.
Args:
header_pattern (str): The regular expression pattern matches the
table header. This pattern should match all the text
immediately before the main body of the table. For multiple
sections table match the text until the section of
interest. MULTILINE and DOTALL options are enforced, as a
result, the "." meta-character will also match "\n" in this
section.
row_pattern (str): The regular expression matches a single line in
the table. Capture interested field using regular expression
groups.
footer_pattern (str): The regular expression matches the end of the
table. E.g. a long dash line.
postprocess (callable): A post processing function to convert all
matches. Defaults to str, i.e., no change.
attribute_name (str): Name of this table. If present the parsed data
will be attached to "data. e.g. self.data["efg"] = [...]
last_one_only (bool): All the tables will be parsed, if this option
is set to True, only the last table will be returned. The
enclosing list will be removed. i.e. Only a single table will
be returned. Default to be True.
Returns:
List of tables. 1) A table is a list of rows. 2) A row if either a list of
attribute values in case the the capturing group is defined without name in
row_pattern, or a dict in case that named capturing groups are defined by
row_pattern.
"""
with zopen(self.filename, "rt") as f:
text = f.read()
table_pattern_text = header_pattern + r"\s*^(?P<table_body>(?:\s+" + row_pattern + r")+)\s+" + footer_pattern
table_pattern = re.compile(table_pattern_text, re.MULTILINE | re.DOTALL)
rp = re.compile(row_pattern)
tables = []
for mt in table_pattern.finditer(text):
table_body_text = mt.group("table_body")
table_contents = []
for line in table_body_text.split("\n"):
ml = rp.search(line)
d = ml.groupdict()
if len(d) > 0:
processed_line = {k: postprocess(v) for k, v in d.items()}
else:
processed_line = [postprocess(v) for v in ml.groups()]
table_contents.append(processed_line)
tables.append(table_contents)
if last_one_only:
retained_data = tables[-1]
else:
retained_data = tables
if attribute_name is not None:
self.data[attribute_name] = retained_data
return retained_data
def as_dict(self):
"""
Return dictionary representation of the output
"""
d = {"input": {}, "output": {}}
d["total_time"] = self.timing["CP2K"]["total_time"]["maximum"]
d["run_type"] = self.run_type
d["input"]["global"] = self.data.get("global")
d["input"]["dft"] = self.data.get("dft", None)
d["input"]["scf"] = self.data.get("scf", None)
d["input"]["structure"] = self.initial_structure.as_dict()
d["input"]["atomic_kind_info"] = self.data.get("atomic_kind_info", None)
d["input"]["cp2k_input"] = self.input
d["ran_successfully"] = self.completed
d["cp2k_version"] = self.cp2k_version
d["output"]["structure"] = self.final_structure.as_dict()
d["output"]["ionic_steps"] = self.ionic_steps
d["composition"] = self.composition.as_dict()
d["output"]["energy"] = self.final_energy
d["output"]["energy_per_atom"] = self.final_energy / self.composition.num_atoms
d["output"]["bandgap"] = self.band_gap
d["output"]["cbm"] = self.cbm
d["output"]["vbm"] = self.vbm
d["output"]["efermi"] = self.efermi
d["output"]["is_metal"] = self.is_metal
return d
def parse_energy_file(energy_file):
    """
    Parses energy file for calculations with multiple ionic steps.

    Args:
        energy_file (str): path to the *.ener file written by cp2k.

    Returns:
        dict mapping column name -> numpy array of per-step values, with all
        energies converted from Hartree to eV.
    """
    columns = [
        "step",
        "kinetic_energy",
        "temp",
        "potential_energy",
        "conserved_quantity",
        "used_time",
    ]
    df = pd.read_table(energy_file, skiprows=1, names=columns, sep=r"\s+")
    # Convert energy columns from Hartree to eV.
    df["kinetic_energy"] = df["kinetic_energy"] * _hartree_to_ev_
    df["potential_energy"] = df["potential_energy"] * _hartree_to_ev_
    df["conserved_quantity"] = df["conserved_quantity"] * _hartree_to_ev_
    # NOTE: the original code called df.astype(float) without assigning the
    # result, which was a no-op; the dead statement has been removed.
    d = {c: df[c].values for c in columns}
    return d
def parse_dos(dos_file=None, spin_channel=None, total=False, sigma=0):
    """
    Parse a single DOS file created by cp2k. Must contain one PDOS snapshot. i.e. you cannot
    use this cannot deal with multiple concatenated dos files.

    Args:
        dos_file (list): list of pdos_ALPHA file paths
        spin_channel (int): Which spin channel the file corresponds to. By default, CP2K will
            write the file with ALPHA or BETA in the filename (for spin up or down), but
            you can specify this here, in case you have a manual file name.
            spin_channel == 1 --> spin up, spin_channel == -1 --> spin down.
        total (bool): Whether to grab the total occupations, or the orbital decomposed ones.
        sigma (float): width for gaussian smearing, if desired

    Returns:
        Everything necessary to create a dos object, in dict format:
            (1) orbital decomposed DOS dict:
                i.e. pdoss = {specie: {orbital.s: {Spin.up: ... }, orbital.px: {Spin.up: ... } ...}}
            (2) energy levels of this dos file
            (3) fermi energy (in eV).
        DOS object is not created here
    """
    if spin_channel:
        spin = Spin(spin_channel)
    else:
        # CP2K encodes the spin channel in the filename (ALPHA/BETA).
        spin = Spin.down if os.path.split(dos_file)[-1].__contains__("BETA") else Spin.up

    with zopen(dos_file, "rt") as f:
        lines = f.readlines()
        kind = re.search(r"atomic kind\s(.*)\sat iter", lines[0]) or re.search(r"list\s(\d+)\s(.*)\sat iter", lines[0])
        kind = kind.groups()[0]
        efermi = float(lines[0].split()[-2]) * _hartree_to_ev_
        header = re.split(r"\s{2,}", lines[1].replace("#", "").strip())[2:]

    dat = np.loadtxt(dos_file)

    # Map CP2K orbital labels onto pymatgen Orbital attribute names; labels not
    # in the table (e.g. "s") pass through unchanged.
    cp2k_to_pmg = {
        "p": "px",
        "d": "dxy",
        "f": "f_3",
        "d-2": "dxy",
        "d-1": "dyz",
        "d0": "dz2",
        "d+1": "dxz",
        "d+2": "dx2",
        "f-3": "f_3",
        "f-2": "f_2",
        "f-1": "f_1",
        "f0": "f0",
        "f+1": "f1",
        "f+2": "f2",
        "f+3": "f3",
    }
    header = [cp2k_to_pmg.get(h, h) for h in header]

    # Drop the MO-index column; remaining columns: energy, occupation, orbitals.
    data = dat[:, 1:]
    # Convert the energy column from Hartree to eV exactly once. The original
    # code multiplied in place AND again when extracting `energies`, which
    # double-converted the energy axis.
    data[:, 0] *= _hartree_to_ev_
    energies = data[:, 0]
    # NOTE(review): when sigma > 0, gauss_smear returns only the smeared
    # density columns (the energy column is dropped), so the "i + 2" /
    # "data[:, 2:]" indexing below looks shifted by one in that path --
    # confirm against reference output before relying on smeared PDOS.
    data = gauss_smear(data, sigma)
    pdos = {
        kind: {
            getattr(Orbital, h): Dos(efermi=efermi, energies=energies, densities={spin: data[:, i + 2]})
            for i, h in enumerate(header)
        }
    }
    if total:
        tdos = Dos(
            efermi=efermi,
            energies=energies,
            densities={spin: np.sum(data[:, 2:], axis=1)},
        )
        return pdos, tdos
    return pdos
def gauss_smear(data, width):
    """
    Apply Gaussian smearing to a DOS array.

    Args:
        data: 2D array whose first column is the energy grid and whose
            remaining columns are density values.
        width: Gaussian broadening width; a falsy value disables smearing and
            the input is returned unchanged.

    Returns:
        Array of the smeared density columns (the energy column is consumed as
        the grid and not included in the output), or ``data`` itself when
        width is falsy.
    """
    if not width:
        return data
    n_points, n_cols = data.shape
    energy_grid = np.linspace(np.min(data[:, 0]), np.max(data[:, 0]), n_points)
    # grid[j, k] == energy_grid[j]; subtracting data[:, 0] broadcasts the
    # source energies across columns.
    grid = np.multiply(np.ones((n_points, n_points)), energy_grid).T

    def _smear_column(col):
        # Weight each sample by a normalized Gaussian kernel and sum.
        kernel = np.exp(-((np.subtract(grid, data[:, 0]) / width) ** 2)) / (np.sqrt(np.pi) * width)
        return np.sum(np.multiply(kernel, col), axis=1)

    return np.array([_smear_column(data[:, idx]) for idx in range(1, n_cols)]).T
|
gmatteo/pymatgen
|
pymatgen/io/cp2k/outputs.py
|
Python
|
mit
| 55,516
|
[
"CP2K",
"Gaussian",
"VASP",
"pymatgen"
] |
a1b2a040ad7a239d8bdd36a6dc671e339366b43b704fd143c1d210c12d07144d
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.