repository_name stringclasses 316 values | func_path_in_repository stringlengths 6 223 | func_name stringlengths 1 134 | language stringclasses 1 value | func_code_string stringlengths 57 65.5k | func_documentation_string stringlengths 1 46.3k | split_name stringclasses 1 value | func_code_url stringlengths 91 315 | called_functions listlengths 1 156 ⌀ | enclosing_scope stringlengths 2 1.48M |
|---|---|---|---|---|---|---|---|---|---|
deeplook/svglib | svglib/svglib.py | AttributeConverter.getAllAttributes | python | def getAllAttributes(self, svgNode):
"Return a dictionary of all attributes of svgNode or those inherited by it."
dict = {}
if node_name(svgNode.getparent()) == 'g':
dict.update(self.getAllAttributes(svgNode.getparent()))
style = svgNode.attrib.get("style")
if style:
d = self.parseMultiAttributes(style)
dict.update(d)
for key, value in svgNode.attrib.items():
if key != "style":
dict[key] = value
return dict | Return a dictionary of all attributes of svgNode or those inherited by it. | train | https://github.com/deeplook/svglib/blob/859f9f461f1041018af3e6f507bb4c0616b04fbb/svglib/svglib.py#L225-L242 | [
"def node_name(node):\n \"\"\"Return lxml node name without the namespace prefix.\"\"\"\n\n try:\n return node.tag.split('}')[-1]\n except AttributeError:\n pass\n",
"def getAllAttributes(self, svgNode):\n \"Return a dictionary of all attributes of svgNode or those inherited by it.\"\n\n dict = {}\n\n if node_name(svgNode.getparent()) == 'g':\n dict.update(self.getAllAttributes(svgNode.getparent()))\n\n style = svgNode.attrib.get(\"style\")\n if style:\n d = self.parseMultiAttributes(style)\n dict.update(d)\n\n for key, value in svgNode.attrib.items():\n if key != \"style\":\n dict[key] = value\n\n return dict\n"
] | class AttributeConverter(object):
"An abstract class to locate and convert attributes in a DOM instance."
def __init__(self):
self.css_rules = None
def parseMultiAttributes(self, line):
"""Try parsing compound attribute string.
Return a dictionary with single attributes in 'line'.
"""
attrs = line.split(';')
attrs = [a.strip() for a in attrs]
attrs = filter(lambda a:len(a)>0, attrs)
new_attrs = {}
for a in attrs:
k, v = a.split(':')
k, v = [s.strip() for s in (k, v)]
new_attrs[k] = v
return new_attrs
def findAttr(self, svgNode, name):
"""Search an attribute with some name in some node or above.
First the node is searched, then its style attribute, then
the search continues in the node's parent node. If no such
attribute is found, '' is returned.
"""
# This needs also to lookup values like "url(#SomeName)"...
if self.css_rules is not None and not svgNode.attrib.get('__rules_applied', False):
if isinstance(svgNode, NodeTracker):
svgNode.apply_rules(self.css_rules)
else:
ElementWrapper(svgNode).apply_rules(self.css_rules)
attr_value = svgNode.attrib.get(name, '').strip()
if attr_value and attr_value != "inherit":
return attr_value
elif svgNode.attrib.get("style"):
dict = self.parseMultiAttributes(svgNode.attrib.get("style"))
if name in dict:
return dict[name]
if svgNode.getparent() is not None:
return self.findAttr(svgNode.getparent(), name)
return ''
def id(self, svgAttr):
"Return attribute as is."
return svgAttr
def convertTransform(self, svgAttr):
"""Parse transform attribute string.
E.g. "scale(2) translate(10,20)"
-> [("scale", 2), ("translate", (10,20))]
"""
line = svgAttr.strip()
ops = line[:]
brackets = []
indices = []
for i, lin in enumerate(line):
if lin in "()":
brackets.append(i)
for i in range(0, len(brackets), 2):
bi, bj = brackets[i], brackets[i+1]
subline = line[bi+1:bj]
subline = subline.strip()
subline = subline.replace(',', ' ')
subline = re.sub("[ ]+", ',', subline)
try:
if ',' in subline:
indices.append(tuple(float(num) for num in subline.split(',')))
else:
indices.append(float(subline))
except ValueError:
continue
ops = ops[:bi] + ' '*(bj-bi+1) + ops[bj+1:]
ops = ops.replace(',', ' ').split()
if len(ops) != len(indices):
logger.warning("Unable to parse transform expression '%s'" % svgAttr)
return []
result = []
for i, op in enumerate(ops):
result.append((op, indices[i]))
return result
|
deeplook/svglib | svglib/svglib.py | AttributeConverter.convertTransform | python | def convertTransform(self, svgAttr):
line = svgAttr.strip()
ops = line[:]
brackets = []
indices = []
for i, lin in enumerate(line):
if lin in "()":
brackets.append(i)
for i in range(0, len(brackets), 2):
bi, bj = brackets[i], brackets[i+1]
subline = line[bi+1:bj]
subline = subline.strip()
subline = subline.replace(',', ' ')
subline = re.sub("[ ]+", ',', subline)
try:
if ',' in subline:
indices.append(tuple(float(num) for num in subline.split(',')))
else:
indices.append(float(subline))
except ValueError:
continue
ops = ops[:bi] + ' '*(bj-bi+1) + ops[bj+1:]
ops = ops.replace(',', ' ').split()
if len(ops) != len(indices):
logger.warning("Unable to parse transform expression '%s'" % svgAttr)
return []
result = []
for i, op in enumerate(ops):
result.append((op, indices[i]))
return result | Parse transform attribute string.
E.g. "scale(2) translate(10,20)"
-> [("scale", 2), ("translate", (10,20))] | train | https://github.com/deeplook/svglib/blob/859f9f461f1041018af3e6f507bb4c0616b04fbb/svglib/svglib.py#L248-L287 | null | class AttributeConverter(object):
"An abstract class to locate and convert attributes in a DOM instance."
def __init__(self):
self.css_rules = None
def parseMultiAttributes(self, line):
"""Try parsing compound attribute string.
Return a dictionary with single attributes in 'line'.
"""
attrs = line.split(';')
attrs = [a.strip() for a in attrs]
attrs = filter(lambda a:len(a)>0, attrs)
new_attrs = {}
for a in attrs:
k, v = a.split(':')
k, v = [s.strip() for s in (k, v)]
new_attrs[k] = v
return new_attrs
def findAttr(self, svgNode, name):
"""Search an attribute with some name in some node or above.
First the node is searched, then its style attribute, then
the search continues in the node's parent node. If no such
attribute is found, '' is returned.
"""
# This needs also to lookup values like "url(#SomeName)"...
if self.css_rules is not None and not svgNode.attrib.get('__rules_applied', False):
if isinstance(svgNode, NodeTracker):
svgNode.apply_rules(self.css_rules)
else:
ElementWrapper(svgNode).apply_rules(self.css_rules)
attr_value = svgNode.attrib.get(name, '').strip()
if attr_value and attr_value != "inherit":
return attr_value
elif svgNode.attrib.get("style"):
dict = self.parseMultiAttributes(svgNode.attrib.get("style"))
if name in dict:
return dict[name]
if svgNode.getparent() is not None:
return self.findAttr(svgNode.getparent(), name)
return ''
def getAllAttributes(self, svgNode):
"Return a dictionary of all attributes of svgNode or those inherited by it."
dict = {}
if node_name(svgNode.getparent()) == 'g':
dict.update(self.getAllAttributes(svgNode.getparent()))
style = svgNode.attrib.get("style")
if style:
d = self.parseMultiAttributes(style)
dict.update(d)
for key, value in svgNode.attrib.items():
if key != "style":
dict[key] = value
return dict
def id(self, svgAttr):
"Return attribute as is."
return svgAttr
|
deeplook/svglib | svglib/svglib.py | Svg2RlgAttributeConverter.convertLength | python | def convertLength(self, svgAttr, percentOf=100, em_base=12):
"Convert length to points."
text = svgAttr
if not text:
return 0.0
if ' ' in text.replace(',', ' ').strip():
logger.debug("Only getting first value of %s" % text)
text = text.replace(',', ' ').split()[0]
if text.endswith('%'):
logger.debug("Fiddling length unit: %")
return float(text[:-1]) / 100 * percentOf
elif text.endswith("pc"):
return float(text[:-2]) * pica
elif text.endswith("pt"):
return float(text[:-2]) * 1.25
elif text.endswith("em"):
return float(text[:-2]) * em_base
elif text.endswith("px"):
return float(text[:-2])
if "ex" in text:
logger.warning("Ignoring unit ex")
text = text.replace("ex", '')
text = text.strip()
length = toLength(text) # this does the default measurements such as mm and cm
return length | Convert length to points. | train | https://github.com/deeplook/svglib/blob/859f9f461f1041018af3e6f507bb4c0616b04fbb/svglib/svglib.py#L305-L334 | null | class Svg2RlgAttributeConverter(AttributeConverter):
"A concrete SVG to RLG attribute converter."
def __init__(self, color_converter=None):
super(Svg2RlgAttributeConverter, self).__init__()
self.color_converter = color_converter or self.identity_color_converter
@staticmethod
def identity_color_converter(c):
return c
@staticmethod
def split_attr_list(attr):
return shlex.split(attr.strip().replace(',', ' '))
def convertLengthList(self, svgAttr):
"""Convert a list of lengths."""
return [self.convertLength(a) for a in self.split_attr_list(svgAttr)]
def convertOpacity(self, svgAttr):
return float(svgAttr)
def convertFillRule(self, svgAttr):
return {
'nonzero': FILL_NON_ZERO,
'evenodd': FILL_EVEN_ODD,
}.get(svgAttr, '')
def convertColor(self, svgAttr):
"Convert string to a RL color object."
# fix it: most likely all "web colors" are allowed
predefined = "aqua black blue fuchsia gray green lime maroon navy "
predefined = predefined + "olive orange purple red silver teal white yellow "
predefined = predefined + "lawngreen indianred aquamarine lightgreen brown"
# This needs also to lookup values like "url(#SomeName)"...
text = svgAttr
if not text or text == "none":
return None
if text in predefined.split():
return self.color_converter(getattr(colors, text))
elif text == "currentColor":
return "currentColor"
elif len(text) == 7 and text[0] == '#':
return self.color_converter(colors.HexColor(text))
elif len(text) == 4 and text[0] == '#':
return self.color_converter(colors.HexColor('#' + 2*text[1] + 2*text[2] + 2*text[3]))
elif text.startswith('rgb') and '%' not in text:
t = text[3:].strip('()')
tup = [h[2:] for h in [hex(int(num)) for num in t.split(',')]]
tup = [(2 - len(h)) * '0' + h for h in tup]
col = "#%s%s%s" % tuple(tup)
return self.color_converter(colors.HexColor(col))
elif text.startswith('rgb') and '%' in text:
t = text[3:].replace('%', '').strip('()')
tup = (float(val)/100.0 for val in t.split(','))
return self.color_converter(colors.Color(*tup))
logger.warning("Can't handle color: %s" % text)
return None
def convertLineJoin(self, svgAttr):
return {"miter":0, "round":1, "bevel":2}[svgAttr]
def convertLineCap(self, svgAttr):
return {"butt":0, "round":1, "square":2}[svgAttr]
def convertDashArray(self, svgAttr):
strokeDashArray = self.convertLengthList(svgAttr)
return strokeDashArray
def convertDashOffset(self, svgAttr):
strokeDashOffset = self.convertLength(svgAttr)
return strokeDashOffset
def convertFontFamily(self, svgAttr):
if not svgAttr:
return ''
# very hackish
font_mapping = {
"sans-serif": "Helvetica",
"serif": "Times-Roman",
"times": "Times-Roman",
"monospace": "Courier",
}
font_names = [
font_mapping.get(font_name.lower(), font_name)
for font_name in self.split_attr_list(svgAttr)
]
non_exact_matches = []
for font_name in font_names:
font_name, exact = find_font(font_name)
if exact:
return font_name
elif font_name:
non_exact_matches.append(font_name)
if non_exact_matches:
return non_exact_matches[0]
else:
logger.warning("Unable to find a suitable font for 'font-family:%s'" % svgAttr)
return DEFAULT_FONT_NAME
|
deeplook/svglib | svglib/svglib.py | Svg2RlgAttributeConverter.convertLengthList | python | def convertLengthList(self, svgAttr):
return [self.convertLength(a) for a in self.split_attr_list(svgAttr)] | Convert a list of lengths. | train | https://github.com/deeplook/svglib/blob/859f9f461f1041018af3e6f507bb4c0616b04fbb/svglib/svglib.py#L336-L338 | [
"def split_attr_list(attr):\n return shlex.split(attr.strip().replace(',', ' '))\n"
] | class Svg2RlgAttributeConverter(AttributeConverter):
"A concrete SVG to RLG attribute converter."
def __init__(self, color_converter=None):
super(Svg2RlgAttributeConverter, self).__init__()
self.color_converter = color_converter or self.identity_color_converter
@staticmethod
def identity_color_converter(c):
return c
@staticmethod
def split_attr_list(attr):
return shlex.split(attr.strip().replace(',', ' '))
def convertLength(self, svgAttr, percentOf=100, em_base=12):
"Convert length to points."
text = svgAttr
if not text:
return 0.0
if ' ' in text.replace(',', ' ').strip():
logger.debug("Only getting first value of %s" % text)
text = text.replace(',', ' ').split()[0]
if text.endswith('%'):
logger.debug("Fiddling length unit: %")
return float(text[:-1]) / 100 * percentOf
elif text.endswith("pc"):
return float(text[:-2]) * pica
elif text.endswith("pt"):
return float(text[:-2]) * 1.25
elif text.endswith("em"):
return float(text[:-2]) * em_base
elif text.endswith("px"):
return float(text[:-2])
if "ex" in text:
logger.warning("Ignoring unit ex")
text = text.replace("ex", '')
text = text.strip()
length = toLength(text) # this does the default measurements such as mm and cm
return length
def convertOpacity(self, svgAttr):
return float(svgAttr)
def convertFillRule(self, svgAttr):
return {
'nonzero': FILL_NON_ZERO,
'evenodd': FILL_EVEN_ODD,
}.get(svgAttr, '')
def convertColor(self, svgAttr):
"Convert string to a RL color object."
# fix it: most likely all "web colors" are allowed
predefined = "aqua black blue fuchsia gray green lime maroon navy "
predefined = predefined + "olive orange purple red silver teal white yellow "
predefined = predefined + "lawngreen indianred aquamarine lightgreen brown"
# This needs also to lookup values like "url(#SomeName)"...
text = svgAttr
if not text or text == "none":
return None
if text in predefined.split():
return self.color_converter(getattr(colors, text))
elif text == "currentColor":
return "currentColor"
elif len(text) == 7 and text[0] == '#':
return self.color_converter(colors.HexColor(text))
elif len(text) == 4 and text[0] == '#':
return self.color_converter(colors.HexColor('#' + 2*text[1] + 2*text[2] + 2*text[3]))
elif text.startswith('rgb') and '%' not in text:
t = text[3:].strip('()')
tup = [h[2:] for h in [hex(int(num)) for num in t.split(',')]]
tup = [(2 - len(h)) * '0' + h for h in tup]
col = "#%s%s%s" % tuple(tup)
return self.color_converter(colors.HexColor(col))
elif text.startswith('rgb') and '%' in text:
t = text[3:].replace('%', '').strip('()')
tup = (float(val)/100.0 for val in t.split(','))
return self.color_converter(colors.Color(*tup))
logger.warning("Can't handle color: %s" % text)
return None
def convertLineJoin(self, svgAttr):
return {"miter":0, "round":1, "bevel":2}[svgAttr]
def convertLineCap(self, svgAttr):
return {"butt":0, "round":1, "square":2}[svgAttr]
def convertDashArray(self, svgAttr):
strokeDashArray = self.convertLengthList(svgAttr)
return strokeDashArray
def convertDashOffset(self, svgAttr):
strokeDashOffset = self.convertLength(svgAttr)
return strokeDashOffset
def convertFontFamily(self, svgAttr):
if not svgAttr:
return ''
# very hackish
font_mapping = {
"sans-serif": "Helvetica",
"serif": "Times-Roman",
"times": "Times-Roman",
"monospace": "Courier",
}
font_names = [
font_mapping.get(font_name.lower(), font_name)
for font_name in self.split_attr_list(svgAttr)
]
non_exact_matches = []
for font_name in font_names:
font_name, exact = find_font(font_name)
if exact:
return font_name
elif font_name:
non_exact_matches.append(font_name)
if non_exact_matches:
return non_exact_matches[0]
else:
logger.warning("Unable to find a suitable font for 'font-family:%s'" % svgAttr)
return DEFAULT_FONT_NAME
|
deeplook/svglib | svglib/svglib.py | Svg2RlgAttributeConverter.convertColor | python | def convertColor(self, svgAttr):
"Convert string to a RL color object."
# fix it: most likely all "web colors" are allowed
predefined = "aqua black blue fuchsia gray green lime maroon navy "
predefined = predefined + "olive orange purple red silver teal white yellow "
predefined = predefined + "lawngreen indianred aquamarine lightgreen brown"
# This needs also to lookup values like "url(#SomeName)"...
text = svgAttr
if not text or text == "none":
return None
if text in predefined.split():
return self.color_converter(getattr(colors, text))
elif text == "currentColor":
return "currentColor"
elif len(text) == 7 and text[0] == '#':
return self.color_converter(colors.HexColor(text))
elif len(text) == 4 and text[0] == '#':
return self.color_converter(colors.HexColor('#' + 2*text[1] + 2*text[2] + 2*text[3]))
elif text.startswith('rgb') and '%' not in text:
t = text[3:].strip('()')
tup = [h[2:] for h in [hex(int(num)) for num in t.split(',')]]
tup = [(2 - len(h)) * '0' + h for h in tup]
col = "#%s%s%s" % tuple(tup)
return self.color_converter(colors.HexColor(col))
elif text.startswith('rgb') and '%' in text:
t = text[3:].replace('%', '').strip('()')
tup = (float(val)/100.0 for val in t.split(','))
return self.color_converter(colors.Color(*tup))
logger.warning("Can't handle color: %s" % text)
return None | Convert string to a RL color object. | train | https://github.com/deeplook/svglib/blob/859f9f461f1041018af3e6f507bb4c0616b04fbb/svglib/svglib.py#L349-L384 | null | class Svg2RlgAttributeConverter(AttributeConverter):
"A concrete SVG to RLG attribute converter."
def __init__(self, color_converter=None):
super(Svg2RlgAttributeConverter, self).__init__()
self.color_converter = color_converter or self.identity_color_converter
@staticmethod
def identity_color_converter(c):
return c
@staticmethod
def split_attr_list(attr):
return shlex.split(attr.strip().replace(',', ' '))
def convertLength(self, svgAttr, percentOf=100, em_base=12):
"Convert length to points."
text = svgAttr
if not text:
return 0.0
if ' ' in text.replace(',', ' ').strip():
logger.debug("Only getting first value of %s" % text)
text = text.replace(',', ' ').split()[0]
if text.endswith('%'):
logger.debug("Fiddling length unit: %")
return float(text[:-1]) / 100 * percentOf
elif text.endswith("pc"):
return float(text[:-2]) * pica
elif text.endswith("pt"):
return float(text[:-2]) * 1.25
elif text.endswith("em"):
return float(text[:-2]) * em_base
elif text.endswith("px"):
return float(text[:-2])
if "ex" in text:
logger.warning("Ignoring unit ex")
text = text.replace("ex", '')
text = text.strip()
length = toLength(text) # this does the default measurements such as mm and cm
return length
def convertLengthList(self, svgAttr):
"""Convert a list of lengths."""
return [self.convertLength(a) for a in self.split_attr_list(svgAttr)]
def convertOpacity(self, svgAttr):
return float(svgAttr)
def convertFillRule(self, svgAttr):
return {
'nonzero': FILL_NON_ZERO,
'evenodd': FILL_EVEN_ODD,
}.get(svgAttr, '')
def convertLineJoin(self, svgAttr):
return {"miter":0, "round":1, "bevel":2}[svgAttr]
def convertLineCap(self, svgAttr):
return {"butt":0, "round":1, "square":2}[svgAttr]
def convertDashArray(self, svgAttr):
strokeDashArray = self.convertLengthList(svgAttr)
return strokeDashArray
def convertDashOffset(self, svgAttr):
strokeDashOffset = self.convertLength(svgAttr)
return strokeDashOffset
def convertFontFamily(self, svgAttr):
if not svgAttr:
return ''
# very hackish
font_mapping = {
"sans-serif": "Helvetica",
"serif": "Times-Roman",
"times": "Times-Roman",
"monospace": "Courier",
}
font_names = [
font_mapping.get(font_name.lower(), font_name)
for font_name in self.split_attr_list(svgAttr)
]
non_exact_matches = []
for font_name in font_names:
font_name, exact = find_font(font_name)
if exact:
return font_name
elif font_name:
non_exact_matches.append(font_name)
if non_exact_matches:
return non_exact_matches[0]
else:
logger.warning("Unable to find a suitable font for 'font-family:%s'" % svgAttr)
return DEFAULT_FONT_NAME
|
deeplook/svglib | svglib/svglib.py | SvgRenderer.get_clippath | python | def get_clippath(self, node):
def get_path_from_node(node):
for child in node.getchildren():
if node_name(child) == 'path':
group = self.shape_converter.convertShape('path', NodeTracker(child))
return group.contents[-1]
else:
return get_path_from_node(child)
clip_path = node.getAttribute('clip-path')
if clip_path:
m = re.match(r'url\(#([^\)]*)\)', clip_path)
if m:
ref = m.groups()[0]
if ref in self.definitions:
path = get_path_from_node(self.definitions[ref])
if path:
path = ClippingPath(copy_from=path)
return path | Return the clipping Path object referenced by the node 'clip-path'
attribute, if any. | train | https://github.com/deeplook/svglib/blob/859f9f461f1041018af3e6f507bb4c0616b04fbb/svglib/svglib.py#L626-L648 | [
"def get_path_from_node(node):\n for child in node.getchildren():\n if node_name(child) == 'path':\n group = self.shape_converter.convertShape('path', NodeTracker(child))\n return group.contents[-1]\n else:\n return get_path_from_node(child)\n"
] | class SvgRenderer:
"""Renderer that renders an SVG file on a ReportLab Drawing instance.
This is the base class for walking over an SVG DOM document and
transforming it into a ReportLab Drawing instance.
"""
def __init__(self, path, color_converter=None, parent_svgs=None):
self.source_path = path
self._parent_chain = parent_svgs or [] # To detect circular refs.
self.attrConverter = Svg2RlgAttributeConverter(color_converter=color_converter)
self.shape_converter = Svg2RlgShapeConverter(path, self.attrConverter)
self.handled_shapes = self.shape_converter.get_handled_shapes()
self.definitions = {}
self.waiting_use_nodes = defaultdict(list)
self._external_svgs = {}
def render(self, svg_node):
node = NodeTracker(svg_node)
main_group = self.renderSvg(node, outermost=True)
for xlink in self.waiting_use_nodes.keys():
logger.debug("Ignoring unavailable object width ID '%s'." % xlink)
view_box = self.get_box(node, default_box=True)
main_group.translate(0 - view_box.x, -view_box.height - view_box.y)
width, height = svg_node.attrib.get("width"), svg_node.attrib.get("height")
width, height = map(self.attrConverter.convertLength, (width, height))
drawing = Drawing(width, height)
drawing.add(main_group)
return drawing
def renderNode(self, node, parent=None):
n = NodeTracker(node)
nid = n.getAttribute("id")
ignored = False
item = None
name = node_name(node)
clipping = self.get_clippath(n)
if name == "svg":
item = self.renderSvg(n)
parent.add(item)
elif name == "defs":
item = self.renderG(n)
elif name == 'a':
item = self.renderA(n)
parent.add(item)
elif name == 'g':
display = n.getAttribute("display")
item = self.renderG(n, clipping=clipping)
if display != "none":
parent.add(item)
elif name == "style":
self.renderStyle(n)
elif name == "symbol":
item = self.renderSymbol(n)
parent.add(item)
elif name == "use":
item = self.renderUse(n, clipping=clipping)
parent.add(item)
elif name == "clipPath":
item = self.renderG(n)
elif name in self.handled_shapes:
if name == 'image':
# We resolve the image target at renderer level because it can point
# to another SVG file or node which has to be rendered too.
target = self.xlink_href_target(n)
if target is None:
return
elif isinstance(target, tuple):
# This is SVG content needed to be rendered
gr = Group()
renderer, node = target
renderer.renderNode(node, parent=gr)
self.apply_node_attr_to_group(n, gr)
parent.add(gr)
return
else:
# Attaching target to node, so we can get it back in convertImage
n._resolved_target = target
item = self.shape_converter.convertShape(name, n, clipping)
display = n.getAttribute("display")
if item and display != "none":
parent.add(item)
else:
ignored = True
logger.debug("Ignoring node: %s" % name)
if not ignored:
if nid and item:
self.definitions[nid] = node
if nid in self.waiting_use_nodes.keys():
to_render = self.waiting_use_nodes.pop(nid)
for use_node, group in to_render:
self.renderUse(use_node, group=group)
self.print_unused_attributes(node, n)
def print_unused_attributes(self, node, n):
if logger.level > logging.DEBUG:
return
all_attrs = self.attrConverter.getAllAttributes(node).keys()
unused_attrs = [attr for attr in all_attrs if attr not in n.usedAttrs]
if unused_attrs:
logger.debug("Unused attrs: %s %s" % (node_name(n), unused_attrs))
def apply_node_attr_to_group(self, node, group):
getAttr = node.getAttribute
transform, x, y = map(getAttr, ("transform", "x", "y"))
if x or y:
transform += " translate(%s, %s)" % (x or '0', y or '0')
if transform:
self.shape_converter.applyTransformOnGroup(transform, group)
def xlink_href_target(self, node, group=None):
"""
Return either:
- a tuple (renderer, node) when the the xlink:href attribute targets
a vector file or node
- the path to an image file for any raster image targets
- None if any problem occurs
"""
xlink_href = node.attrib.get('{http://www.w3.org/1999/xlink}href')
if not xlink_href:
return None
# First handle any raster embedded image data
match = re.match(r"^data:image/(jpeg|png);base64", xlink_href)
if match:
img_format = match.groups()[0]
image_data = base64.decodestring(xlink_href[(match.span(0)[1] + 1):].encode('ascii'))
file_indicator, path = tempfile.mkstemp(suffix='.%s' % img_format)
with open(path, 'wb') as fh:
fh.write(image_data)
# Close temporary file (as opened by tempfile.mkstemp)
os.close(file_indicator)
# this needs to be removed later, not here...
# if exists(path): os.remove(path)
return path
# From here, we can assume this is a path.
if '#' in xlink_href:
iri, fragment = xlink_href.split('#', 1)
else:
iri, fragment = xlink_href, None
if iri:
# Only local relative paths are supported yet
if not isinstance(self.source_path, str):
logger.error(
"Unable to resolve image path '%s' as the SVG source is not a file system path." % iri
)
return None
path = os.path.normpath(os.path.join(os.path.dirname(self.source_path), iri))
if not os.access(path, os.R_OK):
return None
if path == self.source_path:
# Self-referencing, ignore the IRI part
iri = None
if iri:
if path.endswith('.svg'):
if path in self._parent_chain:
logger.error("Circular reference detected in file.")
raise CircularRefError()
if path not in self._external_svgs:
self._external_svgs[path] = ExternalSVG(path, self)
ext_svg = self._external_svgs[path]
if ext_svg.root_node is not None:
if fragment:
ext_frag = ext_svg.get_fragment(fragment)
if ext_frag is not None:
return ext_svg.renderer, ext_frag
else:
return ext_svg.renderer, ext_svg.root_node
else:
# A raster image path
try:
# This will catch invalid images
PDFImage(path, 0, 0)
except IOError:
logger.error("Unable to read the image %s. Skipping..." % path)
return None
return path
elif fragment:
# A pointer to an internal definition
if fragment in self.definitions:
return self, self.definitions[fragment]
else:
# The missing definition should appear later in the file
self.waiting_use_nodes[fragment].append((node, group))
return DELAYED
def renderTitle_(self, node):
# Main SVG title attr. could be used in the PDF document info field.
pass
def renderDesc_(self, node):
# Main SVG desc. attr. could be used in the PDF document info field.
pass
def get_box(self, svg_node, default_box=False):
view_box = svg_node.getAttribute("viewBox")
if view_box:
view_box = self.attrConverter.convertLengthList(view_box)
return Box(*view_box)
if default_box:
width, height = map(svg_node.getAttribute, ("width", "height"))
width, height = map(self.attrConverter.convertLength, (width, height))
return Box(0, 0, width, height)
def renderSvg(self, node, outermost=False):
getAttr = node.getAttribute
_saved_preserve_space = self.shape_converter.preserve_space
self.shape_converter.preserve_space = getAttr("{%s}space" % XML_NS) == 'preserve'
group = Group()
for child in node.getchildren():
self.renderNode(child, group)
self.shape_converter.preserve_space = _saved_preserve_space
# Translating
if not outermost:
x, y = map(getAttr, ("x", "y"))
x, y = map(self.attrConverter.convertLength, (x, y))
if x or y:
group.translate(x or 0, y or 0)
# Scaling
view_box = self.get_box(node)
if not view_box and outermost:
# Apply only the 'reverse' y-scaling (PDF 0,0 is bottom left)
group.scale(1, -1)
elif view_box:
x_scale, y_scale = 1, 1
width, height = map(getAttr, ("width", "height"))
width, height = map(self.attrConverter.convertLength, (width, height))
if view_box.height != height:
y_scale = height / view_box.height
if view_box.width != width:
x_scale = width / view_box.width
group.scale(x_scale, y_scale * (-1 if outermost else 1))
return group
def renderG(self, node, clipping=None, display=1):
getAttr = node.getAttribute
id, transform = map(getAttr, ("id", "transform"))
gr = Group()
if clipping:
gr.add(clipping)
for child in node.getchildren():
item = self.renderNode(child, parent=gr)
if item and display:
gr.add(item)
if transform:
self.shape_converter.applyTransformOnGroup(transform, gr)
return gr
def renderStyle(self, node):
self.attrConverter.css_rules = CSSMatcher(node.text)
def renderSymbol(self, node):
return self.renderG(node, display=0)
def renderA(self, node):
# currently nothing but a group...
# there is no linking info stored in shapes, maybe a group should?
return self.renderG(node)
def renderUse(self, node, group=None, clipping=None):
if group is None:
group = Group()
try:
item = self.xlink_href_target(node, group=group)
except CircularRefError:
node.parent.object.remove(node.object)
return group
if item is None:
return
elif isinstance(item, str):
logger.error("<use> nodes cannot reference bitmap image files")
return
elif item is DELAYED:
return group
else:
item = item[1] # [0] is the renderer, not used here.
if clipping:
group.add(clipping)
if len(node.getchildren()) == 0:
# Append a copy of the referenced node as the <use> child (if not already done)
node.append(copy.deepcopy(item))
self.renderNode(node.getchildren()[-1], parent=group)
self.apply_node_attr_to_group(node, group)
return group
|
deeplook/svglib | svglib/svglib.py | SvgRenderer.xlink_href_target | python | def xlink_href_target(self, node, group=None):
xlink_href = node.attrib.get('{http://www.w3.org/1999/xlink}href')
if not xlink_href:
return None
# First handle any raster embedded image data
match = re.match(r"^data:image/(jpeg|png);base64", xlink_href)
if match:
img_format = match.groups()[0]
image_data = base64.decodestring(xlink_href[(match.span(0)[1] + 1):].encode('ascii'))
file_indicator, path = tempfile.mkstemp(suffix='.%s' % img_format)
with open(path, 'wb') as fh:
fh.write(image_data)
# Close temporary file (as opened by tempfile.mkstemp)
os.close(file_indicator)
# this needs to be removed later, not here...
# if exists(path): os.remove(path)
return path
# From here, we can assume this is a path.
if '#' in xlink_href:
iri, fragment = xlink_href.split('#', 1)
else:
iri, fragment = xlink_href, None
if iri:
# Only local relative paths are supported yet
if not isinstance(self.source_path, str):
logger.error(
"Unable to resolve image path '%s' as the SVG source is not a file system path." % iri
)
return None
path = os.path.normpath(os.path.join(os.path.dirname(self.source_path), iri))
if not os.access(path, os.R_OK):
return None
if path == self.source_path:
# Self-referencing, ignore the IRI part
iri = None
if iri:
if path.endswith('.svg'):
if path in self._parent_chain:
logger.error("Circular reference detected in file.")
raise CircularRefError()
if path not in self._external_svgs:
self._external_svgs[path] = ExternalSVG(path, self)
ext_svg = self._external_svgs[path]
if ext_svg.root_node is not None:
if fragment:
ext_frag = ext_svg.get_fragment(fragment)
if ext_frag is not None:
return ext_svg.renderer, ext_frag
else:
return ext_svg.renderer, ext_svg.root_node
else:
# A raster image path
try:
# This will catch invalid images
PDFImage(path, 0, 0)
except IOError:
logger.error("Unable to read the image %s. Skipping..." % path)
return None
return path
elif fragment:
# A pointer to an internal definition
if fragment in self.definitions:
return self, self.definitions[fragment]
else:
# The missing definition should appear later in the file
self.waiting_use_nodes[fragment].append((node, group))
return DELAYED | Return either:
- a tuple (renderer, node) when the the xlink:href attribute targets
a vector file or node
- the path to an image file for any raster image targets
- None if any problem occurs | train | https://github.com/deeplook/svglib/blob/859f9f461f1041018af3e6f507bb4c0616b04fbb/svglib/svglib.py#L666-L744 | null | class SvgRenderer:
"""Renderer that renders an SVG file on a ReportLab Drawing instance.
This is the base class for walking over an SVG DOM document and
transforming it into a ReportLab Drawing instance.
"""
def __init__(self, path, color_converter=None, parent_svgs=None):
self.source_path = path
self._parent_chain = parent_svgs or [] # To detect circular refs.
self.attrConverter = Svg2RlgAttributeConverter(color_converter=color_converter)
self.shape_converter = Svg2RlgShapeConverter(path, self.attrConverter)
self.handled_shapes = self.shape_converter.get_handled_shapes()
self.definitions = {}
self.waiting_use_nodes = defaultdict(list)
self._external_svgs = {}
def render(self, svg_node):
node = NodeTracker(svg_node)
main_group = self.renderSvg(node, outermost=True)
for xlink in self.waiting_use_nodes.keys():
logger.debug("Ignoring unavailable object width ID '%s'." % xlink)
view_box = self.get_box(node, default_box=True)
main_group.translate(0 - view_box.x, -view_box.height - view_box.y)
width, height = svg_node.attrib.get("width"), svg_node.attrib.get("height")
width, height = map(self.attrConverter.convertLength, (width, height))
drawing = Drawing(width, height)
drawing.add(main_group)
return drawing
def renderNode(self, node, parent=None):
n = NodeTracker(node)
nid = n.getAttribute("id")
ignored = False
item = None
name = node_name(node)
clipping = self.get_clippath(n)
if name == "svg":
item = self.renderSvg(n)
parent.add(item)
elif name == "defs":
item = self.renderG(n)
elif name == 'a':
item = self.renderA(n)
parent.add(item)
elif name == 'g':
display = n.getAttribute("display")
item = self.renderG(n, clipping=clipping)
if display != "none":
parent.add(item)
elif name == "style":
self.renderStyle(n)
elif name == "symbol":
item = self.renderSymbol(n)
parent.add(item)
elif name == "use":
item = self.renderUse(n, clipping=clipping)
parent.add(item)
elif name == "clipPath":
item = self.renderG(n)
elif name in self.handled_shapes:
if name == 'image':
# We resolve the image target at renderer level because it can point
# to another SVG file or node which has to be rendered too.
target = self.xlink_href_target(n)
if target is None:
return
elif isinstance(target, tuple):
# This is SVG content needed to be rendered
gr = Group()
renderer, node = target
renderer.renderNode(node, parent=gr)
self.apply_node_attr_to_group(n, gr)
parent.add(gr)
return
else:
# Attaching target to node, so we can get it back in convertImage
n._resolved_target = target
item = self.shape_converter.convertShape(name, n, clipping)
display = n.getAttribute("display")
if item and display != "none":
parent.add(item)
else:
ignored = True
logger.debug("Ignoring node: %s" % name)
if not ignored:
if nid and item:
self.definitions[nid] = node
if nid in self.waiting_use_nodes.keys():
to_render = self.waiting_use_nodes.pop(nid)
for use_node, group in to_render:
self.renderUse(use_node, group=group)
self.print_unused_attributes(node, n)
def get_clippath(self, node):
"""
Return the clipping Path object referenced by the node 'clip-path'
attribute, if any.
"""
def get_path_from_node(node):
for child in node.getchildren():
if node_name(child) == 'path':
group = self.shape_converter.convertShape('path', NodeTracker(child))
return group.contents[-1]
else:
return get_path_from_node(child)
clip_path = node.getAttribute('clip-path')
if clip_path:
m = re.match(r'url\(#([^\)]*)\)', clip_path)
if m:
ref = m.groups()[0]
if ref in self.definitions:
path = get_path_from_node(self.definitions[ref])
if path:
path = ClippingPath(copy_from=path)
return path
def print_unused_attributes(self, node, n):
if logger.level > logging.DEBUG:
return
all_attrs = self.attrConverter.getAllAttributes(node).keys()
unused_attrs = [attr for attr in all_attrs if attr not in n.usedAttrs]
if unused_attrs:
logger.debug("Unused attrs: %s %s" % (node_name(n), unused_attrs))
def apply_node_attr_to_group(self, node, group):
getAttr = node.getAttribute
transform, x, y = map(getAttr, ("transform", "x", "y"))
if x or y:
transform += " translate(%s, %s)" % (x or '0', y or '0')
if transform:
self.shape_converter.applyTransformOnGroup(transform, group)
def renderTitle_(self, node):
# Main SVG title attr. could be used in the PDF document info field.
pass
def renderDesc_(self, node):
# Main SVG desc. attr. could be used in the PDF document info field.
pass
def get_box(self, svg_node, default_box=False):
view_box = svg_node.getAttribute("viewBox")
if view_box:
view_box = self.attrConverter.convertLengthList(view_box)
return Box(*view_box)
if default_box:
width, height = map(svg_node.getAttribute, ("width", "height"))
width, height = map(self.attrConverter.convertLength, (width, height))
return Box(0, 0, width, height)
def renderSvg(self, node, outermost=False):
getAttr = node.getAttribute
_saved_preserve_space = self.shape_converter.preserve_space
self.shape_converter.preserve_space = getAttr("{%s}space" % XML_NS) == 'preserve'
group = Group()
for child in node.getchildren():
self.renderNode(child, group)
self.shape_converter.preserve_space = _saved_preserve_space
# Translating
if not outermost:
x, y = map(getAttr, ("x", "y"))
x, y = map(self.attrConverter.convertLength, (x, y))
if x or y:
group.translate(x or 0, y or 0)
# Scaling
view_box = self.get_box(node)
if not view_box and outermost:
# Apply only the 'reverse' y-scaling (PDF 0,0 is bottom left)
group.scale(1, -1)
elif view_box:
x_scale, y_scale = 1, 1
width, height = map(getAttr, ("width", "height"))
width, height = map(self.attrConverter.convertLength, (width, height))
if view_box.height != height:
y_scale = height / view_box.height
if view_box.width != width:
x_scale = width / view_box.width
group.scale(x_scale, y_scale * (-1 if outermost else 1))
return group
def renderG(self, node, clipping=None, display=1):
getAttr = node.getAttribute
id, transform = map(getAttr, ("id", "transform"))
gr = Group()
if clipping:
gr.add(clipping)
for child in node.getchildren():
item = self.renderNode(child, parent=gr)
if item and display:
gr.add(item)
if transform:
self.shape_converter.applyTransformOnGroup(transform, gr)
return gr
def renderStyle(self, node):
self.attrConverter.css_rules = CSSMatcher(node.text)
def renderSymbol(self, node):
return self.renderG(node, display=0)
def renderA(self, node):
# currently nothing but a group...
# there is no linking info stored in shapes, maybe a group should?
return self.renderG(node)
def renderUse(self, node, group=None, clipping=None):
if group is None:
group = Group()
try:
item = self.xlink_href_target(node, group=group)
except CircularRefError:
node.parent.object.remove(node.object)
return group
if item is None:
return
elif isinstance(item, str):
logger.error("<use> nodes cannot reference bitmap image files")
return
elif item is DELAYED:
return group
else:
item = item[1] # [0] is the renderer, not used here.
if clipping:
group.add(clipping)
if len(node.getchildren()) == 0:
# Append a copy of the referenced node as the <use> child (if not already done)
node.append(copy.deepcopy(item))
self.renderNode(node.getchildren()[-1], parent=group)
self.apply_node_attr_to_group(node, group)
return group
|
deeplook/svglib | svglib/svglib.py | Svg2RlgShapeConverter.clean_text | python | def clean_text(self, text, preserve_space):
if text is None:
return
if preserve_space:
text = text.replace('\r\n', ' ').replace('\n', ' ').replace('\t', ' ')
else:
text = text.replace('\r\n', '').replace('\n', '').replace('\t', ' ')
text = text.strip()
while (' ' in text):
text = text.replace(' ', ' ')
return text | Text cleaning as per https://www.w3.org/TR/SVG/text.html#WhiteSpace | train | https://github.com/deeplook/svglib/blob/859f9f461f1041018af3e6f507bb4c0616b04fbb/svglib/svglib.py#L977-L989 | null | class Svg2RlgShapeConverter(SvgShapeConverter):
"""Converter from SVG shapes to RLG (ReportLab Graphics) shapes."""
def convertShape(self, name, node, clipping=None):
method_name = "convert%s" % name.capitalize()
shape = getattr(self, method_name)(node)
if not shape:
return
if name not in ('path', 'polyline', 'text'):
# Only apply style where the convert method did not apply it.
self.applyStyleOnShape(shape, node)
transform = node.getAttribute("transform")
if not (transform or clipping):
return shape
else:
group = Group()
if transform:
self.applyTransformOnGroup(transform, group)
if clipping:
group.add(clipping)
group.add(shape)
return group
def convertLine(self, node):
getAttr = node.getAttribute
x1, y1, x2, y2 = map(getAttr, ("x1", "y1", "x2", "y2"))
x1, y1, x2, y2 = map(self.attrConverter.convertLength, (x1, y1, x2, y2))
shape = Line(x1, y1, x2, y2)
return shape
def convertRect(self, node):
getAttr = node.getAttribute
x, y, width, height = map(getAttr, ('x', 'y', "width", "height"))
x, y, width, height = map(self.attrConverter.convertLength, (x, y, width, height))
rx, ry = map(getAttr, ("rx", "ry"))
rx, ry = map(self.attrConverter.convertLength, (rx, ry))
shape = Rect(x, y, width, height, rx=rx, ry=ry)
return shape
def convertCircle(self, node):
# not rendered if r == 0, error if r < 0.
getAttr = node.getAttribute
cx, cy, r = map(getAttr, ("cx", "cy", 'r'))
cx, cy, r = map(self.attrConverter.convertLength, (cx, cy, r))
shape = Circle(cx, cy, r)
return shape
def convertEllipse(self, node):
getAttr = node.getAttribute
cx, cy, rx, ry = map(getAttr, ("cx", "cy", "rx", "ry"))
cx, cy, rx, ry = map(self.attrConverter.convertLength, (cx, cy, rx, ry))
width, height = rx, ry
shape = Ellipse(cx, cy, width, height)
return shape
def convertPolyline(self, node):
getAttr = node.getAttribute
points = getAttr("points")
points = points.replace(',', ' ')
points = points.split()
points = list(map(self.attrConverter.convertLength, points))
if len(points) % 2 != 0 or len(points) == 0:
# Odd number of coordinates or no coordinates, invalid polyline
return None
polyline = PolyLine(points)
self.applyStyleOnShape(polyline, node)
has_fill = self.attrConverter.findAttr(node, 'fill') not in ('', 'none')
if has_fill:
# ReportLab doesn't fill polylines, so we are creating a polygon
# polygon copy of the polyline, but without stroke.
group = Group()
polygon = Polygon(points)
self.applyStyleOnShape(polygon, node)
polygon.strokeColor = None
group.add(polygon)
group.add(polyline)
return group
return polyline
def convertPolygon(self, node):
getAttr = node.getAttribute
points = getAttr("points")
points = points.replace(',', ' ')
points = points.split()
points = list(map(self.attrConverter.convertLength, points))
if len(points) % 2 != 0 or len(points) == 0:
# Odd number of coordinates or no coordinates, invalid polygon
return None
shape = Polygon(points)
return shape
def convertText(self, node):
attrConv = self.attrConverter
xml_space = node.getAttribute("{%s}space" % XML_NS)
if xml_space:
preserve_space = xml_space == 'preserve'
else:
preserve_space = self.preserve_space
gr = Group()
frag_lengths = []
dx0, dy0 = 0, 0
x1, y1 = 0, 0
ff = attrConv.findAttr(node, "font-family") or DEFAULT_FONT_NAME
ff = attrConv.convertFontFamily(ff)
fs = attrConv.findAttr(node, "font-size") or "12"
fs = attrConv.convertLength(fs)
convertLength = partial(attrConv.convertLength, em_base=fs)
x, y = map(node.getAttribute, ('x', 'y'))
x, y = map(convertLength, (x, y))
for c in itertools.chain([node], node.getchildren()):
has_x, has_y = False, False
dx, dy = 0, 0
baseLineShift = 0
if node_name(c) == 'text':
text = self.clean_text(c.text, preserve_space)
if not text:
continue
elif node_name(c) == 'tspan':
text = self.clean_text(c.text, preserve_space)
if not text:
continue
x1, y1, dx, dy = [c.attrib.get(name, '') for name in ("x", "y", "dx", "dy")]
has_x, has_y = (x1 != '', y1 != '')
x1, y1, dx, dy = map(convertLength, (x1, y1, dx, dy))
dx0 = dx0 + dx
dy0 = dy0 + dy
baseLineShift = c.attrib.get("baseline-shift", '0')
if baseLineShift in ("sub", "super", "baseline"):
baseLineShift = {"sub":-fs/2, "super":fs/2, "baseline":0}[baseLineShift]
else:
baseLineShift = convertLength(baseLineShift, fs)
else:
continue
frag_lengths.append(stringWidth(text, ff, fs))
new_x = (x1 + dx) if has_x else (x + dx0 + sum(frag_lengths[:-1]))
new_y = (y1 + dy) if has_y else (y + dy0)
shape = String(new_x, -(new_y - baseLineShift), text)
self.applyStyleOnShape(shape, node)
if node_name(c) == 'tspan':
self.applyStyleOnShape(shape, c)
gr.add(shape)
gr.scale(1, -1)
return gr
def convertPath(self, node):
d = node.getAttribute('d')
if not d:
return None
normPath = normalise_svg_path(d)
path = Path()
points = path.points
# Track subpaths needing to be closed later
unclosed_subpath_pointers = []
subpath_start = []
lastop = ''
for i in range(0, len(normPath), 2):
op, nums = normPath[i:i+2]
if op in ('m', 'M') and i > 0 and path.operators[-1] != _CLOSEPATH:
unclosed_subpath_pointers.append(len(path.operators))
# moveto absolute
if op == 'M':
path.moveTo(*nums)
subpath_start = points[-2:]
# lineto absolute
elif op == 'L':
path.lineTo(*nums)
# moveto relative
elif op == 'm':
if len(points) >= 2:
if lastop in ('Z', 'z'):
starting_point = subpath_start
else:
starting_point = points[-2:]
xn, yn = starting_point[0] + nums[0], starting_point[1] + nums[1]
path.moveTo(xn, yn)
else:
path.moveTo(*nums)
subpath_start = points[-2:]
# lineto relative
elif op == 'l':
xn, yn = points[-2] + nums[0], points[-1] + nums[1]
path.lineTo(xn, yn)
# horizontal/vertical line absolute
elif op == 'H':
path.lineTo(nums[0], points[-1])
elif op == 'V':
path.lineTo(points[-2], nums[0])
# horizontal/vertical line relative
elif op == 'h':
path.lineTo(points[-2] + nums[0], points[-1])
elif op == 'v':
path.lineTo(points[-2], points[-1] + nums[0])
# cubic bezier, absolute
elif op == 'C':
path.curveTo(*nums)
elif op == 'S':
x2, y2, xn, yn = nums
if len(points) < 4 or lastop not in {'c', 'C', 's', 'S'}:
xp, yp, x0, y0 = points[-2:] * 2
else:
xp, yp, x0, y0 = points[-4:]
xi, yi = x0 + (x0 - xp), y0 + (y0 - yp)
path.curveTo(xi, yi, x2, y2, xn, yn)
# cubic bezier, relative
elif op == 'c':
xp, yp = points[-2:]
x1, y1, x2, y2, xn, yn = nums
path.curveTo(xp + x1, yp + y1, xp + x2, yp + y2, xp + xn, yp + yn)
elif op == 's':
x2, y2, xn, yn = nums
if len(points) < 4 or lastop not in {'c', 'C', 's', 'S'}:
xp, yp, x0, y0 = points[-2:] * 2
else:
xp, yp, x0, y0 = points[-4:]
xi, yi = x0 + (x0 - xp), y0 + (y0 - yp)
path.curveTo(xi, yi, x0 + x2, y0 + y2, x0 + xn, y0 + yn)
# quadratic bezier, absolute
elif op == 'Q':
x0, y0 = points[-2:]
x1, y1, xn, yn = nums
(x0, y0), (x1, y1), (x2, y2), (xn, yn) = \
convert_quadratic_to_cubic_path((x0, y0), (x1, y1), (xn, yn))
path.curveTo(x1, y1, x2, y2, xn, yn)
elif op == 'T':
if len(points) < 4:
xp, yp, x0, y0 = points[-2:] * 2
else:
xp, yp, x0, y0 = points[-4:]
xi, yi = x0 + (x0 - xp), y0 + (y0 - yp)
xn, yn = nums
(x0, y0), (x1, y1), (x2, y2), (xn, yn) = \
convert_quadratic_to_cubic_path((x0, y0), (xi, yi), (xn, yn))
path.curveTo(x1, y1, x2, y2, xn, yn)
# quadratic bezier, relative
elif op == 'q':
x0, y0 = points[-2:]
x1, y1, xn, yn = nums
x1, y1, xn, yn = x0 + x1, y0 + y1, x0 + xn, y0 + yn
(x0, y0), (x1, y1), (x2, y2), (xn, yn) = \
convert_quadratic_to_cubic_path((x0, y0), (x1, y1), (xn, yn))
path.curveTo(x1, y1, x2, y2, xn, yn)
elif op == 't':
if len(points) < 4:
xp, yp, x0, y0 = points[-2:] * 2
else:
xp, yp, x0, y0 = points[-4:]
x0, y0 = points[-2:]
xn, yn = nums
xn, yn = x0 + xn, y0 + yn
xi, yi = x0 + (x0 - xp), y0 + (y0 - yp)
(x0, y0), (x1, y1), (x2, y2), (xn, yn) = \
convert_quadratic_to_cubic_path((x0, y0), (xi, yi), (xn, yn))
path.curveTo(x1, y1, x2, y2, xn, yn)
# elliptical arc
elif op in ('A', 'a'):
rx, ry, phi, fA, fS, x2, y2 = nums
x1, y1 = points[-2:]
if op == 'a':
x2 += x1
y2 += y1
if abs(rx) <= 1e-10 or abs(ry) <= 1e-10:
path.lineTo(x2, y2)
else:
bp = bezier_arc_from_end_points(x1, y1, rx, ry, phi, fA, fS, x2, y2)
for _, _, x1, y1, x2, y2, xn, yn in bp:
path.curveTo(x1, y1, x2, y2, xn, yn)
# close path
elif op in ('Z', 'z'):
path.closePath()
else:
logger.debug("Suspicious path operator: %s" % op)
lastop = op
gr = Group()
self.applyStyleOnShape(path, node)
if path.operators[-1] != _CLOSEPATH:
unclosed_subpath_pointers.append(len(path.operators))
if unclosed_subpath_pointers and path.fillColor is not None:
# ReportLab doesn't fill unclosed paths, so we are creating a copy
# of the path with all subpaths closed, but without stroke.
# https://bitbucket.org/rptlab/reportlab/issues/99/
closed_path = NoStrokePath(copy_from=path)
for pointer in reversed(unclosed_subpath_pointers):
closed_path.operators.insert(pointer, _CLOSEPATH)
gr.add(closed_path)
path.fillColor = None
gr.add(path)
return gr
def convertImage(self, node):
if not haveImages:
logger.warning(
"Unable to handle embedded images. Maybe the pillow library is missing?"
)
return None
getAttr = node.getAttribute
x, y, width, height = map(getAttr, ('x', 'y', "width", "height"))
x, y, width, height = map(self.attrConverter.convertLength, (x, y, width, height))
image = node._resolved_target
image = Image(int(x), int(y + height), int(width), int(height), image)
group = Group(image)
group.translate(0, (y + height) * 2)
group.scale(1, -1)
return group
def applyTransformOnGroup(self, transform, group):
"""Apply an SVG transformation to a RL Group shape.
The transformation is the value of an SVG transform attribute
like transform="scale(1, -1) translate(10, 30)".
rotate(<angle> [<cx> <cy>]) is equivalent to:
translate(<cx> <cy>) rotate(<angle>) translate(-<cx> -<cy>)
"""
tr = self.attrConverter.convertTransform(transform)
for op, values in tr:
if op == "scale":
if not isinstance(values, tuple):
values = (values, values)
group.scale(*values)
elif op == "translate":
if isinstance(values, (int, float)):
# From the SVG spec: If <ty> is not provided, it is assumed to be zero.
values = values, 0
group.translate(*values)
elif op == "rotate":
if not isinstance(values, tuple) or len(values) == 1:
group.rotate(values)
elif len(values) == 3:
angle, cx, cy = values
group.translate(cx, cy)
group.rotate(angle)
group.translate(-cx, -cy)
elif op == "skewX":
group.skew(values, 0)
elif op == "skewY":
group.skew(0, values)
elif op == "matrix":
group.transform = values
else:
logger.debug("Ignoring transform: %s %s" % (op, values))
def applyStyleOnShape(self, shape, node, only_explicit=False):
"""
Apply styles from an SVG element to an RLG shape.
If only_explicit is True, only attributes really present are applied.
"""
# RLG-specific: all RLG shapes
"Apply style attributes of a sequence of nodes to an RL shape."
# tuple format: (svgAttr, rlgAttr, converter, default)
mappingN = (
("fill", "fillColor", "convertColor", "black"),
("fill-opacity", "fillOpacity", "convertOpacity", 1),
("fill-rule", "_fillRule", "convertFillRule", "nonzero"),
("stroke", "strokeColor", "convertColor", "none"),
("stroke-width", "strokeWidth", "convertLength", "1"),
("stroke-opacity", "strokeOpacity", "convertOpacity", 1),
("stroke-linejoin", "strokeLineJoin", "convertLineJoin", "0"),
("stroke-linecap", "strokeLineCap", "convertLineCap", "0"),
("stroke-dasharray", "strokeDashArray", "convertDashArray", "none"),
)
mappingF = (
("font-family", "fontName", "convertFontFamily", DEFAULT_FONT_NAME),
("font-size", "fontSize", "convertLength", "12"),
("text-anchor", "textAnchor", "id", "start"),
)
if shape.__class__ == Group:
# Recursively apply style on Group subelements
for subshape in shape.contents:
self.applyStyleOnShape(subshape, node, only_explicit=only_explicit)
return
ac = self.attrConverter
for mapping in (mappingN, mappingF):
if shape.__class__ != String and mapping == mappingF:
continue
for (svgAttrName, rlgAttr, func, default) in mapping:
svgAttrValue = ac.findAttr(node, svgAttrName)
if svgAttrValue == '':
if only_explicit:
continue
else:
svgAttrValue = default
if svgAttrValue == "currentColor":
svgAttrValue = ac.findAttr(node.getparent(), "color") or default
try:
meth = getattr(ac, func)
setattr(shape, rlgAttr, meth(svgAttrValue))
except (AttributeError, KeyError, ValueError):
pass
if getattr(shape, 'fillOpacity', None) is not None and shape.fillColor:
shape.fillColor.alpha = shape.fillOpacity
|
deeplook/svglib | svglib/svglib.py | Svg2RlgShapeConverter.applyTransformOnGroup | python | def applyTransformOnGroup(self, transform, group):
tr = self.attrConverter.convertTransform(transform)
for op, values in tr:
if op == "scale":
if not isinstance(values, tuple):
values = (values, values)
group.scale(*values)
elif op == "translate":
if isinstance(values, (int, float)):
# From the SVG spec: If <ty> is not provided, it is assumed to be zero.
values = values, 0
group.translate(*values)
elif op == "rotate":
if not isinstance(values, tuple) or len(values) == 1:
group.rotate(values)
elif len(values) == 3:
angle, cx, cy = values
group.translate(cx, cy)
group.rotate(angle)
group.translate(-cx, -cy)
elif op == "skewX":
group.skew(values, 0)
elif op == "skewY":
group.skew(0, values)
elif op == "matrix":
group.transform = values
else:
logger.debug("Ignoring transform: %s %s" % (op, values)) | Apply an SVG transformation to a RL Group shape.
The transformation is the value of an SVG transform attribute
like transform="scale(1, -1) translate(10, 30)".
rotate(<angle> [<cx> <cy>]) is equivalent to:
translate(<cx> <cy>) rotate(<angle>) translate(-<cx> -<cy>) | train | https://github.com/deeplook/svglib/blob/859f9f461f1041018af3e6f507bb4c0616b04fbb/svglib/svglib.py#L1231-L1267 | null | class Svg2RlgShapeConverter(SvgShapeConverter):
"""Converter from SVG shapes to RLG (ReportLab Graphics) shapes."""
def convertShape(self, name, node, clipping=None):
method_name = "convert%s" % name.capitalize()
shape = getattr(self, method_name)(node)
if not shape:
return
if name not in ('path', 'polyline', 'text'):
# Only apply style where the convert method did not apply it.
self.applyStyleOnShape(shape, node)
transform = node.getAttribute("transform")
if not (transform or clipping):
return shape
else:
group = Group()
if transform:
self.applyTransformOnGroup(transform, group)
if clipping:
group.add(clipping)
group.add(shape)
return group
def convertLine(self, node):
getAttr = node.getAttribute
x1, y1, x2, y2 = map(getAttr, ("x1", "y1", "x2", "y2"))
x1, y1, x2, y2 = map(self.attrConverter.convertLength, (x1, y1, x2, y2))
shape = Line(x1, y1, x2, y2)
return shape
def convertRect(self, node):
getAttr = node.getAttribute
x, y, width, height = map(getAttr, ('x', 'y', "width", "height"))
x, y, width, height = map(self.attrConverter.convertLength, (x, y, width, height))
rx, ry = map(getAttr, ("rx", "ry"))
rx, ry = map(self.attrConverter.convertLength, (rx, ry))
shape = Rect(x, y, width, height, rx=rx, ry=ry)
return shape
def convertCircle(self, node):
# not rendered if r == 0, error if r < 0.
getAttr = node.getAttribute
cx, cy, r = map(getAttr, ("cx", "cy", 'r'))
cx, cy, r = map(self.attrConverter.convertLength, (cx, cy, r))
shape = Circle(cx, cy, r)
return shape
def convertEllipse(self, node):
getAttr = node.getAttribute
cx, cy, rx, ry = map(getAttr, ("cx", "cy", "rx", "ry"))
cx, cy, rx, ry = map(self.attrConverter.convertLength, (cx, cy, rx, ry))
width, height = rx, ry
shape = Ellipse(cx, cy, width, height)
return shape
def convertPolyline(self, node):
getAttr = node.getAttribute
points = getAttr("points")
points = points.replace(',', ' ')
points = points.split()
points = list(map(self.attrConverter.convertLength, points))
if len(points) % 2 != 0 or len(points) == 0:
# Odd number of coordinates or no coordinates, invalid polyline
return None
polyline = PolyLine(points)
self.applyStyleOnShape(polyline, node)
has_fill = self.attrConverter.findAttr(node, 'fill') not in ('', 'none')
if has_fill:
# ReportLab doesn't fill polylines, so we are creating a polygon
# polygon copy of the polyline, but without stroke.
group = Group()
polygon = Polygon(points)
self.applyStyleOnShape(polygon, node)
polygon.strokeColor = None
group.add(polygon)
group.add(polyline)
return group
return polyline
def convertPolygon(self, node):
getAttr = node.getAttribute
points = getAttr("points")
points = points.replace(',', ' ')
points = points.split()
points = list(map(self.attrConverter.convertLength, points))
if len(points) % 2 != 0 or len(points) == 0:
# Odd number of coordinates or no coordinates, invalid polygon
return None
shape = Polygon(points)
return shape
def clean_text(self, text, preserve_space):
"""Text cleaning as per https://www.w3.org/TR/SVG/text.html#WhiteSpace
"""
if text is None:
return
if preserve_space:
text = text.replace('\r\n', ' ').replace('\n', ' ').replace('\t', ' ')
else:
text = text.replace('\r\n', '').replace('\n', '').replace('\t', ' ')
text = text.strip()
while (' ' in text):
text = text.replace(' ', ' ')
return text
def convertText(self, node):
attrConv = self.attrConverter
xml_space = node.getAttribute("{%s}space" % XML_NS)
if xml_space:
preserve_space = xml_space == 'preserve'
else:
preserve_space = self.preserve_space
gr = Group()
frag_lengths = []
dx0, dy0 = 0, 0
x1, y1 = 0, 0
ff = attrConv.findAttr(node, "font-family") or DEFAULT_FONT_NAME
ff = attrConv.convertFontFamily(ff)
fs = attrConv.findAttr(node, "font-size") or "12"
fs = attrConv.convertLength(fs)
convertLength = partial(attrConv.convertLength, em_base=fs)
x, y = map(node.getAttribute, ('x', 'y'))
x, y = map(convertLength, (x, y))
for c in itertools.chain([node], node.getchildren()):
has_x, has_y = False, False
dx, dy = 0, 0
baseLineShift = 0
if node_name(c) == 'text':
text = self.clean_text(c.text, preserve_space)
if not text:
continue
elif node_name(c) == 'tspan':
text = self.clean_text(c.text, preserve_space)
if not text:
continue
x1, y1, dx, dy = [c.attrib.get(name, '') for name in ("x", "y", "dx", "dy")]
has_x, has_y = (x1 != '', y1 != '')
x1, y1, dx, dy = map(convertLength, (x1, y1, dx, dy))
dx0 = dx0 + dx
dy0 = dy0 + dy
baseLineShift = c.attrib.get("baseline-shift", '0')
if baseLineShift in ("sub", "super", "baseline"):
baseLineShift = {"sub":-fs/2, "super":fs/2, "baseline":0}[baseLineShift]
else:
baseLineShift = convertLength(baseLineShift, fs)
else:
continue
frag_lengths.append(stringWidth(text, ff, fs))
new_x = (x1 + dx) if has_x else (x + dx0 + sum(frag_lengths[:-1]))
new_y = (y1 + dy) if has_y else (y + dy0)
shape = String(new_x, -(new_y - baseLineShift), text)
self.applyStyleOnShape(shape, node)
if node_name(c) == 'tspan':
self.applyStyleOnShape(shape, c)
gr.add(shape)
gr.scale(1, -1)
return gr
def convertPath(self, node):
d = node.getAttribute('d')
if not d:
return None
normPath = normalise_svg_path(d)
path = Path()
points = path.points
# Track subpaths needing to be closed later
unclosed_subpath_pointers = []
subpath_start = []
lastop = ''
for i in range(0, len(normPath), 2):
op, nums = normPath[i:i+2]
if op in ('m', 'M') and i > 0 and path.operators[-1] != _CLOSEPATH:
unclosed_subpath_pointers.append(len(path.operators))
# moveto absolute
if op == 'M':
path.moveTo(*nums)
subpath_start = points[-2:]
# lineto absolute
elif op == 'L':
path.lineTo(*nums)
# moveto relative
elif op == 'm':
if len(points) >= 2:
if lastop in ('Z', 'z'):
starting_point = subpath_start
else:
starting_point = points[-2:]
xn, yn = starting_point[0] + nums[0], starting_point[1] + nums[1]
path.moveTo(xn, yn)
else:
path.moveTo(*nums)
subpath_start = points[-2:]
# lineto relative
elif op == 'l':
xn, yn = points[-2] + nums[0], points[-1] + nums[1]
path.lineTo(xn, yn)
# horizontal/vertical line absolute
elif op == 'H':
path.lineTo(nums[0], points[-1])
elif op == 'V':
path.lineTo(points[-2], nums[0])
# horizontal/vertical line relative
elif op == 'h':
path.lineTo(points[-2] + nums[0], points[-1])
elif op == 'v':
path.lineTo(points[-2], points[-1] + nums[0])
# cubic bezier, absolute
elif op == 'C':
path.curveTo(*nums)
elif op == 'S':
x2, y2, xn, yn = nums
if len(points) < 4 or lastop not in {'c', 'C', 's', 'S'}:
xp, yp, x0, y0 = points[-2:] * 2
else:
xp, yp, x0, y0 = points[-4:]
xi, yi = x0 + (x0 - xp), y0 + (y0 - yp)
path.curveTo(xi, yi, x2, y2, xn, yn)
# cubic bezier, relative
elif op == 'c':
xp, yp = points[-2:]
x1, y1, x2, y2, xn, yn = nums
path.curveTo(xp + x1, yp + y1, xp + x2, yp + y2, xp + xn, yp + yn)
elif op == 's':
x2, y2, xn, yn = nums
if len(points) < 4 or lastop not in {'c', 'C', 's', 'S'}:
xp, yp, x0, y0 = points[-2:] * 2
else:
xp, yp, x0, y0 = points[-4:]
xi, yi = x0 + (x0 - xp), y0 + (y0 - yp)
path.curveTo(xi, yi, x0 + x2, y0 + y2, x0 + xn, y0 + yn)
# quadratic bezier, absolute
elif op == 'Q':
x0, y0 = points[-2:]
x1, y1, xn, yn = nums
(x0, y0), (x1, y1), (x2, y2), (xn, yn) = \
convert_quadratic_to_cubic_path((x0, y0), (x1, y1), (xn, yn))
path.curveTo(x1, y1, x2, y2, xn, yn)
elif op == 'T':
if len(points) < 4:
xp, yp, x0, y0 = points[-2:] * 2
else:
xp, yp, x0, y0 = points[-4:]
xi, yi = x0 + (x0 - xp), y0 + (y0 - yp)
xn, yn = nums
(x0, y0), (x1, y1), (x2, y2), (xn, yn) = \
convert_quadratic_to_cubic_path((x0, y0), (xi, yi), (xn, yn))
path.curveTo(x1, y1, x2, y2, xn, yn)
# quadratic bezier, relative
elif op == 'q':
x0, y0 = points[-2:]
x1, y1, xn, yn = nums
x1, y1, xn, yn = x0 + x1, y0 + y1, x0 + xn, y0 + yn
(x0, y0), (x1, y1), (x2, y2), (xn, yn) = \
convert_quadratic_to_cubic_path((x0, y0), (x1, y1), (xn, yn))
path.curveTo(x1, y1, x2, y2, xn, yn)
elif op == 't':
if len(points) < 4:
xp, yp, x0, y0 = points[-2:] * 2
else:
xp, yp, x0, y0 = points[-4:]
x0, y0 = points[-2:]
xn, yn = nums
xn, yn = x0 + xn, y0 + yn
xi, yi = x0 + (x0 - xp), y0 + (y0 - yp)
(x0, y0), (x1, y1), (x2, y2), (xn, yn) = \
convert_quadratic_to_cubic_path((x0, y0), (xi, yi), (xn, yn))
path.curveTo(x1, y1, x2, y2, xn, yn)
# elliptical arc
elif op in ('A', 'a'):
rx, ry, phi, fA, fS, x2, y2 = nums
x1, y1 = points[-2:]
if op == 'a':
x2 += x1
y2 += y1
if abs(rx) <= 1e-10 or abs(ry) <= 1e-10:
path.lineTo(x2, y2)
else:
bp = bezier_arc_from_end_points(x1, y1, rx, ry, phi, fA, fS, x2, y2)
for _, _, x1, y1, x2, y2, xn, yn in bp:
path.curveTo(x1, y1, x2, y2, xn, yn)
# close path
elif op in ('Z', 'z'):
path.closePath()
else:
logger.debug("Suspicious path operator: %s" % op)
lastop = op
gr = Group()
self.applyStyleOnShape(path, node)
if path.operators[-1] != _CLOSEPATH:
unclosed_subpath_pointers.append(len(path.operators))
if unclosed_subpath_pointers and path.fillColor is not None:
# ReportLab doesn't fill unclosed paths, so we are creating a copy
# of the path with all subpaths closed, but without stroke.
# https://bitbucket.org/rptlab/reportlab/issues/99/
closed_path = NoStrokePath(copy_from=path)
for pointer in reversed(unclosed_subpath_pointers):
closed_path.operators.insert(pointer, _CLOSEPATH)
gr.add(closed_path)
path.fillColor = None
gr.add(path)
return gr
def convertImage(self, node):
if not haveImages:
logger.warning(
"Unable to handle embedded images. Maybe the pillow library is missing?"
)
return None
getAttr = node.getAttribute
x, y, width, height = map(getAttr, ('x', 'y', "width", "height"))
x, y, width, height = map(self.attrConverter.convertLength, (x, y, width, height))
image = node._resolved_target
image = Image(int(x), int(y + height), int(width), int(height), image)
group = Group(image)
group.translate(0, (y + height) * 2)
group.scale(1, -1)
return group
def applyStyleOnShape(self, shape, node, only_explicit=False):
"""
Apply styles from an SVG element to an RLG shape.
If only_explicit is True, only attributes really present are applied.
"""
# RLG-specific: all RLG shapes
"Apply style attributes of a sequence of nodes to an RL shape."
# tuple format: (svgAttr, rlgAttr, converter, default)
mappingN = (
("fill", "fillColor", "convertColor", "black"),
("fill-opacity", "fillOpacity", "convertOpacity", 1),
("fill-rule", "_fillRule", "convertFillRule", "nonzero"),
("stroke", "strokeColor", "convertColor", "none"),
("stroke-width", "strokeWidth", "convertLength", "1"),
("stroke-opacity", "strokeOpacity", "convertOpacity", 1),
("stroke-linejoin", "strokeLineJoin", "convertLineJoin", "0"),
("stroke-linecap", "strokeLineCap", "convertLineCap", "0"),
("stroke-dasharray", "strokeDashArray", "convertDashArray", "none"),
)
mappingF = (
("font-family", "fontName", "convertFontFamily", DEFAULT_FONT_NAME),
("font-size", "fontSize", "convertLength", "12"),
("text-anchor", "textAnchor", "id", "start"),
)
if shape.__class__ == Group:
# Recursively apply style on Group subelements
for subshape in shape.contents:
self.applyStyleOnShape(subshape, node, only_explicit=only_explicit)
return
ac = self.attrConverter
for mapping in (mappingN, mappingF):
if shape.__class__ != String and mapping == mappingF:
continue
for (svgAttrName, rlgAttr, func, default) in mapping:
svgAttrValue = ac.findAttr(node, svgAttrName)
if svgAttrValue == '':
if only_explicit:
continue
else:
svgAttrValue = default
if svgAttrValue == "currentColor":
svgAttrValue = ac.findAttr(node.getparent(), "color") or default
try:
meth = getattr(ac, func)
setattr(shape, rlgAttr, meth(svgAttrValue))
except (AttributeError, KeyError, ValueError):
pass
if getattr(shape, 'fillOpacity', None) is not None and shape.fillColor:
shape.fillColor.alpha = shape.fillOpacity
|
deeplook/svglib | svglib/svglib.py | Svg2RlgShapeConverter.applyStyleOnShape | python | def applyStyleOnShape(self, shape, node, only_explicit=False):
# RLG-specific: all RLG shapes
"Apply style attributes of a sequence of nodes to an RL shape."
# tuple format: (svgAttr, rlgAttr, converter, default)
mappingN = (
("fill", "fillColor", "convertColor", "black"),
("fill-opacity", "fillOpacity", "convertOpacity", 1),
("fill-rule", "_fillRule", "convertFillRule", "nonzero"),
("stroke", "strokeColor", "convertColor", "none"),
("stroke-width", "strokeWidth", "convertLength", "1"),
("stroke-opacity", "strokeOpacity", "convertOpacity", 1),
("stroke-linejoin", "strokeLineJoin", "convertLineJoin", "0"),
("stroke-linecap", "strokeLineCap", "convertLineCap", "0"),
("stroke-dasharray", "strokeDashArray", "convertDashArray", "none"),
)
mappingF = (
("font-family", "fontName", "convertFontFamily", DEFAULT_FONT_NAME),
("font-size", "fontSize", "convertLength", "12"),
("text-anchor", "textAnchor", "id", "start"),
)
if shape.__class__ == Group:
# Recursively apply style on Group subelements
for subshape in shape.contents:
self.applyStyleOnShape(subshape, node, only_explicit=only_explicit)
return
ac = self.attrConverter
for mapping in (mappingN, mappingF):
if shape.__class__ != String and mapping == mappingF:
continue
for (svgAttrName, rlgAttr, func, default) in mapping:
svgAttrValue = ac.findAttr(node, svgAttrName)
if svgAttrValue == '':
if only_explicit:
continue
else:
svgAttrValue = default
if svgAttrValue == "currentColor":
svgAttrValue = ac.findAttr(node.getparent(), "color") or default
try:
meth = getattr(ac, func)
setattr(shape, rlgAttr, meth(svgAttrValue))
except (AttributeError, KeyError, ValueError):
pass
if getattr(shape, 'fillOpacity', None) is not None and shape.fillColor:
shape.fillColor.alpha = shape.fillOpacity | Apply styles from an SVG element to an RLG shape.
If only_explicit is True, only attributes really present are applied. | train | https://github.com/deeplook/svglib/blob/859f9f461f1041018af3e6f507bb4c0616b04fbb/svglib/svglib.py#L1269-L1321 | [
"def applyStyleOnShape(self, shape, node, only_explicit=False):\n \"\"\"\n Apply styles from an SVG element to an RLG shape.\n If only_explicit is True, only attributes really present are applied.\n \"\"\"\n\n # RLG-specific: all RLG shapes\n \"Apply style attributes of a sequence of nodes to an RL shape.\"\n\n # tuple format: (svgAttr, rlgAttr, converter, default)\n mappingN = (\n (\"fill\", \"fillColor\", \"convertColor\", \"black\"),\n (\"fill-opacity\", \"fillOpacity\", \"convertOpacity\", 1),\n (\"fill-rule\", \"_fillRule\", \"convertFillRule\", \"nonzero\"),\n (\"stroke\", \"strokeColor\", \"convertColor\", \"none\"),\n (\"stroke-width\", \"strokeWidth\", \"convertLength\", \"1\"),\n (\"stroke-opacity\", \"strokeOpacity\", \"convertOpacity\", 1),\n (\"stroke-linejoin\", \"strokeLineJoin\", \"convertLineJoin\", \"0\"),\n (\"stroke-linecap\", \"strokeLineCap\", \"convertLineCap\", \"0\"),\n (\"stroke-dasharray\", \"strokeDashArray\", \"convertDashArray\", \"none\"),\n )\n mappingF = (\n (\"font-family\", \"fontName\", \"convertFontFamily\", DEFAULT_FONT_NAME),\n (\"font-size\", \"fontSize\", \"convertLength\", \"12\"),\n (\"text-anchor\", \"textAnchor\", \"id\", \"start\"),\n )\n\n if shape.__class__ == Group:\n # Recursively apply style on Group subelements\n for subshape in shape.contents:\n self.applyStyleOnShape(subshape, node, only_explicit=only_explicit)\n return\n\n ac = self.attrConverter\n for mapping in (mappingN, mappingF):\n if shape.__class__ != String and mapping == mappingF:\n continue\n for (svgAttrName, rlgAttr, func, default) in mapping:\n svgAttrValue = ac.findAttr(node, svgAttrName)\n if svgAttrValue == '':\n if only_explicit:\n continue\n else:\n svgAttrValue = default\n if svgAttrValue == \"currentColor\":\n svgAttrValue = ac.findAttr(node.getparent(), \"color\") or default\n try:\n meth = getattr(ac, func)\n setattr(shape, rlgAttr, meth(svgAttrValue))\n except (AttributeError, KeyError, ValueError):\n pass\n if getattr(shape, 
'fillOpacity', None) is not None and shape.fillColor:\n shape.fillColor.alpha = shape.fillOpacity\n"
] | class Svg2RlgShapeConverter(SvgShapeConverter):
"""Converter from SVG shapes to RLG (ReportLab Graphics) shapes."""
def convertShape(self, name, node, clipping=None):
method_name = "convert%s" % name.capitalize()
shape = getattr(self, method_name)(node)
if not shape:
return
if name not in ('path', 'polyline', 'text'):
# Only apply style where the convert method did not apply it.
self.applyStyleOnShape(shape, node)
transform = node.getAttribute("transform")
if not (transform or clipping):
return shape
else:
group = Group()
if transform:
self.applyTransformOnGroup(transform, group)
if clipping:
group.add(clipping)
group.add(shape)
return group
def convertLine(self, node):
getAttr = node.getAttribute
x1, y1, x2, y2 = map(getAttr, ("x1", "y1", "x2", "y2"))
x1, y1, x2, y2 = map(self.attrConverter.convertLength, (x1, y1, x2, y2))
shape = Line(x1, y1, x2, y2)
return shape
def convertRect(self, node):
getAttr = node.getAttribute
x, y, width, height = map(getAttr, ('x', 'y', "width", "height"))
x, y, width, height = map(self.attrConverter.convertLength, (x, y, width, height))
rx, ry = map(getAttr, ("rx", "ry"))
rx, ry = map(self.attrConverter.convertLength, (rx, ry))
shape = Rect(x, y, width, height, rx=rx, ry=ry)
return shape
def convertCircle(self, node):
# not rendered if r == 0, error if r < 0.
getAttr = node.getAttribute
cx, cy, r = map(getAttr, ("cx", "cy", 'r'))
cx, cy, r = map(self.attrConverter.convertLength, (cx, cy, r))
shape = Circle(cx, cy, r)
return shape
def convertEllipse(self, node):
getAttr = node.getAttribute
cx, cy, rx, ry = map(getAttr, ("cx", "cy", "rx", "ry"))
cx, cy, rx, ry = map(self.attrConverter.convertLength, (cx, cy, rx, ry))
width, height = rx, ry
shape = Ellipse(cx, cy, width, height)
return shape
def convertPolyline(self, node):
getAttr = node.getAttribute
points = getAttr("points")
points = points.replace(',', ' ')
points = points.split()
points = list(map(self.attrConverter.convertLength, points))
if len(points) % 2 != 0 or len(points) == 0:
# Odd number of coordinates or no coordinates, invalid polyline
return None
polyline = PolyLine(points)
self.applyStyleOnShape(polyline, node)
has_fill = self.attrConverter.findAttr(node, 'fill') not in ('', 'none')
if has_fill:
# ReportLab doesn't fill polylines, so we are creating a polygon
# polygon copy of the polyline, but without stroke.
group = Group()
polygon = Polygon(points)
self.applyStyleOnShape(polygon, node)
polygon.strokeColor = None
group.add(polygon)
group.add(polyline)
return group
return polyline
def convertPolygon(self, node):
getAttr = node.getAttribute
points = getAttr("points")
points = points.replace(',', ' ')
points = points.split()
points = list(map(self.attrConverter.convertLength, points))
if len(points) % 2 != 0 or len(points) == 0:
# Odd number of coordinates or no coordinates, invalid polygon
return None
shape = Polygon(points)
return shape
def clean_text(self, text, preserve_space):
"""Text cleaning as per https://www.w3.org/TR/SVG/text.html#WhiteSpace
"""
if text is None:
return
if preserve_space:
text = text.replace('\r\n', ' ').replace('\n', ' ').replace('\t', ' ')
else:
text = text.replace('\r\n', '').replace('\n', '').replace('\t', ' ')
text = text.strip()
while (' ' in text):
text = text.replace(' ', ' ')
return text
def convertText(self, node):
attrConv = self.attrConverter
xml_space = node.getAttribute("{%s}space" % XML_NS)
if xml_space:
preserve_space = xml_space == 'preserve'
else:
preserve_space = self.preserve_space
gr = Group()
frag_lengths = []
dx0, dy0 = 0, 0
x1, y1 = 0, 0
ff = attrConv.findAttr(node, "font-family") or DEFAULT_FONT_NAME
ff = attrConv.convertFontFamily(ff)
fs = attrConv.findAttr(node, "font-size") or "12"
fs = attrConv.convertLength(fs)
convertLength = partial(attrConv.convertLength, em_base=fs)
x, y = map(node.getAttribute, ('x', 'y'))
x, y = map(convertLength, (x, y))
for c in itertools.chain([node], node.getchildren()):
has_x, has_y = False, False
dx, dy = 0, 0
baseLineShift = 0
if node_name(c) == 'text':
text = self.clean_text(c.text, preserve_space)
if not text:
continue
elif node_name(c) == 'tspan':
text = self.clean_text(c.text, preserve_space)
if not text:
continue
x1, y1, dx, dy = [c.attrib.get(name, '') for name in ("x", "y", "dx", "dy")]
has_x, has_y = (x1 != '', y1 != '')
x1, y1, dx, dy = map(convertLength, (x1, y1, dx, dy))
dx0 = dx0 + dx
dy0 = dy0 + dy
baseLineShift = c.attrib.get("baseline-shift", '0')
if baseLineShift in ("sub", "super", "baseline"):
baseLineShift = {"sub":-fs/2, "super":fs/2, "baseline":0}[baseLineShift]
else:
baseLineShift = convertLength(baseLineShift, fs)
else:
continue
frag_lengths.append(stringWidth(text, ff, fs))
new_x = (x1 + dx) if has_x else (x + dx0 + sum(frag_lengths[:-1]))
new_y = (y1 + dy) if has_y else (y + dy0)
shape = String(new_x, -(new_y - baseLineShift), text)
self.applyStyleOnShape(shape, node)
if node_name(c) == 'tspan':
self.applyStyleOnShape(shape, c)
gr.add(shape)
gr.scale(1, -1)
return gr
def convertPath(self, node):
d = node.getAttribute('d')
if not d:
return None
normPath = normalise_svg_path(d)
path = Path()
points = path.points
# Track subpaths needing to be closed later
unclosed_subpath_pointers = []
subpath_start = []
lastop = ''
for i in range(0, len(normPath), 2):
op, nums = normPath[i:i+2]
if op in ('m', 'M') and i > 0 and path.operators[-1] != _CLOSEPATH:
unclosed_subpath_pointers.append(len(path.operators))
# moveto absolute
if op == 'M':
path.moveTo(*nums)
subpath_start = points[-2:]
# lineto absolute
elif op == 'L':
path.lineTo(*nums)
# moveto relative
elif op == 'm':
if len(points) >= 2:
if lastop in ('Z', 'z'):
starting_point = subpath_start
else:
starting_point = points[-2:]
xn, yn = starting_point[0] + nums[0], starting_point[1] + nums[1]
path.moveTo(xn, yn)
else:
path.moveTo(*nums)
subpath_start = points[-2:]
# lineto relative
elif op == 'l':
xn, yn = points[-2] + nums[0], points[-1] + nums[1]
path.lineTo(xn, yn)
# horizontal/vertical line absolute
elif op == 'H':
path.lineTo(nums[0], points[-1])
elif op == 'V':
path.lineTo(points[-2], nums[0])
# horizontal/vertical line relative
elif op == 'h':
path.lineTo(points[-2] + nums[0], points[-1])
elif op == 'v':
path.lineTo(points[-2], points[-1] + nums[0])
# cubic bezier, absolute
elif op == 'C':
path.curveTo(*nums)
elif op == 'S':
x2, y2, xn, yn = nums
if len(points) < 4 or lastop not in {'c', 'C', 's', 'S'}:
xp, yp, x0, y0 = points[-2:] * 2
else:
xp, yp, x0, y0 = points[-4:]
xi, yi = x0 + (x0 - xp), y0 + (y0 - yp)
path.curveTo(xi, yi, x2, y2, xn, yn)
# cubic bezier, relative
elif op == 'c':
xp, yp = points[-2:]
x1, y1, x2, y2, xn, yn = nums
path.curveTo(xp + x1, yp + y1, xp + x2, yp + y2, xp + xn, yp + yn)
elif op == 's':
x2, y2, xn, yn = nums
if len(points) < 4 or lastop not in {'c', 'C', 's', 'S'}:
xp, yp, x0, y0 = points[-2:] * 2
else:
xp, yp, x0, y0 = points[-4:]
xi, yi = x0 + (x0 - xp), y0 + (y0 - yp)
path.curveTo(xi, yi, x0 + x2, y0 + y2, x0 + xn, y0 + yn)
# quadratic bezier, absolute
elif op == 'Q':
x0, y0 = points[-2:]
x1, y1, xn, yn = nums
(x0, y0), (x1, y1), (x2, y2), (xn, yn) = \
convert_quadratic_to_cubic_path((x0, y0), (x1, y1), (xn, yn))
path.curveTo(x1, y1, x2, y2, xn, yn)
elif op == 'T':
if len(points) < 4:
xp, yp, x0, y0 = points[-2:] * 2
else:
xp, yp, x0, y0 = points[-4:]
xi, yi = x0 + (x0 - xp), y0 + (y0 - yp)
xn, yn = nums
(x0, y0), (x1, y1), (x2, y2), (xn, yn) = \
convert_quadratic_to_cubic_path((x0, y0), (xi, yi), (xn, yn))
path.curveTo(x1, y1, x2, y2, xn, yn)
# quadratic bezier, relative
elif op == 'q':
x0, y0 = points[-2:]
x1, y1, xn, yn = nums
x1, y1, xn, yn = x0 + x1, y0 + y1, x0 + xn, y0 + yn
(x0, y0), (x1, y1), (x2, y2), (xn, yn) = \
convert_quadratic_to_cubic_path((x0, y0), (x1, y1), (xn, yn))
path.curveTo(x1, y1, x2, y2, xn, yn)
elif op == 't':
if len(points) < 4:
xp, yp, x0, y0 = points[-2:] * 2
else:
xp, yp, x0, y0 = points[-4:]
x0, y0 = points[-2:]
xn, yn = nums
xn, yn = x0 + xn, y0 + yn
xi, yi = x0 + (x0 - xp), y0 + (y0 - yp)
(x0, y0), (x1, y1), (x2, y2), (xn, yn) = \
convert_quadratic_to_cubic_path((x0, y0), (xi, yi), (xn, yn))
path.curveTo(x1, y1, x2, y2, xn, yn)
# elliptical arc
elif op in ('A', 'a'):
rx, ry, phi, fA, fS, x2, y2 = nums
x1, y1 = points[-2:]
if op == 'a':
x2 += x1
y2 += y1
if abs(rx) <= 1e-10 or abs(ry) <= 1e-10:
path.lineTo(x2, y2)
else:
bp = bezier_arc_from_end_points(x1, y1, rx, ry, phi, fA, fS, x2, y2)
for _, _, x1, y1, x2, y2, xn, yn in bp:
path.curveTo(x1, y1, x2, y2, xn, yn)
# close path
elif op in ('Z', 'z'):
path.closePath()
else:
logger.debug("Suspicious path operator: %s" % op)
lastop = op
gr = Group()
self.applyStyleOnShape(path, node)
if path.operators[-1] != _CLOSEPATH:
unclosed_subpath_pointers.append(len(path.operators))
if unclosed_subpath_pointers and path.fillColor is not None:
# ReportLab doesn't fill unclosed paths, so we are creating a copy
# of the path with all subpaths closed, but without stroke.
# https://bitbucket.org/rptlab/reportlab/issues/99/
closed_path = NoStrokePath(copy_from=path)
for pointer in reversed(unclosed_subpath_pointers):
closed_path.operators.insert(pointer, _CLOSEPATH)
gr.add(closed_path)
path.fillColor = None
gr.add(path)
return gr
def convertImage(self, node):
if not haveImages:
logger.warning(
"Unable to handle embedded images. Maybe the pillow library is missing?"
)
return None
getAttr = node.getAttribute
x, y, width, height = map(getAttr, ('x', 'y', "width", "height"))
x, y, width, height = map(self.attrConverter.convertLength, (x, y, width, height))
image = node._resolved_target
image = Image(int(x), int(y + height), int(width), int(height), image)
group = Group(image)
group.translate(0, (y + height) * 2)
group.scale(1, -1)
return group
def applyTransformOnGroup(self, transform, group):
"""Apply an SVG transformation to a RL Group shape.
The transformation is the value of an SVG transform attribute
like transform="scale(1, -1) translate(10, 30)".
rotate(<angle> [<cx> <cy>]) is equivalent to:
translate(<cx> <cy>) rotate(<angle>) translate(-<cx> -<cy>)
"""
tr = self.attrConverter.convertTransform(transform)
for op, values in tr:
if op == "scale":
if not isinstance(values, tuple):
values = (values, values)
group.scale(*values)
elif op == "translate":
if isinstance(values, (int, float)):
# From the SVG spec: If <ty> is not provided, it is assumed to be zero.
values = values, 0
group.translate(*values)
elif op == "rotate":
if not isinstance(values, tuple) or len(values) == 1:
group.rotate(values)
elif len(values) == 3:
angle, cx, cy = values
group.translate(cx, cy)
group.rotate(angle)
group.translate(-cx, -cy)
elif op == "skewX":
group.skew(values, 0)
elif op == "skewY":
group.skew(0, values)
elif op == "matrix":
group.transform = values
else:
logger.debug("Ignoring transform: %s %s" % (op, values))
|
deeplook/svglib | svglib/utils.py | split_floats | python | def split_floats(op, min_num, value):
floats = [float(seq) for seq in re.findall(r'(-?\d*\.?\d*(?:e[+-]\d+)?)', value) if seq]
res = []
for i in range(0, len(floats), min_num):
if i > 0 and op in {'m', 'M'}:
op = 'l' if op == 'm' else 'L'
res.extend([op, floats[i:i + min_num]])
return res | Split `value`, a list of numbers as a string, to a list of float numbers.
Also optionally insert a `l` or `L` operation depending on the operation
and the length of values.
Example: with op='m' and value='10,20 30,40,' the returned value will be
['m', [10.0, 20.0], 'l', [30.0, 40.0]] | train | https://github.com/deeplook/svglib/blob/859f9f461f1041018af3e6f507bb4c0616b04fbb/svglib/utils.py#L11-L25 | null | """
This is a collection of utilities used by the ``svglib`` code module.
"""
import re
from math import acos, ceil, copysign, cos, degrees, fabs, hypot, radians, sin, sqrt
from reportlab.graphics.shapes import mmult, rotate, translate, transformPoint
def normalise_svg_path(attr):
"""Normalise SVG path.
This basically introduces operator codes for multi-argument
parameters. Also, it fixes sequences of consecutive M or m
operators to MLLL... and mlll... operators. It adds an empty
list as argument for Z and z only in order to make the resul-
ting list easier to iterate over.
E.g. "M 10 20, M 20 20, L 30 40, 40 40, Z"
-> ['M', [10, 20], 'L', [20, 20], 'L', [30, 40], 'L', [40, 40], 'Z', []]
"""
# operator codes mapped to the minimum number of expected arguments
ops = {
'A': 7, 'a': 7,
'Q': 4, 'q': 4, 'T': 2, 't': 2, 'S': 4, 's': 4,
'M': 2, 'L': 2, 'm': 2, 'l': 2, 'H': 1, 'V': 1,
'h': 1, 'v': 1, 'C': 6, 'c': 6, 'Z': 0, 'z': 0,
}
op_keys = ops.keys()
# do some preprocessing
result = []
groups = re.split('([achlmqstvz])', attr.strip(), flags=re.I)
op = None
for item in groups:
if item.strip() == '':
continue
if item in op_keys:
# fix sequences of M to one M plus a sequence of L operators,
# same for m and l.
if item == 'M' and item == op:
op = 'L'
elif item == 'm' and item == op:
op = 'l'
else:
op = item
if ops[op] == 0: # Z, z
result.extend([op, []])
else:
result.extend(split_floats(op, ops[op], item))
op = result[-2] # Remember last op
return result
def convert_quadratic_to_cubic_path(q0, q1, q2):
"""
Convert a quadratic Bezier curve through q0, q1, q2 to a cubic one.
"""
c0 = q0
c1 = (q0[0] + 2. / 3 * (q1[0] - q0[0]), q0[1] + 2. / 3 * (q1[1] - q0[1]))
c2 = (c1[0] + 1. / 3 * (q2[0] - q0[0]), c1[1] + 1. / 3 * (q2[1] - q0[1]))
c3 = q2
return c0, c1, c2, c3
# ***********************************************
# Helper functions for elliptical arc conversion.
# ***********************************************
def vector_angle(u, v):
d = hypot(*u) * hypot(*v)
if d == 0:
return 0
c = (u[0] * v[0] + u[1] * v[1]) / d
if c < -1:
c = -1
elif c > 1:
c = 1
s = u[0] * v[1] - u[1] * v[0]
return degrees(copysign(acos(c), s))
def end_point_to_center_parameters(x1, y1, x2, y2, fA, fS, rx, ry, phi=0):
'''
See http://www.w3.org/TR/SVG/implnote.html#ArcImplementationNotes F.6.5
note that we reduce phi to zero outside this routine
'''
rx = fabs(rx)
ry = fabs(ry)
# step 1
if phi:
phi_rad = radians(phi)
sin_phi = sin(phi_rad)
cos_phi = cos(phi_rad)
tx = 0.5 * (x1 - x2)
ty = 0.5 * (y1 - y2)
x1d = cos_phi * tx - sin_phi * ty
y1d = sin_phi * tx + cos_phi * ty
else:
x1d = 0.5 * (x1 - x2)
y1d = 0.5 * (y1 - y2)
# step 2
# we need to calculate
# (rx*rx*ry*ry-rx*rx*y1d*y1d-ry*ry*x1d*x1d)
# -----------------------------------------
# (rx*rx*y1d*y1d+ry*ry*x1d*x1d)
#
# that is equivalent to
#
# rx*rx*ry*ry
# = ----------------------------- - 1
# (rx*rx*y1d*y1d+ry*ry*x1d*x1d)
#
# 1
# = -------------------------------- - 1
# x1d*x1d/(rx*rx) + y1d*y1d/(ry*ry)
#
# = 1/r - 1
#
# it turns out r is what they recommend checking
# for the negative radicand case
r = x1d * x1d / (rx * rx) + y1d * y1d / (ry * ry)
if r > 1:
rr = sqrt(r)
rx *= rr
ry *= rr
r = x1d * x1d / (rx * rx) + y1d * y1d / (ry * ry)
elif r != 0:
r = 1 / r - 1
if -1e-10 < r < 0:
r = 0
r = sqrt(r)
if fA == fS:
r = -r
cxd = (r * rx * y1d) / ry
cyd = -(r * ry * x1d) / rx
# step 3
if phi:
cx = cos_phi * cxd - sin_phi * cyd + 0.5 * (x1 + x2)
cy = sin_phi * cxd + cos_phi * cyd + 0.5 * (y1 + y2)
else:
cx = cxd + 0.5 * (x1 + x2)
cy = cyd + 0.5 * (y1 + y2)
# step 4
theta1 = vector_angle((1, 0), ((x1d - cxd) / rx, (y1d - cyd) / ry))
dtheta = vector_angle(
((x1d - cxd) / rx, (y1d - cyd) / ry),
((-x1d - cxd) / rx, (-y1d - cyd) / ry)
) % 360
if fS == 0 and dtheta > 0:
dtheta -= 360
elif fS == 1 and dtheta < 0:
dtheta += 360
return cx, cy, rx, ry, -theta1, -dtheta
def bezier_arc_from_centre(cx, cy, rx, ry, start_ang=0, extent=90):
if abs(extent) <= 90:
nfrag = 1
frag_angle = float(extent)
else:
nfrag = int(ceil(abs(extent) / 90.))
frag_angle = float(extent) / nfrag
if frag_angle == 0:
return []
frag_rad = radians(frag_angle)
half_rad = frag_rad * 0.5
kappa = abs(4. / 3. * (1. - cos(half_rad)) / sin(half_rad))
if frag_angle < 0:
kappa = -kappa
point_list = []
theta1 = radians(start_ang)
start_rad = theta1 + frag_rad
c1 = cos(theta1)
s1 = sin(theta1)
for i in range(nfrag):
c0 = c1
s0 = s1
theta1 = start_rad + i * frag_rad
c1 = cos(theta1)
s1 = sin(theta1)
point_list.append((cx + rx * c0,
cy - ry * s0,
cx + rx * (c0 - kappa * s0),
cy - ry * (s0 + kappa * c0),
cx + rx * (c1 + kappa * s1),
cy - ry * (s1 - kappa * c1),
cx + rx * c1,
cy - ry * s1))
return point_list
def bezier_arc_from_end_points(x1, y1, rx, ry, phi, fA, fS, x2, y2):
if (x1 == x2 and y1 == y2):
# From https://www.w3.org/TR/SVG/implnote.html#ArcImplementationNotes:
# If the endpoints (x1, y1) and (x2, y2) are identical, then this is
# equivalent to omitting the elliptical arc segment entirely.
return []
if phi:
# Our box bezier arcs can't handle rotations directly
# move to a well known point, eliminate phi and transform the other point
mx = mmult(rotate(-phi), translate(-x1, -y1))
tx2, ty2 = transformPoint(mx, (x2, y2))
# Convert to box form in unrotated coords
cx, cy, rx, ry, start_ang, extent = end_point_to_center_parameters(
0, 0, tx2, ty2, fA, fS, rx, ry
)
bp = bezier_arc_from_centre(cx, cy, rx, ry, start_ang, extent)
# Re-rotate by the desired angle and add back the translation
mx = mmult(translate(x1, y1), rotate(phi))
res = []
for x1, y1, x2, y2, x3, y3, x4, y4 in bp:
res.append(
transformPoint(mx, (x1, y1)) + transformPoint(mx, (x2, y2)) +
transformPoint(mx, (x3, y3)) + transformPoint(mx, (x4, y4))
)
return res
else:
cx, cy, rx, ry, start_ang, extent = end_point_to_center_parameters(
x1, y1, x2, y2, fA, fS, rx, ry
)
return bezier_arc_from_centre(cx, cy, rx, ry, start_ang, extent)
|
deeplook/svglib | svglib/utils.py | normalise_svg_path | python | def normalise_svg_path(attr):
# operator codes mapped to the minimum number of expected arguments
ops = {
'A': 7, 'a': 7,
'Q': 4, 'q': 4, 'T': 2, 't': 2, 'S': 4, 's': 4,
'M': 2, 'L': 2, 'm': 2, 'l': 2, 'H': 1, 'V': 1,
'h': 1, 'v': 1, 'C': 6, 'c': 6, 'Z': 0, 'z': 0,
}
op_keys = ops.keys()
# do some preprocessing
result = []
groups = re.split('([achlmqstvz])', attr.strip(), flags=re.I)
op = None
for item in groups:
if item.strip() == '':
continue
if item in op_keys:
# fix sequences of M to one M plus a sequence of L operators,
# same for m and l.
if item == 'M' and item == op:
op = 'L'
elif item == 'm' and item == op:
op = 'l'
else:
op = item
if ops[op] == 0: # Z, z
result.extend([op, []])
else:
result.extend(split_floats(op, ops[op], item))
op = result[-2] # Remember last op
return result | Normalise SVG path.
This basically introduces operator codes for multi-argument
parameters. Also, it fixes sequences of consecutive M or m
operators to MLLL... and mlll... operators. It adds an empty
list as argument for Z and z only in order to make the resul-
ting list easier to iterate over.
E.g. "M 10 20, M 20 20, L 30 40, 40 40, Z"
-> ['M', [10, 20], 'L', [20, 20], 'L', [30, 40], 'L', [40, 40], 'Z', []] | train | https://github.com/deeplook/svglib/blob/859f9f461f1041018af3e6f507bb4c0616b04fbb/svglib/utils.py#L28-L72 | [
"def split_floats(op, min_num, value):\n \"\"\"Split `value`, a list of numbers as a string, to a list of float numbers.\n\n Also optionally insert a `l` or `L` operation depending on the operation\n and the length of values.\n Example: with op='m' and value='10,20 30,40,' the returned value will be\n ['m', [10.0, 20.0], 'l', [30.0, 40.0]]\n \"\"\"\n floats = [float(seq) for seq in re.findall(r'(-?\\d*\\.?\\d*(?:e[+-]\\d+)?)', value) if seq]\n res = []\n for i in range(0, len(floats), min_num):\n if i > 0 and op in {'m', 'M'}:\n op = 'l' if op == 'm' else 'L'\n res.extend([op, floats[i:i + min_num]])\n return res\n"
] | """
This is a collection of utilities used by the ``svglib`` code module.
"""
import re
from math import acos, ceil, copysign, cos, degrees, fabs, hypot, radians, sin, sqrt
from reportlab.graphics.shapes import mmult, rotate, translate, transformPoint
def split_floats(op, min_num, value):
"""Split `value`, a list of numbers as a string, to a list of float numbers.
Also optionally insert a `l` or `L` operation depending on the operation
and the length of values.
Example: with op='m' and value='10,20 30,40,' the returned value will be
['m', [10.0, 20.0], 'l', [30.0, 40.0]]
"""
floats = [float(seq) for seq in re.findall(r'(-?\d*\.?\d*(?:e[+-]\d+)?)', value) if seq]
res = []
for i in range(0, len(floats), min_num):
if i > 0 and op in {'m', 'M'}:
op = 'l' if op == 'm' else 'L'
res.extend([op, floats[i:i + min_num]])
return res
def convert_quadratic_to_cubic_path(q0, q1, q2):
"""
Convert a quadratic Bezier curve through q0, q1, q2 to a cubic one.
"""
c0 = q0
c1 = (q0[0] + 2. / 3 * (q1[0] - q0[0]), q0[1] + 2. / 3 * (q1[1] - q0[1]))
c2 = (c1[0] + 1. / 3 * (q2[0] - q0[0]), c1[1] + 1. / 3 * (q2[1] - q0[1]))
c3 = q2
return c0, c1, c2, c3
# ***********************************************
# Helper functions for elliptical arc conversion.
# ***********************************************
def vector_angle(u, v):
d = hypot(*u) * hypot(*v)
if d == 0:
return 0
c = (u[0] * v[0] + u[1] * v[1]) / d
if c < -1:
c = -1
elif c > 1:
c = 1
s = u[0] * v[1] - u[1] * v[0]
return degrees(copysign(acos(c), s))
def end_point_to_center_parameters(x1, y1, x2, y2, fA, fS, rx, ry, phi=0):
'''
See http://www.w3.org/TR/SVG/implnote.html#ArcImplementationNotes F.6.5
note that we reduce phi to zero outside this routine
'''
rx = fabs(rx)
ry = fabs(ry)
# step 1
if phi:
phi_rad = radians(phi)
sin_phi = sin(phi_rad)
cos_phi = cos(phi_rad)
tx = 0.5 * (x1 - x2)
ty = 0.5 * (y1 - y2)
x1d = cos_phi * tx - sin_phi * ty
y1d = sin_phi * tx + cos_phi * ty
else:
x1d = 0.5 * (x1 - x2)
y1d = 0.5 * (y1 - y2)
# step 2
# we need to calculate
# (rx*rx*ry*ry-rx*rx*y1d*y1d-ry*ry*x1d*x1d)
# -----------------------------------------
# (rx*rx*y1d*y1d+ry*ry*x1d*x1d)
#
# that is equivalent to
#
# rx*rx*ry*ry
# = ----------------------------- - 1
# (rx*rx*y1d*y1d+ry*ry*x1d*x1d)
#
# 1
# = -------------------------------- - 1
# x1d*x1d/(rx*rx) + y1d*y1d/(ry*ry)
#
# = 1/r - 1
#
# it turns out r is what they recommend checking
# for the negative radicand case
r = x1d * x1d / (rx * rx) + y1d * y1d / (ry * ry)
if r > 1:
rr = sqrt(r)
rx *= rr
ry *= rr
r = x1d * x1d / (rx * rx) + y1d * y1d / (ry * ry)
elif r != 0:
r = 1 / r - 1
if -1e-10 < r < 0:
r = 0
r = sqrt(r)
if fA == fS:
r = -r
cxd = (r * rx * y1d) / ry
cyd = -(r * ry * x1d) / rx
# step 3
if phi:
cx = cos_phi * cxd - sin_phi * cyd + 0.5 * (x1 + x2)
cy = sin_phi * cxd + cos_phi * cyd + 0.5 * (y1 + y2)
else:
cx = cxd + 0.5 * (x1 + x2)
cy = cyd + 0.5 * (y1 + y2)
# step 4
theta1 = vector_angle((1, 0), ((x1d - cxd) / rx, (y1d - cyd) / ry))
dtheta = vector_angle(
((x1d - cxd) / rx, (y1d - cyd) / ry),
((-x1d - cxd) / rx, (-y1d - cyd) / ry)
) % 360
if fS == 0 and dtheta > 0:
dtheta -= 360
elif fS == 1 and dtheta < 0:
dtheta += 360
return cx, cy, rx, ry, -theta1, -dtheta
def bezier_arc_from_centre(cx, cy, rx, ry, start_ang=0, extent=90):
if abs(extent) <= 90:
nfrag = 1
frag_angle = float(extent)
else:
nfrag = int(ceil(abs(extent) / 90.))
frag_angle = float(extent) / nfrag
if frag_angle == 0:
return []
frag_rad = radians(frag_angle)
half_rad = frag_rad * 0.5
kappa = abs(4. / 3. * (1. - cos(half_rad)) / sin(half_rad))
if frag_angle < 0:
kappa = -kappa
point_list = []
theta1 = radians(start_ang)
start_rad = theta1 + frag_rad
c1 = cos(theta1)
s1 = sin(theta1)
for i in range(nfrag):
c0 = c1
s0 = s1
theta1 = start_rad + i * frag_rad
c1 = cos(theta1)
s1 = sin(theta1)
point_list.append((cx + rx * c0,
cy - ry * s0,
cx + rx * (c0 - kappa * s0),
cy - ry * (s0 + kappa * c0),
cx + rx * (c1 + kappa * s1),
cy - ry * (s1 - kappa * c1),
cx + rx * c1,
cy - ry * s1))
return point_list
def bezier_arc_from_end_points(x1, y1, rx, ry, phi, fA, fS, x2, y2):
if (x1 == x2 and y1 == y2):
# From https://www.w3.org/TR/SVG/implnote.html#ArcImplementationNotes:
# If the endpoints (x1, y1) and (x2, y2) are identical, then this is
# equivalent to omitting the elliptical arc segment entirely.
return []
if phi:
# Our box bezier arcs can't handle rotations directly
# move to a well known point, eliminate phi and transform the other point
mx = mmult(rotate(-phi), translate(-x1, -y1))
tx2, ty2 = transformPoint(mx, (x2, y2))
# Convert to box form in unrotated coords
cx, cy, rx, ry, start_ang, extent = end_point_to_center_parameters(
0, 0, tx2, ty2, fA, fS, rx, ry
)
bp = bezier_arc_from_centre(cx, cy, rx, ry, start_ang, extent)
# Re-rotate by the desired angle and add back the translation
mx = mmult(translate(x1, y1), rotate(phi))
res = []
for x1, y1, x2, y2, x3, y3, x4, y4 in bp:
res.append(
transformPoint(mx, (x1, y1)) + transformPoint(mx, (x2, y2)) +
transformPoint(mx, (x3, y3)) + transformPoint(mx, (x4, y4))
)
return res
else:
cx, cy, rx, ry, start_ang, extent = end_point_to_center_parameters(
x1, y1, x2, y2, fA, fS, rx, ry
)
return bezier_arc_from_centre(cx, cy, rx, ry, start_ang, extent)
|
deeplook/svglib | svglib/utils.py | convert_quadratic_to_cubic_path | python | def convert_quadratic_to_cubic_path(q0, q1, q2):
c0 = q0
c1 = (q0[0] + 2. / 3 * (q1[0] - q0[0]), q0[1] + 2. / 3 * (q1[1] - q0[1]))
c2 = (c1[0] + 1. / 3 * (q2[0] - q0[0]), c1[1] + 1. / 3 * (q2[1] - q0[1]))
c3 = q2
return c0, c1, c2, c3 | Convert a quadratic Bezier curve through q0, q1, q2 to a cubic one. | train | https://github.com/deeplook/svglib/blob/859f9f461f1041018af3e6f507bb4c0616b04fbb/svglib/utils.py#L75-L83 | null | """
This is a collection of utilities used by the ``svglib`` code module.
"""
import re
from math import acos, ceil, copysign, cos, degrees, fabs, hypot, radians, sin, sqrt
from reportlab.graphics.shapes import mmult, rotate, translate, transformPoint
def split_floats(op, min_num, value):
"""Split `value`, a list of numbers as a string, to a list of float numbers.
Also optionally insert a `l` or `L` operation depending on the operation
and the length of values.
Example: with op='m' and value='10,20 30,40,' the returned value will be
['m', [10.0, 20.0], 'l', [30.0, 40.0]]
"""
floats = [float(seq) for seq in re.findall(r'(-?\d*\.?\d*(?:e[+-]\d+)?)', value) if seq]
res = []
for i in range(0, len(floats), min_num):
if i > 0 and op in {'m', 'M'}:
op = 'l' if op == 'm' else 'L'
res.extend([op, floats[i:i + min_num]])
return res
def normalise_svg_path(attr):
"""Normalise SVG path.
This basically introduces operator codes for multi-argument
parameters. Also, it fixes sequences of consecutive M or m
operators to MLLL... and mlll... operators. It adds an empty
list as argument for Z and z only in order to make the resul-
ting list easier to iterate over.
E.g. "M 10 20, M 20 20, L 30 40, 40 40, Z"
-> ['M', [10, 20], 'L', [20, 20], 'L', [30, 40], 'L', [40, 40], 'Z', []]
"""
# operator codes mapped to the minimum number of expected arguments
ops = {
'A': 7, 'a': 7,
'Q': 4, 'q': 4, 'T': 2, 't': 2, 'S': 4, 's': 4,
'M': 2, 'L': 2, 'm': 2, 'l': 2, 'H': 1, 'V': 1,
'h': 1, 'v': 1, 'C': 6, 'c': 6, 'Z': 0, 'z': 0,
}
op_keys = ops.keys()
# do some preprocessing
result = []
groups = re.split('([achlmqstvz])', attr.strip(), flags=re.I)
op = None
for item in groups:
if item.strip() == '':
continue
if item in op_keys:
# fix sequences of M to one M plus a sequence of L operators,
# same for m and l.
if item == 'M' and item == op:
op = 'L'
elif item == 'm' and item == op:
op = 'l'
else:
op = item
if ops[op] == 0: # Z, z
result.extend([op, []])
else:
result.extend(split_floats(op, ops[op], item))
op = result[-2] # Remember last op
return result
# ***********************************************
# Helper functions for elliptical arc conversion.
# ***********************************************
def vector_angle(u, v):
d = hypot(*u) * hypot(*v)
if d == 0:
return 0
c = (u[0] * v[0] + u[1] * v[1]) / d
if c < -1:
c = -1
elif c > 1:
c = 1
s = u[0] * v[1] - u[1] * v[0]
return degrees(copysign(acos(c), s))
def end_point_to_center_parameters(x1, y1, x2, y2, fA, fS, rx, ry, phi=0):
'''
See http://www.w3.org/TR/SVG/implnote.html#ArcImplementationNotes F.6.5
note that we reduce phi to zero outside this routine
'''
rx = fabs(rx)
ry = fabs(ry)
# step 1
if phi:
phi_rad = radians(phi)
sin_phi = sin(phi_rad)
cos_phi = cos(phi_rad)
tx = 0.5 * (x1 - x2)
ty = 0.5 * (y1 - y2)
x1d = cos_phi * tx - sin_phi * ty
y1d = sin_phi * tx + cos_phi * ty
else:
x1d = 0.5 * (x1 - x2)
y1d = 0.5 * (y1 - y2)
# step 2
# we need to calculate
# (rx*rx*ry*ry-rx*rx*y1d*y1d-ry*ry*x1d*x1d)
# -----------------------------------------
# (rx*rx*y1d*y1d+ry*ry*x1d*x1d)
#
# that is equivalent to
#
# rx*rx*ry*ry
# = ----------------------------- - 1
# (rx*rx*y1d*y1d+ry*ry*x1d*x1d)
#
# 1
# = -------------------------------- - 1
# x1d*x1d/(rx*rx) + y1d*y1d/(ry*ry)
#
# = 1/r - 1
#
# it turns out r is what they recommend checking
# for the negative radicand case
r = x1d * x1d / (rx * rx) + y1d * y1d / (ry * ry)
if r > 1:
rr = sqrt(r)
rx *= rr
ry *= rr
r = x1d * x1d / (rx * rx) + y1d * y1d / (ry * ry)
elif r != 0:
r = 1 / r - 1
if -1e-10 < r < 0:
r = 0
r = sqrt(r)
if fA == fS:
r = -r
cxd = (r * rx * y1d) / ry
cyd = -(r * ry * x1d) / rx
# step 3
if phi:
cx = cos_phi * cxd - sin_phi * cyd + 0.5 * (x1 + x2)
cy = sin_phi * cxd + cos_phi * cyd + 0.5 * (y1 + y2)
else:
cx = cxd + 0.5 * (x1 + x2)
cy = cyd + 0.5 * (y1 + y2)
# step 4
theta1 = vector_angle((1, 0), ((x1d - cxd) / rx, (y1d - cyd) / ry))
dtheta = vector_angle(
((x1d - cxd) / rx, (y1d - cyd) / ry),
((-x1d - cxd) / rx, (-y1d - cyd) / ry)
) % 360
if fS == 0 and dtheta > 0:
dtheta -= 360
elif fS == 1 and dtheta < 0:
dtheta += 360
return cx, cy, rx, ry, -theta1, -dtheta
def bezier_arc_from_centre(cx, cy, rx, ry, start_ang=0, extent=90):
if abs(extent) <= 90:
nfrag = 1
frag_angle = float(extent)
else:
nfrag = int(ceil(abs(extent) / 90.))
frag_angle = float(extent) / nfrag
if frag_angle == 0:
return []
frag_rad = radians(frag_angle)
half_rad = frag_rad * 0.5
kappa = abs(4. / 3. * (1. - cos(half_rad)) / sin(half_rad))
if frag_angle < 0:
kappa = -kappa
point_list = []
theta1 = radians(start_ang)
start_rad = theta1 + frag_rad
c1 = cos(theta1)
s1 = sin(theta1)
for i in range(nfrag):
c0 = c1
s0 = s1
theta1 = start_rad + i * frag_rad
c1 = cos(theta1)
s1 = sin(theta1)
point_list.append((cx + rx * c0,
cy - ry * s0,
cx + rx * (c0 - kappa * s0),
cy - ry * (s0 + kappa * c0),
cx + rx * (c1 + kappa * s1),
cy - ry * (s1 - kappa * c1),
cx + rx * c1,
cy - ry * s1))
return point_list
def bezier_arc_from_end_points(x1, y1, rx, ry, phi, fA, fS, x2, y2):
if (x1 == x2 and y1 == y2):
# From https://www.w3.org/TR/SVG/implnote.html#ArcImplementationNotes:
# If the endpoints (x1, y1) and (x2, y2) are identical, then this is
# equivalent to omitting the elliptical arc segment entirely.
return []
if phi:
# Our box bezier arcs can't handle rotations directly
# move to a well known point, eliminate phi and transform the other point
mx = mmult(rotate(-phi), translate(-x1, -y1))
tx2, ty2 = transformPoint(mx, (x2, y2))
# Convert to box form in unrotated coords
cx, cy, rx, ry, start_ang, extent = end_point_to_center_parameters(
0, 0, tx2, ty2, fA, fS, rx, ry
)
bp = bezier_arc_from_centre(cx, cy, rx, ry, start_ang, extent)
# Re-rotate by the desired angle and add back the translation
mx = mmult(translate(x1, y1), rotate(phi))
res = []
for x1, y1, x2, y2, x3, y3, x4, y4 in bp:
res.append(
transformPoint(mx, (x1, y1)) + transformPoint(mx, (x2, y2)) +
transformPoint(mx, (x3, y3)) + transformPoint(mx, (x4, y4))
)
return res
else:
cx, cy, rx, ry, start_ang, extent = end_point_to_center_parameters(
x1, y1, x2, y2, fA, fS, rx, ry
)
return bezier_arc_from_centre(cx, cy, rx, ry, start_ang, extent)
|
deeplook/svglib | svglib/utils.py | end_point_to_center_parameters | python | def end_point_to_center_parameters(x1, y1, x2, y2, fA, fS, rx, ry, phi=0):
'''
See http://www.w3.org/TR/SVG/implnote.html#ArcImplementationNotes F.6.5
note that we reduce phi to zero outside this routine
'''
rx = fabs(rx)
ry = fabs(ry)
# step 1
if phi:
phi_rad = radians(phi)
sin_phi = sin(phi_rad)
cos_phi = cos(phi_rad)
tx = 0.5 * (x1 - x2)
ty = 0.5 * (y1 - y2)
x1d = cos_phi * tx - sin_phi * ty
y1d = sin_phi * tx + cos_phi * ty
else:
x1d = 0.5 * (x1 - x2)
y1d = 0.5 * (y1 - y2)
# step 2
# we need to calculate
# (rx*rx*ry*ry-rx*rx*y1d*y1d-ry*ry*x1d*x1d)
# -----------------------------------------
# (rx*rx*y1d*y1d+ry*ry*x1d*x1d)
#
# that is equivalent to
#
# rx*rx*ry*ry
# = ----------------------------- - 1
# (rx*rx*y1d*y1d+ry*ry*x1d*x1d)
#
# 1
# = -------------------------------- - 1
# x1d*x1d/(rx*rx) + y1d*y1d/(ry*ry)
#
# = 1/r - 1
#
# it turns out r is what they recommend checking
# for the negative radicand case
r = x1d * x1d / (rx * rx) + y1d * y1d / (ry * ry)
if r > 1:
rr = sqrt(r)
rx *= rr
ry *= rr
r = x1d * x1d / (rx * rx) + y1d * y1d / (ry * ry)
elif r != 0:
r = 1 / r - 1
if -1e-10 < r < 0:
r = 0
r = sqrt(r)
if fA == fS:
r = -r
cxd = (r * rx * y1d) / ry
cyd = -(r * ry * x1d) / rx
# step 3
if phi:
cx = cos_phi * cxd - sin_phi * cyd + 0.5 * (x1 + x2)
cy = sin_phi * cxd + cos_phi * cyd + 0.5 * (y1 + y2)
else:
cx = cxd + 0.5 * (x1 + x2)
cy = cyd + 0.5 * (y1 + y2)
# step 4
theta1 = vector_angle((1, 0), ((x1d - cxd) / rx, (y1d - cyd) / ry))
dtheta = vector_angle(
((x1d - cxd) / rx, (y1d - cyd) / ry),
((-x1d - cxd) / rx, (-y1d - cyd) / ry)
) % 360
if fS == 0 and dtheta > 0:
dtheta -= 360
elif fS == 1 and dtheta < 0:
dtheta += 360
return cx, cy, rx, ry, -theta1, -dtheta | See http://www.w3.org/TR/SVG/implnote.html#ArcImplementationNotes F.6.5
note that we reduce phi to zero outside this routine | train | https://github.com/deeplook/svglib/blob/859f9f461f1041018af3e6f507bb4c0616b04fbb/svglib/utils.py#L103-L178 | [
"def vector_angle(u, v):\n d = hypot(*u) * hypot(*v)\n if d == 0:\n return 0\n c = (u[0] * v[0] + u[1] * v[1]) / d\n if c < -1:\n c = -1\n elif c > 1:\n c = 1\n s = u[0] * v[1] - u[1] * v[0]\n return degrees(copysign(acos(c), s))\n"
] | """
This is a collection of utilities used by the ``svglib`` code module.
"""
import re
from math import acos, ceil, copysign, cos, degrees, fabs, hypot, radians, sin, sqrt
from reportlab.graphics.shapes import mmult, rotate, translate, transformPoint
def split_floats(op, min_num, value):
"""Split `value`, a list of numbers as a string, to a list of float numbers.
Also optionally insert a `l` or `L` operation depending on the operation
and the length of values.
Example: with op='m' and value='10,20 30,40,' the returned value will be
['m', [10.0, 20.0], 'l', [30.0, 40.0]]
"""
floats = [float(seq) for seq in re.findall(r'(-?\d*\.?\d*(?:e[+-]\d+)?)', value) if seq]
res = []
for i in range(0, len(floats), min_num):
if i > 0 and op in {'m', 'M'}:
op = 'l' if op == 'm' else 'L'
res.extend([op, floats[i:i + min_num]])
return res
def normalise_svg_path(attr):
"""Normalise SVG path.
This basically introduces operator codes for multi-argument
parameters. Also, it fixes sequences of consecutive M or m
operators to MLLL... and mlll... operators. It adds an empty
list as argument for Z and z only in order to make the resul-
ting list easier to iterate over.
E.g. "M 10 20, M 20 20, L 30 40, 40 40, Z"
-> ['M', [10, 20], 'L', [20, 20], 'L', [30, 40], 'L', [40, 40], 'Z', []]
"""
# operator codes mapped to the minimum number of expected arguments
ops = {
'A': 7, 'a': 7,
'Q': 4, 'q': 4, 'T': 2, 't': 2, 'S': 4, 's': 4,
'M': 2, 'L': 2, 'm': 2, 'l': 2, 'H': 1, 'V': 1,
'h': 1, 'v': 1, 'C': 6, 'c': 6, 'Z': 0, 'z': 0,
}
op_keys = ops.keys()
# do some preprocessing
result = []
groups = re.split('([achlmqstvz])', attr.strip(), flags=re.I)
op = None
for item in groups:
if item.strip() == '':
continue
if item in op_keys:
# fix sequences of M to one M plus a sequence of L operators,
# same for m and l.
if item == 'M' and item == op:
op = 'L'
elif item == 'm' and item == op:
op = 'l'
else:
op = item
if ops[op] == 0: # Z, z
result.extend([op, []])
else:
result.extend(split_floats(op, ops[op], item))
op = result[-2] # Remember last op
return result
def convert_quadratic_to_cubic_path(q0, q1, q2):
"""
Convert a quadratic Bezier curve through q0, q1, q2 to a cubic one.
"""
c0 = q0
c1 = (q0[0] + 2. / 3 * (q1[0] - q0[0]), q0[1] + 2. / 3 * (q1[1] - q0[1]))
c2 = (c1[0] + 1. / 3 * (q2[0] - q0[0]), c1[1] + 1. / 3 * (q2[1] - q0[1]))
c3 = q2
return c0, c1, c2, c3
# ***********************************************
# Helper functions for elliptical arc conversion.
# ***********************************************
def vector_angle(u, v):
d = hypot(*u) * hypot(*v)
if d == 0:
return 0
c = (u[0] * v[0] + u[1] * v[1]) / d
if c < -1:
c = -1
elif c > 1:
c = 1
s = u[0] * v[1] - u[1] * v[0]
return degrees(copysign(acos(c), s))
def bezier_arc_from_centre(cx, cy, rx, ry, start_ang=0, extent=90):
if abs(extent) <= 90:
nfrag = 1
frag_angle = float(extent)
else:
nfrag = int(ceil(abs(extent) / 90.))
frag_angle = float(extent) / nfrag
if frag_angle == 0:
return []
frag_rad = radians(frag_angle)
half_rad = frag_rad * 0.5
kappa = abs(4. / 3. * (1. - cos(half_rad)) / sin(half_rad))
if frag_angle < 0:
kappa = -kappa
point_list = []
theta1 = radians(start_ang)
start_rad = theta1 + frag_rad
c1 = cos(theta1)
s1 = sin(theta1)
for i in range(nfrag):
c0 = c1
s0 = s1
theta1 = start_rad + i * frag_rad
c1 = cos(theta1)
s1 = sin(theta1)
point_list.append((cx + rx * c0,
cy - ry * s0,
cx + rx * (c0 - kappa * s0),
cy - ry * (s0 + kappa * c0),
cx + rx * (c1 + kappa * s1),
cy - ry * (s1 - kappa * c1),
cx + rx * c1,
cy - ry * s1))
return point_list
def bezier_arc_from_end_points(x1, y1, rx, ry, phi, fA, fS, x2, y2):
if (x1 == x2 and y1 == y2):
# From https://www.w3.org/TR/SVG/implnote.html#ArcImplementationNotes:
# If the endpoints (x1, y1) and (x2, y2) are identical, then this is
# equivalent to omitting the elliptical arc segment entirely.
return []
if phi:
# Our box bezier arcs can't handle rotations directly
# move to a well known point, eliminate phi and transform the other point
mx = mmult(rotate(-phi), translate(-x1, -y1))
tx2, ty2 = transformPoint(mx, (x2, y2))
# Convert to box form in unrotated coords
cx, cy, rx, ry, start_ang, extent = end_point_to_center_parameters(
0, 0, tx2, ty2, fA, fS, rx, ry
)
bp = bezier_arc_from_centre(cx, cy, rx, ry, start_ang, extent)
# Re-rotate by the desired angle and add back the translation
mx = mmult(translate(x1, y1), rotate(phi))
res = []
for x1, y1, x2, y2, x3, y3, x4, y4 in bp:
res.append(
transformPoint(mx, (x1, y1)) + transformPoint(mx, (x2, y2)) +
transformPoint(mx, (x3, y3)) + transformPoint(mx, (x4, y4))
)
return res
else:
cx, cy, rx, ry, start_ang, extent = end_point_to_center_parameters(
x1, y1, x2, y2, fA, fS, rx, ry
)
return bezier_arc_from_centre(cx, cy, rx, ry, start_ang, extent)
|
stephenmcd/django-socketio | django_socketio/views.py | socketio | python | def socketio(request):
context = {}
socket = SocketIOChannelProxy(request.environ["socketio"])
client_start(request, socket, context)
try:
if socket.on_connect():
events.on_connect.send(request, socket, context)
while True:
messages = socket.recv()
if not messages and not socket.connected():
events.on_disconnect.send(request, socket, context)
break
# Subscribe and unsubscribe messages are in two parts, the
# name of either and the channel, so we use an iterator that
# lets us jump a step in iteration to grab the channel name
# for these.
messages = iter(messages)
for message in messages:
if message == "__subscribe__":
message = messages.next()
message_type = "subscribe"
socket.subscribe(message)
events.on_subscribe.send(request, socket, context, message)
elif message == "__unsubscribe__":
message = messages.next()
message_type = "unsubscribe"
socket.unsubscribe(message)
events.on_unsubscribe.send(request, socket, context, message)
else:
# Socket.IO sends arrays as individual messages, so
# they're put into an object in socketio_scripts.html
# and given the __array__ key so that they can be
# handled consistently in the on_message event.
message_type = "message"
if message == "__array__":
message = messages.next()
events.on_message.send(request, socket, context, message)
log_message = format_log(request, message_type, message)
if log_message:
socket.handler.server.log.write(log_message)
except Exception, exception:
from traceback import print_exc
print_exc()
events.on_error.send(request, socket, context, exception)
client_end(request, socket, context)
return HttpResponse("") | Socket.IO handler - maintains the lifecycle of a Socket.IO
request, sending the each of the events. Also handles
adding/removing request/socket pairs to the CLIENTS dict
which is used for sending on_finish events when the server
stops. | train | https://github.com/stephenmcd/django-socketio/blob/b704f912551829a3bcf15872ba0e1baf81dea106/django_socketio/views.py#L10-L62 | [
"def client_start(request, socket, context):\n \"\"\"\n Adds the client triple to CLIENTS.\n \"\"\"\n CLIENTS[socket.session.session_id] = (request, socket, context)\n",
"def client_end(request, socket, context):\n \"\"\"\n Handles cleanup when a session ends for the given client triple.\n Sends unsubscribe and finish events, actually unsubscribes from\n any channels subscribed to, and removes the client triple from\n CLIENTS.\n \"\"\"\n # Send the unsubscribe event prior to actually unsubscribing, so\n # that the finish event can still match channels if applicable.\n for channel in socket.channels:\n events.on_unsubscribe.send(request, socket, context, channel)\n events.on_finish.send(request, socket, context)\n # Actually unsubscribe to cleanup channel data.\n for channel in socket.channels[:]:\n socket.unsubscribe(channel)\n # Remove the client.\n del CLIENTS[socket.session.session_id]\n",
"def send(self, request, socket, context, *args):\n \"\"\"\n When an event is sent, run all relevant handlers. Relevant\n handlers are those without a channel pattern when the given\n socket is not subscribed to any particular channel, or the\n handlers with a channel pattern that matches any of the\n channels that the given socket is subscribed to.\n\n In the case of subscribe/unsubscribe, match the channel arg\n being sent to the channel pattern.\n \"\"\"\n for handler, pattern in self.handlers:\n no_channel = not pattern and not socket.channels\n if self.name.endswith(\"subscribe\") and pattern:\n matches = [pattern.match(args[0])]\n else:\n matches = [pattern.match(c) for c in socket.channels if pattern]\n if no_channel or filter(None, matches):\n handler(request, socket, context, *args)\n"
] |
from django.http import HttpResponse
from django_socketio import events
from django_socketio.channels import SocketIOChannelProxy
from django_socketio.clients import client_start, client_end
from django_socketio.utils import format_log
|
stephenmcd/django-socketio | django_socketio/clients.py | client_start | python | def client_start(request, socket, context):
CLIENTS[socket.session.session_id] = (request, socket, context) | Adds the client triple to CLIENTS. | train | https://github.com/stephenmcd/django-socketio/blob/b704f912551829a3bcf15872ba0e1baf81dea106/django_socketio/clients.py#L11-L15 | null |
from django_socketio import events
# Maps open Socket.IO session IDs to request/socket pairs for
# running cleanup code and events when the server is shut down
# or reloaded.
CLIENTS = {}
def client_end(request, socket, context):
"""
Handles cleanup when a session ends for the given client triple.
Sends unsubscribe and finish events, actually unsubscribes from
any channels subscribed to, and removes the client triple from
CLIENTS.
"""
# Send the unsubscribe event prior to actually unsubscribing, so
# that the finish event can still match channels if applicable.
for channel in socket.channels:
events.on_unsubscribe.send(request, socket, context, channel)
events.on_finish.send(request, socket, context)
# Actually unsubscribe to cleanup channel data.
for channel in socket.channels[:]:
socket.unsubscribe(channel)
# Remove the client.
del CLIENTS[socket.session.session_id]
def client_end_all():
"""
Performs cleanup on all clients - called by runserver_socketio
when the server is shut down or reloaded.
"""
for request, socket, context in CLIENTS.values()[:]:
client_end(request, socket, context)
|
stephenmcd/django-socketio | django_socketio/clients.py | client_end | python | def client_end(request, socket, context):
# Send the unsubscribe event prior to actually unsubscribing, so
# that the finish event can still match channels if applicable.
for channel in socket.channels:
events.on_unsubscribe.send(request, socket, context, channel)
events.on_finish.send(request, socket, context)
# Actually unsubscribe to cleanup channel data.
for channel in socket.channels[:]:
socket.unsubscribe(channel)
# Remove the client.
del CLIENTS[socket.session.session_id] | Handles cleanup when a session ends for the given client triple.
Sends unsubscribe and finish events, actually unsubscribes from
any channels subscribed to, and removes the client triple from
CLIENTS. | train | https://github.com/stephenmcd/django-socketio/blob/b704f912551829a3bcf15872ba0e1baf81dea106/django_socketio/clients.py#L18-L34 | [
"def unsubscribe(self, channel):\n \"\"\"\n Remove the channel from this socket's channels, and from the\n list of subscribed session IDs for the channel. Return False\n if not subscribed, otherwise True.\n \"\"\"\n try:\n CHANNELS[channel].remove(self.socket.session.session_id)\n self.channels.remove(channel)\n except ValueError:\n return False\n return True\n",
"def send(self, request, socket, context, *args):\n \"\"\"\n When an event is sent, run all relevant handlers. Relevant\n handlers are those without a channel pattern when the given\n socket is not subscribed to any particular channel, or the\n handlers with a channel pattern that matches any of the\n channels that the given socket is subscribed to.\n\n In the case of subscribe/unsubscribe, match the channel arg\n being sent to the channel pattern.\n \"\"\"\n for handler, pattern in self.handlers:\n no_channel = not pattern and not socket.channels\n if self.name.endswith(\"subscribe\") and pattern:\n matches = [pattern.match(args[0])]\n else:\n matches = [pattern.match(c) for c in socket.channels if pattern]\n if no_channel or filter(None, matches):\n handler(request, socket, context, *args)\n"
] |
from django_socketio import events
# Maps open Socket.IO session IDs to request/socket pairs for
# running cleanup code and events when the server is shut down
# or reloaded.
CLIENTS = {}
def client_start(request, socket, context):
"""
Adds the client triple to CLIENTS.
"""
CLIENTS[socket.session.session_id] = (request, socket, context)
def client_end_all():
"""
Performs cleanup on all clients - called by runserver_socketio
when the server is shut down or reloaded.
"""
for request, socket, context in CLIENTS.values()[:]:
client_end(request, socket, context)
|
stephenmcd/django-socketio | django_socketio/clients.py | client_end_all | python | def client_end_all():
for request, socket, context in CLIENTS.values()[:]:
client_end(request, socket, context) | Performs cleanup on all clients - called by runserver_socketio
when the server is shut down or reloaded. | train | https://github.com/stephenmcd/django-socketio/blob/b704f912551829a3bcf15872ba0e1baf81dea106/django_socketio/clients.py#L37-L43 | [
"def client_end(request, socket, context):\n \"\"\"\n Handles cleanup when a session ends for the given client triple.\n Sends unsubscribe and finish events, actually unsubscribes from\n any channels subscribed to, and removes the client triple from\n CLIENTS.\n \"\"\"\n # Send the unsubscribe event prior to actually unsubscribing, so\n # that the finish event can still match channels if applicable.\n for channel in socket.channels:\n events.on_unsubscribe.send(request, socket, context, channel)\n events.on_finish.send(request, socket, context)\n # Actually unsubscribe to cleanup channel data.\n for channel in socket.channels[:]:\n socket.unsubscribe(channel)\n # Remove the client.\n del CLIENTS[socket.session.session_id]\n"
] |
from django_socketio import events
# Maps open Socket.IO session IDs to request/socket pairs for
# running cleanup code and events when the server is shut down
# or reloaded.
CLIENTS = {}
def client_start(request, socket, context):
"""
Adds the client triple to CLIENTS.
"""
CLIENTS[socket.session.session_id] = (request, socket, context)
def client_end(request, socket, context):
"""
Handles cleanup when a session ends for the given client triple.
Sends unsubscribe and finish events, actually unsubscribes from
any channels subscribed to, and removes the client triple from
CLIENTS.
"""
# Send the unsubscribe event prior to actually unsubscribing, so
# that the finish event can still match channels if applicable.
for channel in socket.channels:
events.on_unsubscribe.send(request, socket, context, channel)
events.on_finish.send(request, socket, context)
# Actually unsubscribe to cleanup channel data.
for channel in socket.channels[:]:
socket.unsubscribe(channel)
# Remove the client.
del CLIENTS[socket.session.session_id]
|
stephenmcd/django-socketio | django_socketio/channels.py | SocketIOChannelProxy.subscribe | python | def subscribe(self, channel):
if channel in self.channels:
return False
CHANNELS[channel].append(self.socket.session.session_id)
self.channels.append(channel)
return True | Add the channel to this socket's channels, and to the list of
subscribed session IDs for the channel. Return False if
already subscribed, otherwise True. | train | https://github.com/stephenmcd/django-socketio/blob/b704f912551829a3bcf15872ba0e1baf81dea106/django_socketio/channels.py#L22-L32 | null | class SocketIOChannelProxy(object):
"""
Proxy object for SocketIOProtocol that adds channel subscription
and broadcast.
"""
def __init__(self, socket):
"""
Store the original socket protocol object.
"""
self.socket = socket
self.channels = [] # store our subscribed channels for faster lookup.
def unsubscribe(self, channel):
"""
Remove the channel from this socket's channels, and from the
list of subscribed session IDs for the channel. Return False
if not subscribed, otherwise True.
"""
try:
CHANNELS[channel].remove(self.socket.session.session_id)
self.channels.remove(channel)
except ValueError:
return False
return True
def broadcast_channel(self, message, channel=None):
"""
Send the given message to all subscribers for the channel
given. If no channel is given, send to the subscribers for
all the channels that this socket is subscribed to.
"""
if channel is None:
channels = self.channels
else:
channels = [channel]
for channel in channels:
for subscriber in CHANNELS[channel]:
if subscriber != self.socket.session.session_id:
session = self.socket.handler.server.sessions[subscriber]
self._write(message, session)
def send_and_broadcast(self, message):
"""
Shortcut for a socket to broadcast to all sockets itself.
"""
self.send(message)
self.broadcast(message)
def send_and_broadcast_channel(self, message, channel=None):
"""
Shortcut for a socket to broadcast to all sockets subscribed
to a channel, and itself.
"""
self.send(message)
self.broadcast_channel(message, channel)
def __getattr__(self, name):
"""
Proxy missing attributes to the socket.
"""
return getattr(self.socket, name)
|
stephenmcd/django-socketio | django_socketio/channels.py | SocketIOChannelProxy.unsubscribe | python | def unsubscribe(self, channel):
try:
CHANNELS[channel].remove(self.socket.session.session_id)
self.channels.remove(channel)
except ValueError:
return False
return True | Remove the channel from this socket's channels, and from the
list of subscribed session IDs for the channel. Return False
if not subscribed, otherwise True. | train | https://github.com/stephenmcd/django-socketio/blob/b704f912551829a3bcf15872ba0e1baf81dea106/django_socketio/channels.py#L34-L45 | null | class SocketIOChannelProxy(object):
"""
Proxy object for SocketIOProtocol that adds channel subscription
and broadcast.
"""
def __init__(self, socket):
"""
Store the original socket protocol object.
"""
self.socket = socket
self.channels = [] # store our subscribed channels for faster lookup.
def subscribe(self, channel):
"""
Add the channel to this socket's channels, and to the list of
subscribed session IDs for the channel. Return False if
already subscribed, otherwise True.
"""
if channel in self.channels:
return False
CHANNELS[channel].append(self.socket.session.session_id)
self.channels.append(channel)
return True
def broadcast_channel(self, message, channel=None):
"""
Send the given message to all subscribers for the channel
given. If no channel is given, send to the subscribers for
all the channels that this socket is subscribed to.
"""
if channel is None:
channels = self.channels
else:
channels = [channel]
for channel in channels:
for subscriber in CHANNELS[channel]:
if subscriber != self.socket.session.session_id:
session = self.socket.handler.server.sessions[subscriber]
self._write(message, session)
def send_and_broadcast(self, message):
"""
Shortcut for a socket to broadcast to all sockets itself.
"""
self.send(message)
self.broadcast(message)
def send_and_broadcast_channel(self, message, channel=None):
"""
Shortcut for a socket to broadcast to all sockets subscribed
to a channel, and itself.
"""
self.send(message)
self.broadcast_channel(message, channel)
def __getattr__(self, name):
"""
Proxy missing attributes to the socket.
"""
return getattr(self.socket, name)
|
stephenmcd/django-socketio | django_socketio/channels.py | SocketIOChannelProxy.broadcast_channel | python | def broadcast_channel(self, message, channel=None):
if channel is None:
channels = self.channels
else:
channels = [channel]
for channel in channels:
for subscriber in CHANNELS[channel]:
if subscriber != self.socket.session.session_id:
session = self.socket.handler.server.sessions[subscriber]
self._write(message, session) | Send the given message to all subscribers for the channel
given. If no channel is given, send to the subscribers for
all the channels that this socket is subscribed to. | train | https://github.com/stephenmcd/django-socketio/blob/b704f912551829a3bcf15872ba0e1baf81dea106/django_socketio/channels.py#L47-L61 | null | class SocketIOChannelProxy(object):
"""
Proxy object for SocketIOProtocol that adds channel subscription
and broadcast.
"""
def __init__(self, socket):
"""
Store the original socket protocol object.
"""
self.socket = socket
self.channels = [] # store our subscribed channels for faster lookup.
def subscribe(self, channel):
"""
Add the channel to this socket's channels, and to the list of
subscribed session IDs for the channel. Return False if
already subscribed, otherwise True.
"""
if channel in self.channels:
return False
CHANNELS[channel].append(self.socket.session.session_id)
self.channels.append(channel)
return True
def unsubscribe(self, channel):
"""
Remove the channel from this socket's channels, and from the
list of subscribed session IDs for the channel. Return False
if not subscribed, otherwise True.
"""
try:
CHANNELS[channel].remove(self.socket.session.session_id)
self.channels.remove(channel)
except ValueError:
return False
return True
def send_and_broadcast(self, message):
"""
Shortcut for a socket to broadcast to all sockets itself.
"""
self.send(message)
self.broadcast(message)
def send_and_broadcast_channel(self, message, channel=None):
"""
Shortcut for a socket to broadcast to all sockets subscribed
to a channel, and itself.
"""
self.send(message)
self.broadcast_channel(message, channel)
def __getattr__(self, name):
"""
Proxy missing attributes to the socket.
"""
return getattr(self.socket, name)
|
stephenmcd/django-socketio | django_socketio/channels.py | SocketIOChannelProxy.send_and_broadcast_channel | python | def send_and_broadcast_channel(self, message, channel=None):
self.send(message)
self.broadcast_channel(message, channel) | Shortcut for a socket to broadcast to all sockets subscribed
to a channel, and itself. | train | https://github.com/stephenmcd/django-socketio/blob/b704f912551829a3bcf15872ba0e1baf81dea106/django_socketio/channels.py#L70-L76 | [
"def broadcast_channel(self, message, channel=None):\n \"\"\"\n Send the given message to all subscribers for the channel\n given. If no channel is given, send to the subscribers for\n all the channels that this socket is subscribed to.\n \"\"\"\n if channel is None:\n channels = self.channels\n else:\n channels = [channel]\n for channel in channels:\n for subscriber in CHANNELS[channel]:\n if subscriber != self.socket.session.session_id:\n session = self.socket.handler.server.sessions[subscriber]\n self._write(message, session)\n"
] | class SocketIOChannelProxy(object):
"""
Proxy object for SocketIOProtocol that adds channel subscription
and broadcast.
"""
def __init__(self, socket):
"""
Store the original socket protocol object.
"""
self.socket = socket
self.channels = [] # store our subscribed channels for faster lookup.
def subscribe(self, channel):
"""
Add the channel to this socket's channels, and to the list of
subscribed session IDs for the channel. Return False if
already subscribed, otherwise True.
"""
if channel in self.channels:
return False
CHANNELS[channel].append(self.socket.session.session_id)
self.channels.append(channel)
return True
def unsubscribe(self, channel):
"""
Remove the channel from this socket's channels, and from the
list of subscribed session IDs for the channel. Return False
if not subscribed, otherwise True.
"""
try:
CHANNELS[channel].remove(self.socket.session.session_id)
self.channels.remove(channel)
except ValueError:
return False
return True
def broadcast_channel(self, message, channel=None):
"""
Send the given message to all subscribers for the channel
given. If no channel is given, send to the subscribers for
all the channels that this socket is subscribed to.
"""
if channel is None:
channels = self.channels
else:
channels = [channel]
for channel in channels:
for subscriber in CHANNELS[channel]:
if subscriber != self.socket.session.session_id:
session = self.socket.handler.server.sessions[subscriber]
self._write(message, session)
def send_and_broadcast(self, message):
"""
Shortcut for a socket to broadcast to all sockets itself.
"""
self.send(message)
self.broadcast(message)
def __getattr__(self, name):
"""
Proxy missing attributes to the socket.
"""
return getattr(self.socket, name)
|
stephenmcd/django-socketio | django_socketio/example_project/chat/events.py | message | python | def message(request, socket, context, message):
room = get_object_or_404(ChatRoom, id=message["room"])
if message["action"] == "start":
name = strip_tags(message["name"])
user, created = room.users.get_or_create(name=name)
if not created:
socket.send({"action": "in-use"})
else:
context["user"] = user
users = [u.name for u in room.users.exclude(id=user.id)]
socket.send({"action": "started", "users": users})
user.session = socket.session.session_id
user.save()
joined = {"action": "join", "name": user.name, "id": user.id}
socket.send_and_broadcast_channel(joined)
else:
try:
user = context["user"]
except KeyError:
return
if message["action"] == "message":
message["message"] = strip_tags(message["message"])
message["name"] = user.name
socket.send_and_broadcast_channel(message) | Event handler for a room receiving a message. First validates a
joining user's name and sends them the list of users. | train | https://github.com/stephenmcd/django-socketio/blob/b704f912551829a3bcf15872ba0e1baf81dea106/django_socketio/example_project/chat/events.py#L10-L37 | null |
from django.shortcuts import get_object_or_404
from django.utils.html import strip_tags
from django_socketio import events
from chat.models import ChatRoom
@events.on_message(channel="^room-")
@events.on_finish(channel="^room-")
def finish(request, socket, context):
"""
Event handler for a socket session ending in a room. Broadcast
the user leaving and delete them from the DB.
"""
try:
user = context["user"]
except KeyError:
return
left = {"action": "leave", "name": user.name, "id": user.id}
socket.broadcast_channel(left)
user.delete()
|
stephenmcd/django-socketio | django_socketio/example_project/chat/events.py | finish | python | def finish(request, socket, context):
try:
user = context["user"]
except KeyError:
return
left = {"action": "leave", "name": user.name, "id": user.id}
socket.broadcast_channel(left)
user.delete() | Event handler for a socket session ending in a room. Broadcast
the user leaving and delete them from the DB. | train | https://github.com/stephenmcd/django-socketio/blob/b704f912551829a3bcf15872ba0e1baf81dea106/django_socketio/example_project/chat/events.py#L41-L52 | null |
from django.shortcuts import get_object_or_404
from django.utils.html import strip_tags
from django_socketio import events
from chat.models import ChatRoom
@events.on_message(channel="^room-")
def message(request, socket, context, message):
"""
Event handler for a room receiving a message. First validates a
joining user's name and sends them the list of users.
"""
room = get_object_or_404(ChatRoom, id=message["room"])
if message["action"] == "start":
name = strip_tags(message["name"])
user, created = room.users.get_or_create(name=name)
if not created:
socket.send({"action": "in-use"})
else:
context["user"] = user
users = [u.name for u in room.users.exclude(id=user.id)]
socket.send({"action": "started", "users": users})
user.session = socket.session.session_id
user.save()
joined = {"action": "join", "name": user.name, "id": user.id}
socket.send_and_broadcast_channel(joined)
else:
try:
user = context["user"]
except KeyError:
return
if message["action"] == "message":
message["message"] = strip_tags(message["message"])
message["name"] = user.name
socket.send_and_broadcast_channel(message)
@events.on_finish(channel="^room-")
|
stephenmcd/django-socketio | django_socketio/utils.py | send | python | def send(session_id, message):
try:
socket = CLIENTS[session_id][1]
except KeyError:
raise NoSocket("There is no socket with the session ID: " + session_id)
socket.send(message) | Send a message to the socket for the given session ID. | train | https://github.com/stephenmcd/django-socketio/blob/b704f912551829a3bcf15872ba0e1baf81dea106/django_socketio/utils.py#L14-L22 | null |
from datetime import datetime
from django_socketio.channels import CHANNELS
from django_socketio.clients import CLIENTS
class NoSocket(Exception):
"""
Raised when no clients are available to broadcast to.
"""
def broadcast(message):
"""
Find the first socket and use it to broadcast to all sockets
including the socket itself.
"""
try:
socket = CLIENTS.values()[0][1]
except IndexError:
raise NoSocket("There are no clients.")
socket.send_and_broadcast(message)
def broadcast_channel(message, channel):
"""
Find the first socket for the given channel, and use it to
broadcast to the channel, including the socket itself.
"""
try:
socket = CLIENTS[CHANNELS.get(channel, [])[0]][1]
except (IndexError, KeyError):
raise NoSocket("There are no clients on the channel: " + channel)
socket.send_and_broadcast_channel(message, channel)
def format_log(request, message_type, message):
"""
Formats a log message similar to gevent's pywsgi request logging.
"""
from django_socketio.settings import MESSAGE_LOG_FORMAT
if MESSAGE_LOG_FORMAT is None:
return None
now = datetime.now().replace(microsecond=0)
args = dict(request.META, TYPE=message_type, MESSAGE=message, TIME=now)
return (MESSAGE_LOG_FORMAT % args) + "\n"
|
stephenmcd/django-socketio | django_socketio/utils.py | broadcast | python | def broadcast(message):
try:
socket = CLIENTS.values()[0][1]
except IndexError:
raise NoSocket("There are no clients.")
socket.send_and_broadcast(message) | Find the first socket and use it to broadcast to all sockets
including the socket itself. | train | https://github.com/stephenmcd/django-socketio/blob/b704f912551829a3bcf15872ba0e1baf81dea106/django_socketio/utils.py#L25-L34 | null |
from datetime import datetime
from django_socketio.channels import CHANNELS
from django_socketio.clients import CLIENTS
class NoSocket(Exception):
"""
Raised when no clients are available to broadcast to.
"""
def send(session_id, message):
"""
Send a message to the socket for the given session ID.
"""
try:
socket = CLIENTS[session_id][1]
except KeyError:
raise NoSocket("There is no socket with the session ID: " + session_id)
socket.send(message)
def broadcast_channel(message, channel):
"""
Find the first socket for the given channel, and use it to
broadcast to the channel, including the socket itself.
"""
try:
socket = CLIENTS[CHANNELS.get(channel, [])[0]][1]
except (IndexError, KeyError):
raise NoSocket("There are no clients on the channel: " + channel)
socket.send_and_broadcast_channel(message, channel)
def format_log(request, message_type, message):
"""
Formats a log message similar to gevent's pywsgi request logging.
"""
from django_socketio.settings import MESSAGE_LOG_FORMAT
if MESSAGE_LOG_FORMAT is None:
return None
now = datetime.now().replace(microsecond=0)
args = dict(request.META, TYPE=message_type, MESSAGE=message, TIME=now)
return (MESSAGE_LOG_FORMAT % args) + "\n"
|
stephenmcd/django-socketio | django_socketio/utils.py | broadcast_channel | python | def broadcast_channel(message, channel):
try:
socket = CLIENTS[CHANNELS.get(channel, [])[0]][1]
except (IndexError, KeyError):
raise NoSocket("There are no clients on the channel: " + channel)
socket.send_and_broadcast_channel(message, channel) | Find the first socket for the given channel, and use it to
broadcast to the channel, including the socket itself. | train | https://github.com/stephenmcd/django-socketio/blob/b704f912551829a3bcf15872ba0e1baf81dea106/django_socketio/utils.py#L37-L46 | null |
from datetime import datetime
from django_socketio.channels import CHANNELS
from django_socketio.clients import CLIENTS
class NoSocket(Exception):
"""
Raised when no clients are available to broadcast to.
"""
def send(session_id, message):
"""
Send a message to the socket for the given session ID.
"""
try:
socket = CLIENTS[session_id][1]
except KeyError:
raise NoSocket("There is no socket with the session ID: " + session_id)
socket.send(message)
def broadcast(message):
"""
Find the first socket and use it to broadcast to all sockets
including the socket itself.
"""
try:
socket = CLIENTS.values()[0][1]
except IndexError:
raise NoSocket("There are no clients.")
socket.send_and_broadcast(message)
def format_log(request, message_type, message):
"""
Formats a log message similar to gevent's pywsgi request logging.
"""
from django_socketio.settings import MESSAGE_LOG_FORMAT
if MESSAGE_LOG_FORMAT is None:
return None
now = datetime.now().replace(microsecond=0)
args = dict(request.META, TYPE=message_type, MESSAGE=message, TIME=now)
return (MESSAGE_LOG_FORMAT % args) + "\n"
|
stephenmcd/django-socketio | django_socketio/utils.py | format_log | python | def format_log(request, message_type, message):
from django_socketio.settings import MESSAGE_LOG_FORMAT
if MESSAGE_LOG_FORMAT is None:
return None
now = datetime.now().replace(microsecond=0)
args = dict(request.META, TYPE=message_type, MESSAGE=message, TIME=now)
return (MESSAGE_LOG_FORMAT % args) + "\n" | Formats a log message similar to gevent's pywsgi request logging. | train | https://github.com/stephenmcd/django-socketio/blob/b704f912551829a3bcf15872ba0e1baf81dea106/django_socketio/utils.py#L49-L58 | null |
from datetime import datetime
from django_socketio.channels import CHANNELS
from django_socketio.clients import CLIENTS
class NoSocket(Exception):
"""
Raised when no clients are available to broadcast to.
"""
def send(session_id, message):
"""
Send a message to the socket for the given session ID.
"""
try:
socket = CLIENTS[session_id][1]
except KeyError:
raise NoSocket("There is no socket with the session ID: " + session_id)
socket.send(message)
def broadcast(message):
"""
Find the first socket and use it to broadcast to all sockets
including the socket itself.
"""
try:
socket = CLIENTS.values()[0][1]
except IndexError:
raise NoSocket("There are no clients.")
socket.send_and_broadcast(message)
def broadcast_channel(message, channel):
"""
Find the first socket for the given channel, and use it to
broadcast to the channel, including the socket itself.
"""
try:
socket = CLIENTS[CHANNELS.get(channel, [])[0]][1]
except (IndexError, KeyError):
raise NoSocket("There are no clients on the channel: " + channel)
socket.send_and_broadcast_channel(message, channel)
|
stephenmcd/django-socketio | django_socketio/management/commands/runserver_socketio.py | Command.get_handler | python | def get_handler(self, *args, **options):
handler = WSGIHandler()
try:
from django.contrib.staticfiles.handlers import StaticFilesHandler
except ImportError:
return handler
use_static_handler = options.get('use_static_handler', True)
insecure_serving = options.get('insecure_serving', False)
if (settings.DEBUG and use_static_handler or
(use_static_handler and insecure_serving)):
handler = StaticFilesHandler(handler)
return handler | Returns the django.contrib.staticfiles handler. | train | https://github.com/stephenmcd/django-socketio/blob/b704f912551829a3bcf15872ba0e1baf81dea106/django_socketio/management/commands/runserver_socketio.py#L72-L86 | null | class Command(BaseCommand):
def handle(self, addrport="", *args, **options):
if not addrport:
self.addr = HOST
self.port = PORT
else:
m = match(naiveip_re, addrport)
if m is None:
raise CommandError('"%s" is not a valid port number '
'or address:port pair.' % addrport)
self.addr, _, _, _, self.port = m.groups()
# Make the port available here for the path:
# socketio_tags.socketio ->
# socketio_scripts.html ->
# io.Socket JS constructor
# allowing the port to be set as the client-side default there.
environ["DJANGO_SOCKETIO_PORT"] = str(self.port)
start_new_thread(reload_watcher, ())
try:
bind = (self.addr, int(self.port))
print
print "SocketIOServer running on %s:%s" % bind
print
handler = self.get_handler(*args, **options)
server = SocketIOServer(bind, handler, resource="socket.io")
server.serve_forever()
except KeyboardInterrupt:
client_end_all()
if RELOAD:
server.kill()
print
print "Reloading..."
restart_with_reloader()
else:
raise
|
stephenmcd/django-socketio | django_socketio/events.py | Event.send | python | def send(self, request, socket, context, *args):
for handler, pattern in self.handlers:
no_channel = not pattern and not socket.channels
if self.name.endswith("subscribe") and pattern:
matches = [pattern.match(args[0])]
else:
matches = [pattern.match(c) for c in socket.channels if pattern]
if no_channel or filter(None, matches):
handler(request, socket, context, *args) | When an event is sent, run all relevant handlers. Relevant
handlers are those without a channel pattern when the given
socket is not subscribed to any particular channel, or the
handlers with a channel pattern that matches any of the
channels that the given socket is subscribed to.
In the case of subscribe/unsubscribe, match the channel arg
being sent to the channel pattern. | train | https://github.com/stephenmcd/django-socketio/blob/b704f912551829a3bcf15872ba0e1baf81dea106/django_socketio/events.py#L53-L71 | null | class Event(object):
"""
Signal-like object for Socket.IO events that supports
filtering on channels. Registering event handlers is
performed by using the Event instance as a decorator::
@on_message
def message(request, socket, message):
...
Event handlers can also be registered for particular
channels using the channel keyword argument with a
regular expression pattern::
@on_message(channel="^room-")
def message(request, socket, message):
...
The ``on_connect`` event cannot be registered with a
channel pattern since channel subscription occurs
after a connection is established.
"""
def __init__(self, supports_channels=True):
self.supports_channels = supports_channels
self.handlers = []
def __call__(self, handler=None, channel=None):
"""
Decorates the given handler. The event may be called
with only a channel argument, in which case return a
decorator with the channel argument bound.
"""
if handler is None:
def handler_with_channel(handler):
return self.__call__(handler, channel)
return handler_with_channel
if channel:
if not self.supports_channels:
raise EventError("The %s event does not support channels so "
"the handler `%s` could not be registered" %
(self.name, handler.__name__))
channel = re.compile(channel)
self.handlers.append((handler, channel))
|
stephenmcd/django-socketio | django_socketio/example_project/chat/views.py | rooms | python | def rooms(request, template="rooms.html"):
context = {"rooms": ChatRoom.objects.all()}
return render(request, template, context) | Homepage - lists all rooms. | train | https://github.com/stephenmcd/django-socketio/blob/b704f912551829a3bcf15872ba0e1baf81dea106/django_socketio/example_project/chat/views.py#L9-L14 | null |
from django.contrib.auth.decorators import user_passes_test
from django.shortcuts import get_object_or_404, render, redirect
from django_socketio import broadcast, broadcast_channel, NoSocket
from chat.models import ChatRoom
def room(request, slug, template="room.html"):
"""
Show a room.
"""
context = {"room": get_object_or_404(ChatRoom, slug=slug)}
return render(request, template, context)
def create(request):
"""
Handles post from the "Add room" form on the homepage, and
redirects to the new room.
"""
name = request.POST.get("name")
if name:
room, created = ChatRoom.objects.get_or_create(name=name)
return redirect(room)
return redirect(rooms)
@user_passes_test(lambda user: user.is_staff)
def system_message(request, template="system_message.html"):
context = {"rooms": ChatRoom.objects.all()}
if request.method == "POST":
room = request.POST["room"]
data = {"action": "system", "message": request.POST["message"]}
try:
if room:
broadcast_channel(data, channel="room-" + room)
else:
broadcast(data)
except NoSocket, e:
context["message"] = e
else:
context["message"] = "Message sent"
return render(request, template, context)
|
stephenmcd/django-socketio | django_socketio/example_project/chat/views.py | room | python | def room(request, slug, template="room.html"):
context = {"room": get_object_or_404(ChatRoom, slug=slug)}
return render(request, template, context) | Show a room. | train | https://github.com/stephenmcd/django-socketio/blob/b704f912551829a3bcf15872ba0e1baf81dea106/django_socketio/example_project/chat/views.py#L17-L22 | null |
from django.contrib.auth.decorators import user_passes_test
from django.shortcuts import get_object_or_404, render, redirect
from django_socketio import broadcast, broadcast_channel, NoSocket
from chat.models import ChatRoom
def rooms(request, template="rooms.html"):
"""
Homepage - lists all rooms.
"""
context = {"rooms": ChatRoom.objects.all()}
return render(request, template, context)
def create(request):
"""
Handles post from the "Add room" form on the homepage, and
redirects to the new room.
"""
name = request.POST.get("name")
if name:
room, created = ChatRoom.objects.get_or_create(name=name)
return redirect(room)
return redirect(rooms)
@user_passes_test(lambda user: user.is_staff)
def system_message(request, template="system_message.html"):
context = {"rooms": ChatRoom.objects.all()}
if request.method == "POST":
room = request.POST["room"]
data = {"action": "system", "message": request.POST["message"]}
try:
if room:
broadcast_channel(data, channel="room-" + room)
else:
broadcast(data)
except NoSocket, e:
context["message"] = e
else:
context["message"] = "Message sent"
return render(request, template, context)
|
stephenmcd/django-socketio | django_socketio/example_project/chat/views.py | create | python | def create(request):
name = request.POST.get("name")
if name:
room, created = ChatRoom.objects.get_or_create(name=name)
return redirect(room)
return redirect(rooms) | Handles post from the "Add room" form on the homepage, and
redirects to the new room. | train | https://github.com/stephenmcd/django-socketio/blob/b704f912551829a3bcf15872ba0e1baf81dea106/django_socketio/example_project/chat/views.py#L25-L34 | null |
from django.contrib.auth.decorators import user_passes_test
from django.shortcuts import get_object_or_404, render, redirect
from django_socketio import broadcast, broadcast_channel, NoSocket
from chat.models import ChatRoom
def rooms(request, template="rooms.html"):
"""
Homepage - lists all rooms.
"""
context = {"rooms": ChatRoom.objects.all()}
return render(request, template, context)
def room(request, slug, template="room.html"):
"""
Show a room.
"""
context = {"room": get_object_or_404(ChatRoom, slug=slug)}
return render(request, template, context)
@user_passes_test(lambda user: user.is_staff)
def system_message(request, template="system_message.html"):
context = {"rooms": ChatRoom.objects.all()}
if request.method == "POST":
room = request.POST["room"]
data = {"action": "system", "message": request.POST["message"]}
try:
if room:
broadcast_channel(data, channel="room-" + room)
else:
broadcast(data)
except NoSocket, e:
context["message"] = e
else:
context["message"] = "Message sent"
return render(request, template, context)
|
brean/python-pathfinding | pathfinding/core/util.py | backtrace | python | def backtrace(node):
path = [(node.x, node.y)]
while node.parent:
node = node.parent
path.append((node.x, node.y))
path.reverse()
return path | Backtrace according to the parent records and return the path.
(including both start and end nodes) | train | https://github.com/brean/python-pathfinding/blob/b857bf85e514a1712b40e29ccb5a473cd7fd5c80/pathfinding/core/util.py#L10-L20 | null | # -*- coding: utf-8 -*-
import math
import copy
# square root of 2 for diagonal distance
SQRT2 = math.sqrt(2)
def bi_backtrace(node_a, node_b):
"""
Backtrace from start and end node, returns the path for bi-directional A*
(including both start and end nodes)
"""
path_a = backtrace(node_a)
path_b = backtrace(node_b)
path_b.reverse()
return path_a + path_b
def raytrace(coords_a, coords_b):
line = []
x0, y0 = coords_a
x1, y1 = coords_b
dx = x1 - x0
dy = y1 - y0
t = 0
grid_pos = [x0, y0]
t_for_one = \
abs(1.0 / dx) if dx > 0 else 10000, \
abs(1.0 / dy) if dy > 0 else 10000
frac_start_pos = (x0 + .5) - x0, (y0 + .5) - y0
t_for_next_border = [
(1 - frac_start_pos[0] if dx < 0 else frac_start_pos[0]) * t_for_one[0],
(1 - frac_start_pos[1] if dx < 0 else frac_start_pos[1]) * t_for_one[1]
]
step = \
1 if dx >= 0 else -1, \
1 if dy >= 0 else -1
while t <= 1:
line.append(copy.copy(grid_pos))
index = 0 if t_for_next_border[0] <= t_for_next_border[1] else 1
t = t_for_next_border[index]
t_for_next_border[index] += t_for_one[index]
grid_pos[index] += step[index]
return line
def bresenham(coords_a, coords_b):
'''
Given the start and end coordinates, return all the coordinates lying
on the line formed by these coordinates, based on Bresenham's algorithm.
http://en.wikipedia.org/wiki/Bresenham's_line_algorithm#Simplification
'''
line = []
x0, y0 = coords_a
x1, y1 = coords_b
dx = abs(x1 - x0)
dy = abs(y1 - y0)
sx = 1 if x0 < x1 else -1
sy = 1 if y0 < y1 else -1
err = dx - dy
while True:
line += [[x0, y0]]
if x0 == x1 and y0 == y1:
break
e2 = err * 2
if e2 > -dy:
err = err - dy
x0 = x0 + sx
if e2 < dx:
err = err + dx
y0 = y0 + sy
return line
def expand_path(path):
'''
Given a compressed path, return a new path that has all the segments
in it interpolated.
'''
expanded = []
if len(path) < 2:
return expanded
for i in range(len(path)-1):
expanded += bresenham(path[i], path[i + 1])
expanded += [path[:-1]]
return expanded
def smoothen_path(grid, path, use_raytrace=False):
x0, y0 = path[0]
sx, sy = path[0]
new_path = [[sx, sy]]
interpolate = raytrace if use_raytrace else bresenham
last_valid = path[1]
for coord in path[2:-1]:
line = interpolate([sx, sy], coord)
blocked = False
for test_coord in line[1:]:
if not grid.walkable(test_coord[0], test_coord[1]):
blocked = True
break
if not blocked:
new_path.append(last_valid)
sx, sy = last_valid
last_valid = coord
new_path.append(path[-1])
return new_path
|
brean/python-pathfinding | pathfinding/core/util.py | bi_backtrace | python | def bi_backtrace(node_a, node_b):
path_a = backtrace(node_a)
path_b = backtrace(node_b)
path_b.reverse()
return path_a + path_b | Backtrace from start and end node, returns the path for bi-directional A*
(including both start and end nodes) | train | https://github.com/brean/python-pathfinding/blob/b857bf85e514a1712b40e29ccb5a473cd7fd5c80/pathfinding/core/util.py#L23-L31 | [
"def backtrace(node):\n \"\"\"\n Backtrace according to the parent records and return the path.\n (including both start and end nodes)\n \"\"\"\n path = [(node.x, node.y)]\n while node.parent:\n node = node.parent\n path.append((node.x, node.y))\n path.reverse()\n return path\n"
] | # -*- coding: utf-8 -*-
import math
import copy
# square root of 2 for diagonal distance
SQRT2 = math.sqrt(2)
def backtrace(node):
"""
Backtrace according to the parent records and return the path.
(including both start and end nodes)
"""
path = [(node.x, node.y)]
while node.parent:
node = node.parent
path.append((node.x, node.y))
path.reverse()
return path
def raytrace(coords_a, coords_b):
line = []
x0, y0 = coords_a
x1, y1 = coords_b
dx = x1 - x0
dy = y1 - y0
t = 0
grid_pos = [x0, y0]
t_for_one = \
abs(1.0 / dx) if dx > 0 else 10000, \
abs(1.0 / dy) if dy > 0 else 10000
frac_start_pos = (x0 + .5) - x0, (y0 + .5) - y0
t_for_next_border = [
(1 - frac_start_pos[0] if dx < 0 else frac_start_pos[0]) * t_for_one[0],
(1 - frac_start_pos[1] if dx < 0 else frac_start_pos[1]) * t_for_one[1]
]
step = \
1 if dx >= 0 else -1, \
1 if dy >= 0 else -1
while t <= 1:
line.append(copy.copy(grid_pos))
index = 0 if t_for_next_border[0] <= t_for_next_border[1] else 1
t = t_for_next_border[index]
t_for_next_border[index] += t_for_one[index]
grid_pos[index] += step[index]
return line
def bresenham(coords_a, coords_b):
'''
Given the start and end coordinates, return all the coordinates lying
on the line formed by these coordinates, based on Bresenham's algorithm.
http://en.wikipedia.org/wiki/Bresenham's_line_algorithm#Simplification
'''
line = []
x0, y0 = coords_a
x1, y1 = coords_b
dx = abs(x1 - x0)
dy = abs(y1 - y0)
sx = 1 if x0 < x1 else -1
sy = 1 if y0 < y1 else -1
err = dx - dy
while True:
line += [[x0, y0]]
if x0 == x1 and y0 == y1:
break
e2 = err * 2
if e2 > -dy:
err = err - dy
x0 = x0 + sx
if e2 < dx:
err = err + dx
y0 = y0 + sy
return line
def expand_path(path):
'''
Given a compressed path, return a new path that has all the segments
in it interpolated.
'''
expanded = []
if len(path) < 2:
return expanded
for i in range(len(path)-1):
expanded += bresenham(path[i], path[i + 1])
expanded += [path[:-1]]
return expanded
def smoothen_path(grid, path, use_raytrace=False):
x0, y0 = path[0]
sx, sy = path[0]
new_path = [[sx, sy]]
interpolate = raytrace if use_raytrace else bresenham
last_valid = path[1]
for coord in path[2:-1]:
line = interpolate([sx, sy], coord)
blocked = False
for test_coord in line[1:]:
if not grid.walkable(test_coord[0], test_coord[1]):
blocked = True
break
if not blocked:
new_path.append(last_valid)
sx, sy = last_valid
last_valid = coord
new_path.append(path[-1])
return new_path
|
brean/python-pathfinding | pathfinding/core/util.py | bresenham | python | def bresenham(coords_a, coords_b):
'''
Given the start and end coordinates, return all the coordinates lying
on the line formed by these coordinates, based on Bresenham's algorithm.
http://en.wikipedia.org/wiki/Bresenham's_line_algorithm#Simplification
'''
line = []
x0, y0 = coords_a
x1, y1 = coords_b
dx = abs(x1 - x0)
dy = abs(y1 - y0)
sx = 1 if x0 < x1 else -1
sy = 1 if y0 < y1 else -1
err = dx - dy
while True:
line += [[x0, y0]]
if x0 == x1 and y0 == y1:
break
e2 = err * 2
if e2 > -dy:
err = err - dy
x0 = x0 + sx
if e2 < dx:
err = err + dx
y0 = y0 + sy
return line | Given the start and end coordinates, return all the coordinates lying
on the line formed by these coordinates, based on Bresenham's algorithm.
http://en.wikipedia.org/wiki/Bresenham's_line_algorithm#Simplification | train | https://github.com/brean/python-pathfinding/blob/b857bf85e514a1712b40e29ccb5a473cd7fd5c80/pathfinding/core/util.py#L67-L94 | null | # -*- coding: utf-8 -*-
import math
import copy
# square root of 2 for diagonal distance
SQRT2 = math.sqrt(2)
def backtrace(node):
"""
Backtrace according to the parent records and return the path.
(including both start and end nodes)
"""
path = [(node.x, node.y)]
while node.parent:
node = node.parent
path.append((node.x, node.y))
path.reverse()
return path
def bi_backtrace(node_a, node_b):
"""
Backtrace from start and end node, returns the path for bi-directional A*
(including both start and end nodes)
"""
path_a = backtrace(node_a)
path_b = backtrace(node_b)
path_b.reverse()
return path_a + path_b
def raytrace(coords_a, coords_b):
line = []
x0, y0 = coords_a
x1, y1 = coords_b
dx = x1 - x0
dy = y1 - y0
t = 0
grid_pos = [x0, y0]
t_for_one = \
abs(1.0 / dx) if dx > 0 else 10000, \
abs(1.0 / dy) if dy > 0 else 10000
frac_start_pos = (x0 + .5) - x0, (y0 + .5) - y0
t_for_next_border = [
(1 - frac_start_pos[0] if dx < 0 else frac_start_pos[0]) * t_for_one[0],
(1 - frac_start_pos[1] if dx < 0 else frac_start_pos[1]) * t_for_one[1]
]
step = \
1 if dx >= 0 else -1, \
1 if dy >= 0 else -1
while t <= 1:
line.append(copy.copy(grid_pos))
index = 0 if t_for_next_border[0] <= t_for_next_border[1] else 1
t = t_for_next_border[index]
t_for_next_border[index] += t_for_one[index]
grid_pos[index] += step[index]
return line
def expand_path(path):
'''
Given a compressed path, return a new path that has all the segments
in it interpolated.
'''
expanded = []
if len(path) < 2:
return expanded
for i in range(len(path)-1):
expanded += bresenham(path[i], path[i + 1])
expanded += [path[:-1]]
return expanded
def smoothen_path(grid, path, use_raytrace=False):
x0, y0 = path[0]
sx, sy = path[0]
new_path = [[sx, sy]]
interpolate = raytrace if use_raytrace else bresenham
last_valid = path[1]
for coord in path[2:-1]:
line = interpolate([sx, sy], coord)
blocked = False
for test_coord in line[1:]:
if not grid.walkable(test_coord[0], test_coord[1]):
blocked = True
break
if not blocked:
new_path.append(last_valid)
sx, sy = last_valid
last_valid = coord
new_path.append(path[-1])
return new_path
|
brean/python-pathfinding | pathfinding/core/util.py | expand_path | python | def expand_path(path):
'''
Given a compressed path, return a new path that has all the segments
in it interpolated.
'''
expanded = []
if len(path) < 2:
return expanded
for i in range(len(path)-1):
expanded += bresenham(path[i], path[i + 1])
expanded += [path[:-1]]
return expanded | Given a compressed path, return a new path that has all the segments
in it interpolated. | train | https://github.com/brean/python-pathfinding/blob/b857bf85e514a1712b40e29ccb5a473cd7fd5c80/pathfinding/core/util.py#L97-L108 | [
"def bresenham(coords_a, coords_b):\n '''\n Given the start and end coordinates, return all the coordinates lying\n on the line formed by these coordinates, based on Bresenham's algorithm.\n http://en.wikipedia.org/wiki/Bresenham's_line_algorithm#Simplification\n '''\n line = []\n x0, y0 = coords_a\n x1, y1 = coords_b\n dx = abs(x1 - x0)\n dy = abs(y1 - y0)\n sx = 1 if x0 < x1 else -1\n sy = 1 if y0 < y1 else -1\n err = dx - dy\n\n while True:\n line += [[x0, y0]]\n if x0 == x1 and y0 == y1:\n break\n e2 = err * 2\n if e2 > -dy:\n err = err - dy\n x0 = x0 + sx\n if e2 < dx:\n err = err + dx\n y0 = y0 + sy\n\n return line\n"
] | # -*- coding: utf-8 -*-
import math
import copy
# square root of 2 for diagonal distance
SQRT2 = math.sqrt(2)
def backtrace(node):
"""
Backtrace according to the parent records and return the path.
(including both start and end nodes)
"""
path = [(node.x, node.y)]
while node.parent:
node = node.parent
path.append((node.x, node.y))
path.reverse()
return path
def bi_backtrace(node_a, node_b):
"""
Backtrace from start and end node, returns the path for bi-directional A*
(including both start and end nodes)
"""
path_a = backtrace(node_a)
path_b = backtrace(node_b)
path_b.reverse()
return path_a + path_b
def raytrace(coords_a, coords_b):
line = []
x0, y0 = coords_a
x1, y1 = coords_b
dx = x1 - x0
dy = y1 - y0
t = 0
grid_pos = [x0, y0]
t_for_one = \
abs(1.0 / dx) if dx > 0 else 10000, \
abs(1.0 / dy) if dy > 0 else 10000
frac_start_pos = (x0 + .5) - x0, (y0 + .5) - y0
t_for_next_border = [
(1 - frac_start_pos[0] if dx < 0 else frac_start_pos[0]) * t_for_one[0],
(1 - frac_start_pos[1] if dx < 0 else frac_start_pos[1]) * t_for_one[1]
]
step = \
1 if dx >= 0 else -1, \
1 if dy >= 0 else -1
while t <= 1:
line.append(copy.copy(grid_pos))
index = 0 if t_for_next_border[0] <= t_for_next_border[1] else 1
t = t_for_next_border[index]
t_for_next_border[index] += t_for_one[index]
grid_pos[index] += step[index]
return line
def bresenham(coords_a, coords_b):
'''
Given the start and end coordinates, return all the coordinates lying
on the line formed by these coordinates, based on Bresenham's algorithm.
http://en.wikipedia.org/wiki/Bresenham's_line_algorithm#Simplification
'''
line = []
x0, y0 = coords_a
x1, y1 = coords_b
dx = abs(x1 - x0)
dy = abs(y1 - y0)
sx = 1 if x0 < x1 else -1
sy = 1 if y0 < y1 else -1
err = dx - dy
while True:
line += [[x0, y0]]
if x0 == x1 and y0 == y1:
break
e2 = err * 2
if e2 > -dy:
err = err - dy
x0 = x0 + sx
if e2 < dx:
err = err + dx
y0 = y0 + sy
return line
def smoothen_path(grid, path, use_raytrace=False):
x0, y0 = path[0]
sx, sy = path[0]
new_path = [[sx, sy]]
interpolate = raytrace if use_raytrace else bresenham
last_valid = path[1]
for coord in path[2:-1]:
line = interpolate([sx, sy], coord)
blocked = False
for test_coord in line[1:]:
if not grid.walkable(test_coord[0], test_coord[1]):
blocked = True
break
if not blocked:
new_path.append(last_valid)
sx, sy = last_valid
last_valid = coord
new_path.append(path[-1])
return new_path
|
brean/python-pathfinding | pathfinding/finder/finder.py | Finder.calc_cost | python | def calc_cost(self, node_a, node_b):
if node_b.x - node_a.x == 0 or node_b.y - node_a.y == 0:
# direct neighbor - distance is 1
ng = 1
else:
# not a direct neighbor - diagonal movement
ng = SQRT2
# weight for weighted algorithms
if self.weighted:
ng *= node_b.weight
return node_a.g + ng | get the distance between current node and the neighbor (cost) | train | https://github.com/brean/python-pathfinding/blob/b857bf85e514a1712b40e29ccb5a473cd7fd5c80/pathfinding/finder/finder.py#L57-L72 | null | class Finder(object):
def __init__(self, heuristic=None, weight=1,
diagonal_movement=DiagonalMovement.never,
weighted=True,
time_limit=TIME_LIMIT,
max_runs=MAX_RUNS):
"""
find shortest path
:param heuristic: heuristic used to calculate distance of 2 points
(defaults to manhatten)
:param weight: weight for the edges
:param diagonal_movement: if diagonal movement is allowed
(see enum in diagonal_movement)
:param weighted: the algorithm supports weighted nodes
(should be True for A* and Dijkstra)
:param time_limit: max. runtime in seconds
:param max_runs: max. amount of tries until we abort the search
(optional, only if we enter huge grids and have time constrains)
<=0 means there are no constrains and the code might run on any
large map.
"""
self.time_limit = time_limit
self.max_runs = max_runs
self.weighted = weighted
self.diagonal_movement = diagonal_movement
self.weight = weight
self.heuristic = heuristic
def apply_heuristic(self, node_a, node_b, heuristic=None):
"""
helper function to apply heuristic
"""
if not heuristic:
heuristic = self.heuristic
return heuristic(
abs(node_a.x - node_b.x),
abs(node_a.y - node_b.y))
def find_neighbors(self, grid, node, diagonal_movement=None):
'''
find neighbor, same for Djikstra, A*, Bi-A*, IDA*
'''
if not diagonal_movement:
diagonal_movement = self.diagonal_movement
return grid.neighbors(node, diagonal_movement=diagonal_movement)
def keep_running(self):
"""
check, if we run into time or iteration constrains.
:returns: True if we keep running and False if we run into a constraint
"""
if self.runs >= self.max_runs:
raise ExecutionRunsException(
'{} run into barrier of {} iterations without '
'finding the destination'.format(
self.__class__.__name__, self.max_runs))
if time.time() - self.start_time >= self.time_limit:
raise ExecutionTimeException(
'{} took longer than {} seconds, aborting!'.format(
self.__class__.__name__, self.time_limit))
def process_node(self, node, parent, end, open_list, open_value=True):
'''
we check if the given node is path of the path by calculating its
cost and add or remove it from our path
:param node: the node we like to test
(the neighbor in A* or jump-node in JumpPointSearch)
:param parent: the parent node (the current node we like to test)
:param end: the end point to calculate the cost of the path
:param open_list: the list that keeps track of our current path
:param open_value: needed if we like to set the open list to something
else than True (used for bi-directional algorithms)
'''
# calculate cost from current node (parent) to the next node (neighbor)
ng = self.calc_cost(parent, node)
if not node.opened or ng < node.g:
node.g = ng
node.h = node.h or \
self.apply_heuristic(node, end) * self.weight
# f is the estimated total cost from start to goal
node.f = node.g + node.h
node.parent = parent
if not node.opened:
heapq.heappush(open_list, node)
node.opened = open_value
else:
# the node can be reached with smaller cost.
# Since its f value has been updated, we have to
# update its position in the open list
open_list.remove(node)
heapq.heappush(open_list, node)
def find_path(self, start, end, grid):
"""
find a path from start to end node on grid by iterating over
all neighbors of a node (see check_neighbors)
:param start: start node
:param end: end node
:param grid: grid that stores all possible steps/tiles as 2D-list
:return:
"""
self.start_time = time.time() # execution time limitation
self.runs = 0 # count number of iterations
start.opened = True
open_list = [start]
while len(open_list) > 0:
self.runs += 1
self.keep_running()
path = self.check_neighbors(start, end, grid, open_list)
if path:
return path, self.runs
# failed to find path
return [], self.runs
|
brean/python-pathfinding | pathfinding/finder/finder.py | Finder.apply_heuristic | python | def apply_heuristic(self, node_a, node_b, heuristic=None):
if not heuristic:
heuristic = self.heuristic
return heuristic(
abs(node_a.x - node_b.x),
abs(node_a.y - node_b.y)) | helper function to apply heuristic | train | https://github.com/brean/python-pathfinding/blob/b857bf85e514a1712b40e29ccb5a473cd7fd5c80/pathfinding/finder/finder.py#L74-L82 | null | class Finder(object):
def __init__(self, heuristic=None, weight=1,
diagonal_movement=DiagonalMovement.never,
weighted=True,
time_limit=TIME_LIMIT,
max_runs=MAX_RUNS):
"""
find shortest path
:param heuristic: heuristic used to calculate distance of 2 points
(defaults to manhatten)
:param weight: weight for the edges
:param diagonal_movement: if diagonal movement is allowed
(see enum in diagonal_movement)
:param weighted: the algorithm supports weighted nodes
(should be True for A* and Dijkstra)
:param time_limit: max. runtime in seconds
:param max_runs: max. amount of tries until we abort the search
(optional, only if we enter huge grids and have time constrains)
<=0 means there are no constrains and the code might run on any
large map.
"""
self.time_limit = time_limit
self.max_runs = max_runs
self.weighted = weighted
self.diagonal_movement = diagonal_movement
self.weight = weight
self.heuristic = heuristic
def calc_cost(self, node_a, node_b):
"""
get the distance between current node and the neighbor (cost)
"""
if node_b.x - node_a.x == 0 or node_b.y - node_a.y == 0:
# direct neighbor - distance is 1
ng = 1
else:
# not a direct neighbor - diagonal movement
ng = SQRT2
# weight for weighted algorithms
if self.weighted:
ng *= node_b.weight
return node_a.g + ng
def find_neighbors(self, grid, node, diagonal_movement=None):
'''
find neighbor, same for Djikstra, A*, Bi-A*, IDA*
'''
if not diagonal_movement:
diagonal_movement = self.diagonal_movement
return grid.neighbors(node, diagonal_movement=diagonal_movement)
def keep_running(self):
"""
check, if we run into time or iteration constrains.
:returns: True if we keep running and False if we run into a constraint
"""
if self.runs >= self.max_runs:
raise ExecutionRunsException(
'{} run into barrier of {} iterations without '
'finding the destination'.format(
self.__class__.__name__, self.max_runs))
if time.time() - self.start_time >= self.time_limit:
raise ExecutionTimeException(
'{} took longer than {} seconds, aborting!'.format(
self.__class__.__name__, self.time_limit))
def process_node(self, node, parent, end, open_list, open_value=True):
'''
we check if the given node is path of the path by calculating its
cost and add or remove it from our path
:param node: the node we like to test
(the neighbor in A* or jump-node in JumpPointSearch)
:param parent: the parent node (the current node we like to test)
:param end: the end point to calculate the cost of the path
:param open_list: the list that keeps track of our current path
:param open_value: needed if we like to set the open list to something
else than True (used for bi-directional algorithms)
'''
# calculate cost from current node (parent) to the next node (neighbor)
ng = self.calc_cost(parent, node)
if not node.opened or ng < node.g:
node.g = ng
node.h = node.h or \
self.apply_heuristic(node, end) * self.weight
# f is the estimated total cost from start to goal
node.f = node.g + node.h
node.parent = parent
if not node.opened:
heapq.heappush(open_list, node)
node.opened = open_value
else:
# the node can be reached with smaller cost.
# Since its f value has been updated, we have to
# update its position in the open list
open_list.remove(node)
heapq.heappush(open_list, node)
def find_path(self, start, end, grid):
"""
find a path from start to end node on grid by iterating over
all neighbors of a node (see check_neighbors)
:param start: start node
:param end: end node
:param grid: grid that stores all possible steps/tiles as 2D-list
:return:
"""
self.start_time = time.time() # execution time limitation
self.runs = 0 # count number of iterations
start.opened = True
open_list = [start]
while len(open_list) > 0:
self.runs += 1
self.keep_running()
path = self.check_neighbors(start, end, grid, open_list)
if path:
return path, self.runs
# failed to find path
return [], self.runs
|
brean/python-pathfinding | pathfinding/finder/finder.py | Finder.find_neighbors | python | def find_neighbors(self, grid, node, diagonal_movement=None):
'''
find neighbor, same for Djikstra, A*, Bi-A*, IDA*
'''
if not diagonal_movement:
diagonal_movement = self.diagonal_movement
return grid.neighbors(node, diagonal_movement=diagonal_movement) | find neighbor, same for Djikstra, A*, Bi-A*, IDA* | train | https://github.com/brean/python-pathfinding/blob/b857bf85e514a1712b40e29ccb5a473cd7fd5c80/pathfinding/finder/finder.py#L84-L90 | null | class Finder(object):
def __init__(self, heuristic=None, weight=1,
diagonal_movement=DiagonalMovement.never,
weighted=True,
time_limit=TIME_LIMIT,
max_runs=MAX_RUNS):
"""
find shortest path
:param heuristic: heuristic used to calculate distance of 2 points
(defaults to manhatten)
:param weight: weight for the edges
:param diagonal_movement: if diagonal movement is allowed
(see enum in diagonal_movement)
:param weighted: the algorithm supports weighted nodes
(should be True for A* and Dijkstra)
:param time_limit: max. runtime in seconds
:param max_runs: max. amount of tries until we abort the search
(optional, only if we enter huge grids and have time constrains)
<=0 means there are no constrains and the code might run on any
large map.
"""
self.time_limit = time_limit
self.max_runs = max_runs
self.weighted = weighted
self.diagonal_movement = diagonal_movement
self.weight = weight
self.heuristic = heuristic
def calc_cost(self, node_a, node_b):
"""
get the distance between current node and the neighbor (cost)
"""
if node_b.x - node_a.x == 0 or node_b.y - node_a.y == 0:
# direct neighbor - distance is 1
ng = 1
else:
# not a direct neighbor - diagonal movement
ng = SQRT2
# weight for weighted algorithms
if self.weighted:
ng *= node_b.weight
return node_a.g + ng
def apply_heuristic(self, node_a, node_b, heuristic=None):
"""
helper function to apply heuristic
"""
if not heuristic:
heuristic = self.heuristic
return heuristic(
abs(node_a.x - node_b.x),
abs(node_a.y - node_b.y))
def keep_running(self):
"""
check, if we run into time or iteration constrains.
:returns: True if we keep running and False if we run into a constraint
"""
if self.runs >= self.max_runs:
raise ExecutionRunsException(
'{} run into barrier of {} iterations without '
'finding the destination'.format(
self.__class__.__name__, self.max_runs))
if time.time() - self.start_time >= self.time_limit:
raise ExecutionTimeException(
'{} took longer than {} seconds, aborting!'.format(
self.__class__.__name__, self.time_limit))
def process_node(self, node, parent, end, open_list, open_value=True):
'''
we check if the given node is path of the path by calculating its
cost and add or remove it from our path
:param node: the node we like to test
(the neighbor in A* or jump-node in JumpPointSearch)
:param parent: the parent node (the current node we like to test)
:param end: the end point to calculate the cost of the path
:param open_list: the list that keeps track of our current path
:param open_value: needed if we like to set the open list to something
else than True (used for bi-directional algorithms)
'''
# calculate cost from current node (parent) to the next node (neighbor)
ng = self.calc_cost(parent, node)
if not node.opened or ng < node.g:
node.g = ng
node.h = node.h or \
self.apply_heuristic(node, end) * self.weight
# f is the estimated total cost from start to goal
node.f = node.g + node.h
node.parent = parent
if not node.opened:
heapq.heappush(open_list, node)
node.opened = open_value
else:
# the node can be reached with smaller cost.
# Since its f value has been updated, we have to
# update its position in the open list
open_list.remove(node)
heapq.heappush(open_list, node)
def find_path(self, start, end, grid):
"""
find a path from start to end node on grid by iterating over
all neighbors of a node (see check_neighbors)
:param start: start node
:param end: end node
:param grid: grid that stores all possible steps/tiles as 2D-list
:return:
"""
self.start_time = time.time() # execution time limitation
self.runs = 0 # count number of iterations
start.opened = True
open_list = [start]
while len(open_list) > 0:
self.runs += 1
self.keep_running()
path = self.check_neighbors(start, end, grid, open_list)
if path:
return path, self.runs
# failed to find path
return [], self.runs
|
brean/python-pathfinding | pathfinding/finder/finder.py | Finder.keep_running | python | def keep_running(self):
if self.runs >= self.max_runs:
raise ExecutionRunsException(
'{} run into barrier of {} iterations without '
'finding the destination'.format(
self.__class__.__name__, self.max_runs))
if time.time() - self.start_time >= self.time_limit:
raise ExecutionTimeException(
'{} took longer than {} seconds, aborting!'.format(
self.__class__.__name__, self.time_limit)) | check, if we run into time or iteration constrains.
:returns: True if we keep running and False if we run into a constraint | train | https://github.com/brean/python-pathfinding/blob/b857bf85e514a1712b40e29ccb5a473cd7fd5c80/pathfinding/finder/finder.py#L92-L106 | null | class Finder(object):
def __init__(self, heuristic=None, weight=1,
diagonal_movement=DiagonalMovement.never,
weighted=True,
time_limit=TIME_LIMIT,
max_runs=MAX_RUNS):
"""
find shortest path
:param heuristic: heuristic used to calculate distance of 2 points
(defaults to manhatten)
:param weight: weight for the edges
:param diagonal_movement: if diagonal movement is allowed
(see enum in diagonal_movement)
:param weighted: the algorithm supports weighted nodes
(should be True for A* and Dijkstra)
:param time_limit: max. runtime in seconds
:param max_runs: max. amount of tries until we abort the search
(optional, only if we enter huge grids and have time constrains)
<=0 means there are no constrains and the code might run on any
large map.
"""
self.time_limit = time_limit
self.max_runs = max_runs
self.weighted = weighted
self.diagonal_movement = diagonal_movement
self.weight = weight
self.heuristic = heuristic
def calc_cost(self, node_a, node_b):
"""
get the distance between current node and the neighbor (cost)
"""
if node_b.x - node_a.x == 0 or node_b.y - node_a.y == 0:
# direct neighbor - distance is 1
ng = 1
else:
# not a direct neighbor - diagonal movement
ng = SQRT2
# weight for weighted algorithms
if self.weighted:
ng *= node_b.weight
return node_a.g + ng
def apply_heuristic(self, node_a, node_b, heuristic=None):
"""
helper function to apply heuristic
"""
if not heuristic:
heuristic = self.heuristic
return heuristic(
abs(node_a.x - node_b.x),
abs(node_a.y - node_b.y))
def find_neighbors(self, grid, node, diagonal_movement=None):
'''
find neighbor, same for Djikstra, A*, Bi-A*, IDA*
'''
if not diagonal_movement:
diagonal_movement = self.diagonal_movement
return grid.neighbors(node, diagonal_movement=diagonal_movement)
def process_node(self, node, parent, end, open_list, open_value=True):
'''
we check if the given node is path of the path by calculating its
cost and add or remove it from our path
:param node: the node we like to test
(the neighbor in A* or jump-node in JumpPointSearch)
:param parent: the parent node (the current node we like to test)
:param end: the end point to calculate the cost of the path
:param open_list: the list that keeps track of our current path
:param open_value: needed if we like to set the open list to something
else than True (used for bi-directional algorithms)
'''
# calculate cost from current node (parent) to the next node (neighbor)
ng = self.calc_cost(parent, node)
if not node.opened or ng < node.g:
node.g = ng
node.h = node.h or \
self.apply_heuristic(node, end) * self.weight
# f is the estimated total cost from start to goal
node.f = node.g + node.h
node.parent = parent
if not node.opened:
heapq.heappush(open_list, node)
node.opened = open_value
else:
# the node can be reached with smaller cost.
# Since its f value has been updated, we have to
# update its position in the open list
open_list.remove(node)
heapq.heappush(open_list, node)
def find_path(self, start, end, grid):
"""
find a path from start to end node on grid by iterating over
all neighbors of a node (see check_neighbors)
:param start: start node
:param end: end node
:param grid: grid that stores all possible steps/tiles as 2D-list
:return:
"""
self.start_time = time.time() # execution time limitation
self.runs = 0 # count number of iterations
start.opened = True
open_list = [start]
while len(open_list) > 0:
self.runs += 1
self.keep_running()
path = self.check_neighbors(start, end, grid, open_list)
if path:
return path, self.runs
# failed to find path
return [], self.runs
|
brean/python-pathfinding | pathfinding/finder/finder.py | Finder.process_node | python | def process_node(self, node, parent, end, open_list, open_value=True):
'''
we check if the given node is path of the path by calculating its
cost and add or remove it from our path
:param node: the node we like to test
(the neighbor in A* or jump-node in JumpPointSearch)
:param parent: the parent node (the current node we like to test)
:param end: the end point to calculate the cost of the path
:param open_list: the list that keeps track of our current path
:param open_value: needed if we like to set the open list to something
else than True (used for bi-directional algorithms)
'''
# calculate cost from current node (parent) to the next node (neighbor)
ng = self.calc_cost(parent, node)
if not node.opened or ng < node.g:
node.g = ng
node.h = node.h or \
self.apply_heuristic(node, end) * self.weight
# f is the estimated total cost from start to goal
node.f = node.g + node.h
node.parent = parent
if not node.opened:
heapq.heappush(open_list, node)
node.opened = open_value
else:
# the node can be reached with smaller cost.
# Since its f value has been updated, we have to
# update its position in the open list
open_list.remove(node)
heapq.heappush(open_list, node) | we check if the given node is path of the path by calculating its
cost and add or remove it from our path
:param node: the node we like to test
(the neighbor in A* or jump-node in JumpPointSearch)
:param parent: the parent node (the current node we like to test)
:param end: the end point to calculate the cost of the path
:param open_list: the list that keeps track of our current path
:param open_value: needed if we like to set the open list to something
else than True (used for bi-directional algorithms) | train | https://github.com/brean/python-pathfinding/blob/b857bf85e514a1712b40e29ccb5a473cd7fd5c80/pathfinding/finder/finder.py#L108-L140 | [
"def calc_cost(self, node_a, node_b):\n \"\"\"\n get the distance between current node and the neighbor (cost)\n \"\"\"\n if node_b.x - node_a.x == 0 or node_b.y - node_a.y == 0:\n # direct neighbor - distance is 1\n ng = 1\n else:\n # not a direct neighbor - diagonal movement\n ng = SQRT2\n\n # weight for weighted algorithms\n if self.weighted:\n ng *= node_b.weight\n\n return node_a.g + ng\n",
"def apply_heuristic(self, node_a, node_b, heuristic=None):\n \"\"\"\n helper function to apply heuristic\n \"\"\"\n if not heuristic:\n heuristic = self.heuristic\n return heuristic(\n abs(node_a.x - node_b.x),\n abs(node_a.y - node_b.y))\n"
] | class Finder(object):
def __init__(self, heuristic=None, weight=1,
diagonal_movement=DiagonalMovement.never,
weighted=True,
time_limit=TIME_LIMIT,
max_runs=MAX_RUNS):
"""
find shortest path
:param heuristic: heuristic used to calculate distance of 2 points
(defaults to manhatten)
:param weight: weight for the edges
:param diagonal_movement: if diagonal movement is allowed
(see enum in diagonal_movement)
:param weighted: the algorithm supports weighted nodes
(should be True for A* and Dijkstra)
:param time_limit: max. runtime in seconds
:param max_runs: max. amount of tries until we abort the search
(optional, only if we enter huge grids and have time constrains)
<=0 means there are no constrains and the code might run on any
large map.
"""
self.time_limit = time_limit
self.max_runs = max_runs
self.weighted = weighted
self.diagonal_movement = diagonal_movement
self.weight = weight
self.heuristic = heuristic
def calc_cost(self, node_a, node_b):
"""
get the distance between current node and the neighbor (cost)
"""
if node_b.x - node_a.x == 0 or node_b.y - node_a.y == 0:
# direct neighbor - distance is 1
ng = 1
else:
# not a direct neighbor - diagonal movement
ng = SQRT2
# weight for weighted algorithms
if self.weighted:
ng *= node_b.weight
return node_a.g + ng
def apply_heuristic(self, node_a, node_b, heuristic=None):
"""
helper function to apply heuristic
"""
if not heuristic:
heuristic = self.heuristic
return heuristic(
abs(node_a.x - node_b.x),
abs(node_a.y - node_b.y))
def find_neighbors(self, grid, node, diagonal_movement=None):
'''
find neighbor, same for Djikstra, A*, Bi-A*, IDA*
'''
if not diagonal_movement:
diagonal_movement = self.diagonal_movement
return grid.neighbors(node, diagonal_movement=diagonal_movement)
def keep_running(self):
"""
check, if we run into time or iteration constrains.
:returns: True if we keep running and False if we run into a constraint
"""
if self.runs >= self.max_runs:
raise ExecutionRunsException(
'{} run into barrier of {} iterations without '
'finding the destination'.format(
self.__class__.__name__, self.max_runs))
if time.time() - self.start_time >= self.time_limit:
raise ExecutionTimeException(
'{} took longer than {} seconds, aborting!'.format(
self.__class__.__name__, self.time_limit))
def find_path(self, start, end, grid):
"""
find a path from start to end node on grid by iterating over
all neighbors of a node (see check_neighbors)
:param start: start node
:param end: end node
:param grid: grid that stores all possible steps/tiles as 2D-list
:return:
"""
self.start_time = time.time() # execution time limitation
self.runs = 0 # count number of iterations
start.opened = True
open_list = [start]
while len(open_list) > 0:
self.runs += 1
self.keep_running()
path = self.check_neighbors(start, end, grid, open_list)
if path:
return path, self.runs
# failed to find path
return [], self.runs
|
brean/python-pathfinding | pathfinding/finder/finder.py | Finder.find_path | python | def find_path(self, start, end, grid):
self.start_time = time.time() # execution time limitation
self.runs = 0 # count number of iterations
start.opened = True
open_list = [start]
while len(open_list) > 0:
self.runs += 1
self.keep_running()
path = self.check_neighbors(start, end, grid, open_list)
if path:
return path, self.runs
# failed to find path
return [], self.runs | find a path from start to end node on grid by iterating over
all neighbors of a node (see check_neighbors)
:param start: start node
:param end: end node
:param grid: grid that stores all possible steps/tiles as 2D-list
:return: | train | https://github.com/brean/python-pathfinding/blob/b857bf85e514a1712b40e29ccb5a473cd7fd5c80/pathfinding/finder/finder.py#L142-L166 | [
"def check_neighbors(self, start, end, grid, open_list,\n open_value=True, backtrace_by=None):\n \"\"\"\n find next path segment based on given node\n (or return path if we found the end)\n \"\"\"\n # pop node with minimum 'f' value\n node = heapq.nsmallest(1, open_list)[0]\n open_list.remove(node)\n node.closed = True\n\n # if reached the end position, construct the path and return it\n # (ignored for bi-directional a*, there we look for a neighbor that is\n # part of the oncoming path)\n if not backtrace_by and node == end:\n return backtrace(end)\n\n # get neighbors of the current node\n neighbors = self.find_neighbors(grid, node)\n for neighbor in neighbors:\n if neighbor.closed:\n # already visited last minimum f value\n continue\n if backtrace_by and neighbor.opened == backtrace_by:\n # found the oncoming path\n if backtrace_by == BY_END:\n return bi_backtrace(node, neighbor)\n else:\n return bi_backtrace(neighbor, node)\n\n # check if the neighbor has not been inspected yet, or\n # can be reached with smaller cost from the current node\n self.process_node(neighbor, node, end, open_list, open_value)\n\n # the end has not been reached (yet) keep the find_path loop running\n return None\n",
"def keep_running(self):\n \"\"\"\n check, if we run into time or iteration constrains.\n :returns: True if we keep running and False if we run into a constraint\n \"\"\"\n if self.runs >= self.max_runs:\n raise ExecutionRunsException(\n '{} run into barrier of {} iterations without '\n 'finding the destination'.format(\n self.__class__.__name__, self.max_runs))\n\n if time.time() - self.start_time >= self.time_limit:\n raise ExecutionTimeException(\n '{} took longer than {} seconds, aborting!'.format(\n self.__class__.__name__, self.time_limit))\n"
] | class Finder(object):
def __init__(self, heuristic=None, weight=1,
diagonal_movement=DiagonalMovement.never,
weighted=True,
time_limit=TIME_LIMIT,
max_runs=MAX_RUNS):
"""
find shortest path
:param heuristic: heuristic used to calculate distance of 2 points
(defaults to manhatten)
:param weight: weight for the edges
:param diagonal_movement: if diagonal movement is allowed
(see enum in diagonal_movement)
:param weighted: the algorithm supports weighted nodes
(should be True for A* and Dijkstra)
:param time_limit: max. runtime in seconds
:param max_runs: max. amount of tries until we abort the search
(optional, only if we enter huge grids and have time constrains)
<=0 means there are no constrains and the code might run on any
large map.
"""
self.time_limit = time_limit
self.max_runs = max_runs
self.weighted = weighted
self.diagonal_movement = diagonal_movement
self.weight = weight
self.heuristic = heuristic
def calc_cost(self, node_a, node_b):
"""
get the distance between current node and the neighbor (cost)
"""
if node_b.x - node_a.x == 0 or node_b.y - node_a.y == 0:
# direct neighbor - distance is 1
ng = 1
else:
# not a direct neighbor - diagonal movement
ng = SQRT2
# weight for weighted algorithms
if self.weighted:
ng *= node_b.weight
return node_a.g + ng
def apply_heuristic(self, node_a, node_b, heuristic=None):
"""
helper function to apply heuristic
"""
if not heuristic:
heuristic = self.heuristic
return heuristic(
abs(node_a.x - node_b.x),
abs(node_a.y - node_b.y))
def find_neighbors(self, grid, node, diagonal_movement=None):
'''
find neighbor, same for Djikstra, A*, Bi-A*, IDA*
'''
if not diagonal_movement:
diagonal_movement = self.diagonal_movement
return grid.neighbors(node, diagonal_movement=diagonal_movement)
def keep_running(self):
"""
check, if we run into time or iteration constrains.
:returns: True if we keep running and False if we run into a constraint
"""
if self.runs >= self.max_runs:
raise ExecutionRunsException(
'{} run into barrier of {} iterations without '
'finding the destination'.format(
self.__class__.__name__, self.max_runs))
if time.time() - self.start_time >= self.time_limit:
raise ExecutionTimeException(
'{} took longer than {} seconds, aborting!'.format(
self.__class__.__name__, self.time_limit))
def process_node(self, node, parent, end, open_list, open_value=True):
'''
we check if the given node is path of the path by calculating its
cost and add or remove it from our path
:param node: the node we like to test
(the neighbor in A* or jump-node in JumpPointSearch)
:param parent: the parent node (the current node we like to test)
:param end: the end point to calculate the cost of the path
:param open_list: the list that keeps track of our current path
:param open_value: needed if we like to set the open list to something
else than True (used for bi-directional algorithms)
'''
# calculate cost from current node (parent) to the next node (neighbor)
ng = self.calc_cost(parent, node)
if not node.opened or ng < node.g:
node.g = ng
node.h = node.h or \
self.apply_heuristic(node, end) * self.weight
# f is the estimated total cost from start to goal
node.f = node.g + node.h
node.parent = parent
if not node.opened:
heapq.heappush(open_list, node)
node.opened = open_value
else:
# the node can be reached with smaller cost.
# Since its f value has been updated, we have to
# update its position in the open list
open_list.remove(node)
heapq.heappush(open_list, node)
|
brean/python-pathfinding | pathfinding/finder/bi_a_star.py | BiAStarFinder.find_path | python | def find_path(self, start, end, grid):
self.start_time = time.time() # execution time limitation
self.runs = 0 # count number of iterations
start_open_list = [start]
start.g = 0
start.f = 0
start.opened = BY_START
end_open_list = [end]
end.g = 0
end.f = 0
end.opened = BY_END
while len(start_open_list) > 0 and len(end_open_list) > 0:
self.runs += 1
self.keep_running()
path = self.check_neighbors(start, end, grid, start_open_list,
open_value=BY_START,
backtrace_by=BY_END)
if path:
return path, self.runs
self.runs += 1
self.keep_running()
path = self.check_neighbors(end, start, grid, end_open_list,
open_value=BY_END,
backtrace_by=BY_START)
if path:
return path, self.runs
# failed to find path
return [], self.runs | find a path from start to end node on grid using the A* algorithm
:param start: start node
:param end: end node
:param grid: grid that stores all possible steps/tiles as 2D-list
:return: | train | https://github.com/brean/python-pathfinding/blob/b857bf85e514a1712b40e29ccb5a473cd7fd5c80/pathfinding/finder/bi_a_star.py#L38-L77 | [
"def check_neighbors(self, start, end, grid, open_list,\n open_value=True, backtrace_by=None):\n \"\"\"\n find next path segment based on given node\n (or return path if we found the end)\n \"\"\"\n # pop node with minimum 'f' value\n node = heapq.nsmallest(1, open_list)[0]\n open_list.remove(node)\n node.closed = True\n\n # if reached the end position, construct the path and return it\n # (ignored for bi-directional a*, there we look for a neighbor that is\n # part of the oncoming path)\n if not backtrace_by and node == end:\n return backtrace(end)\n\n # get neighbors of the current node\n neighbors = self.find_neighbors(grid, node)\n for neighbor in neighbors:\n if neighbor.closed:\n # already visited last minimum f value\n continue\n if backtrace_by and neighbor.opened == backtrace_by:\n # found the oncoming path\n if backtrace_by == BY_END:\n return bi_backtrace(node, neighbor)\n else:\n return bi_backtrace(neighbor, node)\n\n # check if the neighbor has not been inspected yet, or\n # can be reached with smaller cost from the current node\n self.process_node(neighbor, node, end, open_list, open_value)\n\n # the end has not been reached (yet) keep the find_path loop running\n return None\n",
"def keep_running(self):\n \"\"\"\n check, if we run into time or iteration constrains.\n :returns: True if we keep running and False if we run into a constraint\n \"\"\"\n if self.runs >= self.max_runs:\n raise ExecutionRunsException(\n '{} run into barrier of {} iterations without '\n 'finding the destination'.format(\n self.__class__.__name__, self.max_runs))\n\n if time.time() - self.start_time >= self.time_limit:\n raise ExecutionTimeException(\n '{} took longer than {} seconds, aborting!'.format(\n self.__class__.__name__, self.time_limit))\n"
] | class BiAStarFinder(AStarFinder):
"""
Similar to the default A* algorithm from a_star.
"""
def __init__(self, heuristic=None, weight=1,
diagonal_movement=DiagonalMovement.never,
time_limit=TIME_LIMIT,
max_runs=MAX_RUNS):
"""
find shortest path using Bi-A* algorithm
:param heuristic: heuristic used to calculate distance of 2 points
(defaults to manhatten)
:param weight: weight for the edges
:param diagonal_movement: if diagonal movement is allowed
(see enum in diagonal_movement)
:param time_limit: max. runtime in seconds
:param max_runs: max. amount of tries until we abort the search
(optional, only if we enter huge grids and have time constrains)
<=0 means there are no constrains and the code might run on any
large map.
"""
super(BiAStarFinder, self).__init__(
heuristic=heuristic,
weight=weight,
diagonal_movement=diagonal_movement,
time_limit=time_limit,
max_runs=max_runs)
self.weighted = False
|
brean/python-pathfinding | pathfinding/core/node.py | Node.cleanup | python | def cleanup(self):
# cost from this node to the goal
self.h = 0.0
# cost from the start node to this node
self.g = 0.0
# distance from start to this point (f = g + h )
self.f = 0.0
self.opened = 0
self.closed = False
# used for backtracking to the start point
self.parent = None
# used for recurion tracking of IDA*
self.retain_count = 0
# used for IDA* and Jump-Point-Search
self.tested = False | reset all calculated values, fresh start for pathfinding | train | https://github.com/brean/python-pathfinding/blob/b857bf85e514a1712b40e29ccb5a473cd7fd5c80/pathfinding/core/node.py#L30-L52 | null | class Node(object):
"""
basic node, saves X and Y coordinates on some grid and determine if
it is walkable.
"""
def __init__(self, x=0, y=0, walkable=True, weight=1):
# Coordinates
self.x = x
self.y = y
# Whether this node can be walked through.
self.walkable = walkable
# used for weighted algorithms
self.weight = weight
# values used in the finder
self.cleanup()
def __lt__(self, other):
"""
nodes are sorted by f value (see a_star.py)
:param other: compare Node
:return:
"""
return self.f < other.f
|
brean/python-pathfinding | pathfinding/core/grid.py | build_nodes | python | def build_nodes(width, height, matrix=None, inverse=False):
nodes = []
use_matrix = (isinstance(matrix, (tuple, list))) or \
(USE_NUMPY and isinstance(matrix, np.ndarray) and matrix.size > 0)
for y in range(height):
nodes.append([])
for x in range(width):
# 1, '1', True will be walkable
# while others will be obstacles
# if inverse is False, otherwise
# it changes
weight = int(matrix[y][x]) if use_matrix else 1
walkable = weight <= 0 if inverse else weight >= 1
nodes[y].append(Node(x=x, y=y, walkable=walkable, weight=weight))
return nodes | create nodes according to grid size. If a matrix is given it
will be used to determine what nodes are walkable.
:rtype : list | train | https://github.com/brean/python-pathfinding/blob/b857bf85e514a1712b40e29ccb5a473cd7fd5c80/pathfinding/core/grid.py#L11-L32 | null | # -*- coding: utf-8 -*-
from .node import Node
try:
import numpy as np
USE_NUMPY = True
except ImportError:
USE_NUMPY = False
from pathfinding.core.diagonal_movement import DiagonalMovement
class Grid(object):
def __init__(self, width=0, height=0, matrix=None, inverse=False):
"""
a grid represents the map (as 2d-list of nodes).
"""
self.width = width
self.height = height
if isinstance(matrix, (tuple, list)) or (
USE_NUMPY and isinstance(matrix, np.ndarray) and
matrix.size > 0):
self.height = len(matrix)
self.width = self.width = len(matrix[0]) if self.height > 0 else 0
if self.width > 0 and self.height > 0:
self.nodes = build_nodes(self.width, self.height, matrix, inverse)
else:
self.nodes = [[]]
def node(self, x, y):
"""
get node at position
:param x: x pos
:param y: y pos
:return:
"""
return self.nodes[y][x]
def inside(self, x, y):
"""
check, if field position is inside map
:param x: x pos
:param y: y pos
:return:
"""
return 0 <= x < self.width and 0 <= y < self.height
def walkable(self, x, y):
"""
check, if the tile is inside grid and if it is set as walkable
"""
return self.inside(x, y) and self.nodes[y][x].walkable
def neighbors(self, node, diagonal_movement=DiagonalMovement.never):
"""
get all neighbors of one node
:param node: node
"""
x = node.x
y = node.y
neighbors = []
s0 = d0 = s1 = d1 = s2 = d2 = s3 = d3 = False
# ↑
if self.walkable(x, y - 1):
neighbors.append(self.nodes[y - 1][x])
s0 = True
# →
if self.walkable(x + 1, y):
neighbors.append(self.nodes[y][x + 1])
s1 = True
# ↓
if self.walkable(x, y + 1):
neighbors.append(self.nodes[y + 1][x])
s2 = True
# ←
if self.walkable(x - 1, y):
neighbors.append(self.nodes[y][x - 1])
s3 = True
if diagonal_movement == DiagonalMovement.never:
return neighbors
if diagonal_movement == DiagonalMovement.only_when_no_obstacle:
d0 = s3 and s0
d1 = s0 and s1
d2 = s1 and s2
d3 = s2 and s3
elif diagonal_movement == DiagonalMovement.if_at_most_one_obstacle:
d0 = s3 or s0
d1 = s0 or s1
d2 = s1 or s2
d3 = s2 or s3
elif diagonal_movement == DiagonalMovement.always:
d0 = d1 = d2 = d3 = True
# ↖
if d0 and self.walkable(x - 1, y - 1):
neighbors.append(self.nodes[y - 1][x - 1])
# ↗
if d1 and self.walkable(x + 1, y - 1):
neighbors.append(self.nodes[y - 1][x + 1])
# ↘
if d2 and self.walkable(x + 1, y + 1):
neighbors.append(self.nodes[y + 1][x + 1])
# ↙
if d3 and self.walkable(x - 1, y + 1):
neighbors.append(self.nodes[y + 1][x - 1])
return neighbors
def cleanup(self):
for y_nodes in self.nodes:
for node in y_nodes:
node.cleanup()
def grid_str(self, path=None, start=None, end=None,
border=True, start_chr='s', end_chr='e',
path_chr='x', empty_chr=' ', block_chr='#',
show_weight=False):
"""
create a printable string from the grid using ASCII characters
:param path: list of nodes that show the path
:param start: start node
:param end: end node
:param border: create a border around the grid
:param start_chr: character for the start (default "s")
:param end_chr: character for the destination (default "e")
:param path_chr: character to show the path (default "x")
:param empty_chr: character for empty fields (default " ")
:param block_chr: character for blocking elements (default "#")
:param show_weight: instead of empty_chr show the cost of each empty
field (shows a + if the value of weight is > 10)
:return:
"""
data = ''
if border:
data = '+{}+'.format('-'*len(self.nodes[0]))
for y in range(len(self.nodes)):
line = ''
for x in range(len(self.nodes[y])):
node = self.nodes[y][x]
if node == start:
line += start_chr
elif node == end:
line += end_chr
elif path and ((node.x, node.y) in path or node in path):
line += path_chr
elif node.walkable:
# empty field
weight = str(node.weight) if node.weight < 10 else '+'
line += weight if show_weight else empty_chr
else:
line += block_chr # blocked field
if border:
line = '|'+line+'|'
if data:
data += '\n'
data += line
if border:
data += '\n+{}+'.format('-'*len(self.nodes[0]))
return data
|
brean/python-pathfinding | pathfinding/core/grid.py | Grid.inside | python | def inside(self, x, y):
return 0 <= x < self.width and 0 <= y < self.height | check, if field position is inside map
:param x: x pos
:param y: y pos
:return: | train | https://github.com/brean/python-pathfinding/blob/b857bf85e514a1712b40e29ccb5a473cd7fd5c80/pathfinding/core/grid.py#L61-L68 | null | class Grid(object):
def __init__(self, width=0, height=0, matrix=None, inverse=False):
"""
a grid represents the map (as 2d-list of nodes).
"""
self.width = width
self.height = height
if isinstance(matrix, (tuple, list)) or (
USE_NUMPY and isinstance(matrix, np.ndarray) and
matrix.size > 0):
self.height = len(matrix)
self.width = self.width = len(matrix[0]) if self.height > 0 else 0
if self.width > 0 and self.height > 0:
self.nodes = build_nodes(self.width, self.height, matrix, inverse)
else:
self.nodes = [[]]
def node(self, x, y):
"""
get node at position
:param x: x pos
:param y: y pos
:return:
"""
return self.nodes[y][x]
def walkable(self, x, y):
"""
check, if the tile is inside grid and if it is set as walkable
"""
return self.inside(x, y) and self.nodes[y][x].walkable
def neighbors(self, node, diagonal_movement=DiagonalMovement.never):
"""
get all neighbors of one node
:param node: node
"""
x = node.x
y = node.y
neighbors = []
s0 = d0 = s1 = d1 = s2 = d2 = s3 = d3 = False
# ↑
if self.walkable(x, y - 1):
neighbors.append(self.nodes[y - 1][x])
s0 = True
# →
if self.walkable(x + 1, y):
neighbors.append(self.nodes[y][x + 1])
s1 = True
# ↓
if self.walkable(x, y + 1):
neighbors.append(self.nodes[y + 1][x])
s2 = True
# ←
if self.walkable(x - 1, y):
neighbors.append(self.nodes[y][x - 1])
s3 = True
if diagonal_movement == DiagonalMovement.never:
return neighbors
if diagonal_movement == DiagonalMovement.only_when_no_obstacle:
d0 = s3 and s0
d1 = s0 and s1
d2 = s1 and s2
d3 = s2 and s3
elif diagonal_movement == DiagonalMovement.if_at_most_one_obstacle:
d0 = s3 or s0
d1 = s0 or s1
d2 = s1 or s2
d3 = s2 or s3
elif diagonal_movement == DiagonalMovement.always:
d0 = d1 = d2 = d3 = True
# ↖
if d0 and self.walkable(x - 1, y - 1):
neighbors.append(self.nodes[y - 1][x - 1])
# ↗
if d1 and self.walkable(x + 1, y - 1):
neighbors.append(self.nodes[y - 1][x + 1])
# ↘
if d2 and self.walkable(x + 1, y + 1):
neighbors.append(self.nodes[y + 1][x + 1])
# ↙
if d3 and self.walkable(x - 1, y + 1):
neighbors.append(self.nodes[y + 1][x - 1])
return neighbors
def cleanup(self):
for y_nodes in self.nodes:
for node in y_nodes:
node.cleanup()
def grid_str(self, path=None, start=None, end=None,
border=True, start_chr='s', end_chr='e',
path_chr='x', empty_chr=' ', block_chr='#',
show_weight=False):
"""
create a printable string from the grid using ASCII characters
:param path: list of nodes that show the path
:param start: start node
:param end: end node
:param border: create a border around the grid
:param start_chr: character for the start (default "s")
:param end_chr: character for the destination (default "e")
:param path_chr: character to show the path (default "x")
:param empty_chr: character for empty fields (default " ")
:param block_chr: character for blocking elements (default "#")
:param show_weight: instead of empty_chr show the cost of each empty
field (shows a + if the value of weight is > 10)
:return:
"""
data = ''
if border:
data = '+{}+'.format('-'*len(self.nodes[0]))
for y in range(len(self.nodes)):
line = ''
for x in range(len(self.nodes[y])):
node = self.nodes[y][x]
if node == start:
line += start_chr
elif node == end:
line += end_chr
elif path and ((node.x, node.y) in path or node in path):
line += path_chr
elif node.walkable:
# empty field
weight = str(node.weight) if node.weight < 10 else '+'
line += weight if show_weight else empty_chr
else:
line += block_chr # blocked field
if border:
line = '|'+line+'|'
if data:
data += '\n'
data += line
if border:
data += '\n+{}+'.format('-'*len(self.nodes[0]))
return data
|
brean/python-pathfinding | pathfinding/core/grid.py | Grid.walkable | python | def walkable(self, x, y):
return self.inside(x, y) and self.nodes[y][x].walkable | check, if the tile is inside grid and if it is set as walkable | train | https://github.com/brean/python-pathfinding/blob/b857bf85e514a1712b40e29ccb5a473cd7fd5c80/pathfinding/core/grid.py#L70-L74 | [
"def inside(self, x, y):\n \"\"\"\n check, if field position is inside map\n :param x: x pos\n :param y: y pos\n :return:\n \"\"\"\n return 0 <= x < self.width and 0 <= y < self.height\n"
] | class Grid(object):
def __init__(self, width=0, height=0, matrix=None, inverse=False):
"""
a grid represents the map (as 2d-list of nodes).
"""
self.width = width
self.height = height
if isinstance(matrix, (tuple, list)) or (
USE_NUMPY and isinstance(matrix, np.ndarray) and
matrix.size > 0):
self.height = len(matrix)
self.width = self.width = len(matrix[0]) if self.height > 0 else 0
if self.width > 0 and self.height > 0:
self.nodes = build_nodes(self.width, self.height, matrix, inverse)
else:
self.nodes = [[]]
def node(self, x, y):
"""
get node at position
:param x: x pos
:param y: y pos
:return:
"""
return self.nodes[y][x]
def inside(self, x, y):
"""
check, if field position is inside map
:param x: x pos
:param y: y pos
:return:
"""
return 0 <= x < self.width and 0 <= y < self.height
def neighbors(self, node, diagonal_movement=DiagonalMovement.never):
"""
get all neighbors of one node
:param node: node
"""
x = node.x
y = node.y
neighbors = []
s0 = d0 = s1 = d1 = s2 = d2 = s3 = d3 = False
# ↑
if self.walkable(x, y - 1):
neighbors.append(self.nodes[y - 1][x])
s0 = True
# →
if self.walkable(x + 1, y):
neighbors.append(self.nodes[y][x + 1])
s1 = True
# ↓
if self.walkable(x, y + 1):
neighbors.append(self.nodes[y + 1][x])
s2 = True
# ←
if self.walkable(x - 1, y):
neighbors.append(self.nodes[y][x - 1])
s3 = True
if diagonal_movement == DiagonalMovement.never:
return neighbors
if diagonal_movement == DiagonalMovement.only_when_no_obstacle:
d0 = s3 and s0
d1 = s0 and s1
d2 = s1 and s2
d3 = s2 and s3
elif diagonal_movement == DiagonalMovement.if_at_most_one_obstacle:
d0 = s3 or s0
d1 = s0 or s1
d2 = s1 or s2
d3 = s2 or s3
elif diagonal_movement == DiagonalMovement.always:
d0 = d1 = d2 = d3 = True
# ↖
if d0 and self.walkable(x - 1, y - 1):
neighbors.append(self.nodes[y - 1][x - 1])
# ↗
if d1 and self.walkable(x + 1, y - 1):
neighbors.append(self.nodes[y - 1][x + 1])
# ↘
if d2 and self.walkable(x + 1, y + 1):
neighbors.append(self.nodes[y + 1][x + 1])
# ↙
if d3 and self.walkable(x - 1, y + 1):
neighbors.append(self.nodes[y + 1][x - 1])
return neighbors
def cleanup(self):
for y_nodes in self.nodes:
for node in y_nodes:
node.cleanup()
def grid_str(self, path=None, start=None, end=None,
border=True, start_chr='s', end_chr='e',
path_chr='x', empty_chr=' ', block_chr='#',
show_weight=False):
"""
create a printable string from the grid using ASCII characters
:param path: list of nodes that show the path
:param start: start node
:param end: end node
:param border: create a border around the grid
:param start_chr: character for the start (default "s")
:param end_chr: character for the destination (default "e")
:param path_chr: character to show the path (default "x")
:param empty_chr: character for empty fields (default " ")
:param block_chr: character for blocking elements (default "#")
:param show_weight: instead of empty_chr show the cost of each empty
field (shows a + if the value of weight is > 10)
:return:
"""
data = ''
if border:
data = '+{}+'.format('-'*len(self.nodes[0]))
for y in range(len(self.nodes)):
line = ''
for x in range(len(self.nodes[y])):
node = self.nodes[y][x]
if node == start:
line += start_chr
elif node == end:
line += end_chr
elif path and ((node.x, node.y) in path or node in path):
line += path_chr
elif node.walkable:
# empty field
weight = str(node.weight) if node.weight < 10 else '+'
line += weight if show_weight else empty_chr
else:
line += block_chr # blocked field
if border:
line = '|'+line+'|'
if data:
data += '\n'
data += line
if border:
data += '\n+{}+'.format('-'*len(self.nodes[0]))
return data
|
brean/python-pathfinding | pathfinding/core/grid.py | Grid.neighbors | python | def neighbors(self, node, diagonal_movement=DiagonalMovement.never):
x = node.x
y = node.y
neighbors = []
s0 = d0 = s1 = d1 = s2 = d2 = s3 = d3 = False
# ↑
if self.walkable(x, y - 1):
neighbors.append(self.nodes[y - 1][x])
s0 = True
# →
if self.walkable(x + 1, y):
neighbors.append(self.nodes[y][x + 1])
s1 = True
# ↓
if self.walkable(x, y + 1):
neighbors.append(self.nodes[y + 1][x])
s2 = True
# ←
if self.walkable(x - 1, y):
neighbors.append(self.nodes[y][x - 1])
s3 = True
if diagonal_movement == DiagonalMovement.never:
return neighbors
if diagonal_movement == DiagonalMovement.only_when_no_obstacle:
d0 = s3 and s0
d1 = s0 and s1
d2 = s1 and s2
d3 = s2 and s3
elif diagonal_movement == DiagonalMovement.if_at_most_one_obstacle:
d0 = s3 or s0
d1 = s0 or s1
d2 = s1 or s2
d3 = s2 or s3
elif diagonal_movement == DiagonalMovement.always:
d0 = d1 = d2 = d3 = True
# ↖
if d0 and self.walkable(x - 1, y - 1):
neighbors.append(self.nodes[y - 1][x - 1])
# ↗
if d1 and self.walkable(x + 1, y - 1):
neighbors.append(self.nodes[y - 1][x + 1])
# ↘
if d2 and self.walkable(x + 1, y + 1):
neighbors.append(self.nodes[y + 1][x + 1])
# ↙
if d3 and self.walkable(x - 1, y + 1):
neighbors.append(self.nodes[y + 1][x - 1])
return neighbors | get all neighbors of one node
:param node: node | train | https://github.com/brean/python-pathfinding/blob/b857bf85e514a1712b40e29ccb5a473cd7fd5c80/pathfinding/core/grid.py#L76-L135 | [
"def walkable(self, x, y):\n \"\"\"\n check, if the tile is inside grid and if it is set as walkable\n \"\"\"\n return self.inside(x, y) and self.nodes[y][x].walkable\n"
] | class Grid(object):
def __init__(self, width=0, height=0, matrix=None, inverse=False):
"""
a grid represents the map (as 2d-list of nodes).
"""
self.width = width
self.height = height
if isinstance(matrix, (tuple, list)) or (
USE_NUMPY and isinstance(matrix, np.ndarray) and
matrix.size > 0):
self.height = len(matrix)
self.width = self.width = len(matrix[0]) if self.height > 0 else 0
if self.width > 0 and self.height > 0:
self.nodes = build_nodes(self.width, self.height, matrix, inverse)
else:
self.nodes = [[]]
def node(self, x, y):
"""
get node at position
:param x: x pos
:param y: y pos
:return:
"""
return self.nodes[y][x]
def inside(self, x, y):
"""
check, if field position is inside map
:param x: x pos
:param y: y pos
:return:
"""
return 0 <= x < self.width and 0 <= y < self.height
def walkable(self, x, y):
"""
check, if the tile is inside grid and if it is set as walkable
"""
return self.inside(x, y) and self.nodes[y][x].walkable
def cleanup(self):
for y_nodes in self.nodes:
for node in y_nodes:
node.cleanup()
def grid_str(self, path=None, start=None, end=None,
border=True, start_chr='s', end_chr='e',
path_chr='x', empty_chr=' ', block_chr='#',
show_weight=False):
"""
create a printable string from the grid using ASCII characters
:param path: list of nodes that show the path
:param start: start node
:param end: end node
:param border: create a border around the grid
:param start_chr: character for the start (default "s")
:param end_chr: character for the destination (default "e")
:param path_chr: character to show the path (default "x")
:param empty_chr: character for empty fields (default " ")
:param block_chr: character for blocking elements (default "#")
:param show_weight: instead of empty_chr show the cost of each empty
field (shows a + if the value of weight is > 10)
:return:
"""
data = ''
if border:
data = '+{}+'.format('-'*len(self.nodes[0]))
for y in range(len(self.nodes)):
line = ''
for x in range(len(self.nodes[y])):
node = self.nodes[y][x]
if node == start:
line += start_chr
elif node == end:
line += end_chr
elif path and ((node.x, node.y) in path or node in path):
line += path_chr
elif node.walkable:
# empty field
weight = str(node.weight) if node.weight < 10 else '+'
line += weight if show_weight else empty_chr
else:
line += block_chr # blocked field
if border:
line = '|'+line+'|'
if data:
data += '\n'
data += line
if border:
data += '\n+{}+'.format('-'*len(self.nodes[0]))
return data
|
brean/python-pathfinding | pathfinding/core/grid.py | Grid.grid_str | python | def grid_str(self, path=None, start=None, end=None,
border=True, start_chr='s', end_chr='e',
path_chr='x', empty_chr=' ', block_chr='#',
show_weight=False):
data = ''
if border:
data = '+{}+'.format('-'*len(self.nodes[0]))
for y in range(len(self.nodes)):
line = ''
for x in range(len(self.nodes[y])):
node = self.nodes[y][x]
if node == start:
line += start_chr
elif node == end:
line += end_chr
elif path and ((node.x, node.y) in path or node in path):
line += path_chr
elif node.walkable:
# empty field
weight = str(node.weight) if node.weight < 10 else '+'
line += weight if show_weight else empty_chr
else:
line += block_chr # blocked field
if border:
line = '|'+line+'|'
if data:
data += '\n'
data += line
if border:
data += '\n+{}+'.format('-'*len(self.nodes[0]))
return data | create a printable string from the grid using ASCII characters
:param path: list of nodes that show the path
:param start: start node
:param end: end node
:param border: create a border around the grid
:param start_chr: character for the start (default "s")
:param end_chr: character for the destination (default "e")
:param path_chr: character to show the path (default "x")
:param empty_chr: character for empty fields (default " ")
:param block_chr: character for blocking elements (default "#")
:param show_weight: instead of empty_chr show the cost of each empty
field (shows a + if the value of weight is > 10)
:return: | train | https://github.com/brean/python-pathfinding/blob/b857bf85e514a1712b40e29ccb5a473cd7fd5c80/pathfinding/core/grid.py#L142-L188 | null | class Grid(object):
def __init__(self, width=0, height=0, matrix=None, inverse=False):
"""
a grid represents the map (as 2d-list of nodes).
"""
self.width = width
self.height = height
if isinstance(matrix, (tuple, list)) or (
USE_NUMPY and isinstance(matrix, np.ndarray) and
matrix.size > 0):
self.height = len(matrix)
self.width = self.width = len(matrix[0]) if self.height > 0 else 0
if self.width > 0 and self.height > 0:
self.nodes = build_nodes(self.width, self.height, matrix, inverse)
else:
self.nodes = [[]]
def node(self, x, y):
"""
get node at position
:param x: x pos
:param y: y pos
:return:
"""
return self.nodes[y][x]
def inside(self, x, y):
"""
check, if field position is inside map
:param x: x pos
:param y: y pos
:return:
"""
return 0 <= x < self.width and 0 <= y < self.height
def walkable(self, x, y):
"""
check, if the tile is inside grid and if it is set as walkable
"""
return self.inside(x, y) and self.nodes[y][x].walkable
def neighbors(self, node, diagonal_movement=DiagonalMovement.never):
"""
get all neighbors of one node
:param node: node
"""
x = node.x
y = node.y
neighbors = []
s0 = d0 = s1 = d1 = s2 = d2 = s3 = d3 = False
# ↑
if self.walkable(x, y - 1):
neighbors.append(self.nodes[y - 1][x])
s0 = True
# →
if self.walkable(x + 1, y):
neighbors.append(self.nodes[y][x + 1])
s1 = True
# ↓
if self.walkable(x, y + 1):
neighbors.append(self.nodes[y + 1][x])
s2 = True
# ←
if self.walkable(x - 1, y):
neighbors.append(self.nodes[y][x - 1])
s3 = True
if diagonal_movement == DiagonalMovement.never:
return neighbors
if diagonal_movement == DiagonalMovement.only_when_no_obstacle:
d0 = s3 and s0
d1 = s0 and s1
d2 = s1 and s2
d3 = s2 and s3
elif diagonal_movement == DiagonalMovement.if_at_most_one_obstacle:
d0 = s3 or s0
d1 = s0 or s1
d2 = s1 or s2
d3 = s2 or s3
elif diagonal_movement == DiagonalMovement.always:
d0 = d1 = d2 = d3 = True
# ↖
if d0 and self.walkable(x - 1, y - 1):
neighbors.append(self.nodes[y - 1][x - 1])
# ↗
if d1 and self.walkable(x + 1, y - 1):
neighbors.append(self.nodes[y - 1][x + 1])
# ↘
if d2 and self.walkable(x + 1, y + 1):
neighbors.append(self.nodes[y + 1][x + 1])
# ↙
if d3 and self.walkable(x - 1, y + 1):
neighbors.append(self.nodes[y + 1][x - 1])
return neighbors
def cleanup(self):
for y_nodes in self.nodes:
for node in y_nodes:
node.cleanup()
|
brean/python-pathfinding | pathfinding/finder/a_star.py | AStarFinder.check_neighbors | python | def check_neighbors(self, start, end, grid, open_list,
open_value=True, backtrace_by=None):
# pop node with minimum 'f' value
node = heapq.nsmallest(1, open_list)[0]
open_list.remove(node)
node.closed = True
# if reached the end position, construct the path and return it
# (ignored for bi-directional a*, there we look for a neighbor that is
# part of the oncoming path)
if not backtrace_by and node == end:
return backtrace(end)
# get neighbors of the current node
neighbors = self.find_neighbors(grid, node)
for neighbor in neighbors:
if neighbor.closed:
# already visited last minimum f value
continue
if backtrace_by and neighbor.opened == backtrace_by:
# found the oncoming path
if backtrace_by == BY_END:
return bi_backtrace(node, neighbor)
else:
return bi_backtrace(neighbor, node)
# check if the neighbor has not been inspected yet, or
# can be reached with smaller cost from the current node
self.process_node(neighbor, node, end, open_list, open_value)
# the end has not been reached (yet) keep the find_path loop running
return None | find next path segment based on given node
(or return path if we found the end) | train | https://github.com/brean/python-pathfinding/blob/b857bf85e514a1712b40e29ccb5a473cd7fd5c80/pathfinding/finder/a_star.py#L42-L77 | [
"def backtrace(node):\n \"\"\"\n Backtrace according to the parent records and return the path.\n (including both start and end nodes)\n \"\"\"\n path = [(node.x, node.y)]\n while node.parent:\n node = node.parent\n path.append((node.x, node.y))\n path.reverse()\n return path\n",
"def bi_backtrace(node_a, node_b):\n \"\"\"\n Backtrace from start and end node, returns the path for bi-directional A*\n (including both start and end nodes)\n \"\"\"\n path_a = backtrace(node_a)\n path_b = backtrace(node_b)\n path_b.reverse()\n return path_a + path_b\n",
"def find_neighbors(self, grid, node, diagonal_movement=None):\n '''\n find neighbor, same for Djikstra, A*, Bi-A*, IDA*\n '''\n if not diagonal_movement:\n diagonal_movement = self.diagonal_movement\n return grid.neighbors(node, diagonal_movement=diagonal_movement)\n",
"def process_node(self, node, parent, end, open_list, open_value=True):\n '''\n we check if the given node is path of the path by calculating its\n cost and add or remove it from our path\n :param node: the node we like to test\n (the neighbor in A* or jump-node in JumpPointSearch)\n :param parent: the parent node (the current node we like to test)\n :param end: the end point to calculate the cost of the path\n :param open_list: the list that keeps track of our current path\n :param open_value: needed if we like to set the open list to something\n else than True (used for bi-directional algorithms)\n\n '''\n # calculate cost from current node (parent) to the next node (neighbor)\n ng = self.calc_cost(parent, node)\n\n if not node.opened or ng < node.g:\n node.g = ng\n node.h = node.h or \\\n self.apply_heuristic(node, end) * self.weight\n # f is the estimated total cost from start to goal\n node.f = node.g + node.h\n node.parent = parent\n\n if not node.opened:\n heapq.heappush(open_list, node)\n node.opened = open_value\n else:\n # the node can be reached with smaller cost.\n # Since its f value has been updated, we have to\n # update its position in the open list\n open_list.remove(node)\n heapq.heappush(open_list, node)\n"
] | class AStarFinder(Finder):
def __init__(self, heuristic=None, weight=1,
diagonal_movement=DiagonalMovement.never,
time_limit=TIME_LIMIT,
max_runs=MAX_RUNS):
"""
find shortest path using A* algorithm
:param heuristic: heuristic used to calculate distance of 2 points
(defaults to manhatten)
:param weight: weight for the edges
:param diagonal_movement: if diagonal movement is allowed
(see enum in diagonal_movement)
:param time_limit: max. runtime in seconds
:param max_runs: max. amount of tries until we abort the search
(optional, only if we enter huge grids and have time constrains)
<=0 means there are no constrains and the code might run on any
large map.
"""
super(AStarFinder, self).__init__(
heuristic=heuristic,
weight=weight,
diagonal_movement=diagonal_movement,
time_limit=time_limit,
max_runs=max_runs)
if not heuristic:
if diagonal_movement == DiagonalMovement.never:
self.heuristic = manhatten
else:
# When diagonal movement is allowed the manhattan heuristic is
# not admissible it should be octile instead
self.heuristic = octile
def find_path(self, start, end, grid):
"""
find a path from start to end node on grid using the A* algorithm
:param start: start node
:param end: end node
:param grid: grid that stores all possible steps/tiles as 2D-list
:return:
"""
start.g = 0
start.f = 0
return super(AStarFinder, self).find_path(start, end, grid)
|
brean/python-pathfinding | pathfinding/finder/a_star.py | AStarFinder.find_path | python | def find_path(self, start, end, grid):
start.g = 0
start.f = 0
return super(AStarFinder, self).find_path(start, end, grid) | find a path from start to end node on grid using the A* algorithm
:param start: start node
:param end: end node
:param grid: grid that stores all possible steps/tiles as 2D-list
:return: | train | https://github.com/brean/python-pathfinding/blob/b857bf85e514a1712b40e29ccb5a473cd7fd5c80/pathfinding/finder/a_star.py#L79-L89 | [
"def find_path(self, start, end, grid):\n \"\"\"\n find a path from start to end node on grid by iterating over\n all neighbors of a node (see check_neighbors)\n :param start: start node\n :param end: end node\n :param grid: grid that stores all possible steps/tiles as 2D-list\n :return:\n \"\"\"\n self.start_time = time.time() # execution time limitation\n self.runs = 0 # count number of iterations\n start.opened = True\n\n open_list = [start]\n\n while len(open_list) > 0:\n self.runs += 1\n self.keep_running()\n\n path = self.check_neighbors(start, end, grid, open_list)\n if path:\n return path, self.runs\n\n # failed to find path\n return [], self.runs\n"
] | class AStarFinder(Finder):
def __init__(self, heuristic=None, weight=1,
diagonal_movement=DiagonalMovement.never,
time_limit=TIME_LIMIT,
max_runs=MAX_RUNS):
"""
find shortest path using A* algorithm
:param heuristic: heuristic used to calculate distance of 2 points
(defaults to manhatten)
:param weight: weight for the edges
:param diagonal_movement: if diagonal movement is allowed
(see enum in diagonal_movement)
:param time_limit: max. runtime in seconds
:param max_runs: max. amount of tries until we abort the search
(optional, only if we enter huge grids and have time constrains)
<=0 means there are no constrains and the code might run on any
large map.
"""
super(AStarFinder, self).__init__(
heuristic=heuristic,
weight=weight,
diagonal_movement=diagonal_movement,
time_limit=time_limit,
max_runs=max_runs)
if not heuristic:
if diagonal_movement == DiagonalMovement.never:
self.heuristic = manhatten
else:
# When diagonal movement is allowed the manhattan heuristic is
# not admissible it should be octile instead
self.heuristic = octile
def check_neighbors(self, start, end, grid, open_list,
open_value=True, backtrace_by=None):
"""
find next path segment based on given node
(or return path if we found the end)
"""
# pop node with minimum 'f' value
node = heapq.nsmallest(1, open_list)[0]
open_list.remove(node)
node.closed = True
# if reached the end position, construct the path and return it
# (ignored for bi-directional a*, there we look for a neighbor that is
# part of the oncoming path)
if not backtrace_by and node == end:
return backtrace(end)
# get neighbors of the current node
neighbors = self.find_neighbors(grid, node)
for neighbor in neighbors:
if neighbor.closed:
# already visited last minimum f value
continue
if backtrace_by and neighbor.opened == backtrace_by:
# found the oncoming path
if backtrace_by == BY_END:
return bi_backtrace(node, neighbor)
else:
return bi_backtrace(neighbor, node)
# check if the neighbor has not been inspected yet, or
# can be reached with smaller cost from the current node
self.process_node(neighbor, node, end, open_list, open_value)
# the end has not been reached (yet) keep the find_path loop running
return None
|
anthill/koala | koala/utils.py | max_dimension | python | def max_dimension(cellmap, sheet = None):
cells = list(cellmap.values())
rows = 0
cols = 0
for cell in cells:
if sheet is None or cell.sheet == sheet:
rows = max(rows, int(cell.row))
cols = max(cols, int(col2num(cell.col)))
return (rows, cols) | This function calculates the maximum dimension of the workbook or optionally the worksheet. It returns a tupple
of two integers, the first being the rows and the second being the columns.
:param cellmap: all the cells that should be used to calculate the maximum.
:param sheet: (optionally) a string with the sheet name.
:return: a tupple of two integers, the first being the rows and the second being the columns. | train | https://github.com/anthill/koala/blob/393089fe081380506e73235db18a32b4e078d222/koala/utils.py#L103-L121 | [
"def col2num(col):\n\n if col in col2num_cache:\n return col2num_cache[col]\n else:\n if not col:\n raise Exception(\"Column may not be empty\")\n\n tot = 0\n for i,c in enumerate([c for c in col[::-1] if c != \"$\"]):\n if c == '$': continue\n tot += (ord(c)-64) * 26 ** i\n\n col2num_cache[col] = tot\n return tot\n"
] | # cython: profile=True
from __future__ import absolute_import, division
import collections
import numbers
import re
import datetime as dt
from six import string_types
from openpyxl.compat import unicode
from .ExcelError import ExcelError
ASCII = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
# source: https://github.com/dgorissen/pycel/blob/master/src/pycel/excelutil.py
ROW_RANGE_RE = re.compile(r"(\$?[1-9][0-9]{0,6}):(\$?[1-9][0-9]{0,6})$")
COL_RANGE_RE = re.compile(r"(\$?[A-Za-z]{1,3}):(\$?[A-Za-z]{1,3})$")
CELL_REF_RE = re.compile(r"(\$?[A-Za-z]{1,3})(\$?[1-9][0-9]{0,6})$")
# We might need to test these util functions
def is_almost_equal(a, b, precision = 0.0001):
if is_number(a) and is_number(b):
return abs(float(a) - float(b)) <= precision
elif (a is None or a == 'None') and (b is None or b == 'None'):
return True
else: # booleans or strings
return str(a) == str(b)
def is_range(address):
if isinstance(address, Exception):
return address
return address.find(':') > 0
split_range_cache = {}
def split_range(rng):
if rng in split_range_cache:
return split_range_cache[rng]
else:
if rng.find('!') > 0:
start,end = rng.split(':')
if start.find('!') > 0:
sh,start = start.split("!")
if end.find('!') > 0:
sh,end = end.split("!")
else:
sh = None
start,end = rng.split(':')
split_range_cache[rng] = (sh, start, end)
return (sh,start,end)
split_address_cache = {}
def split_address(address):
    """Split a single-cell address into a (sheet, column, row) tuple.

    Accepts 'Sheet!A1', 'A1', 'R1C2', 'R[1]C[2]', bare rows ('1') and bare
    single-letter columns ('A').  '$' anchors are preserved in the returned
    parts.  Results are memoized in ``split_address_cache``.

    All regex patterns are raw strings: the previous non-raw '\\$'/'\\d'
    escapes trigger DeprecationWarning on Python 3.6+ and are slated to
    become syntax errors.

    :raises Exception: when the address matches none of the known formats.
    """
    if address in split_address_cache:
        return split_address_cache[address]

    sheet = None
    if address.find('!') > 0:
        sheet, addr = address.split('!')
    else:
        addr = address

    # ignore case
    addr = addr.upper()

    # regular <col><row> format
    if re.match(r'^[A-Z\$]+[\d\$]+$', addr):
        col, row = [_f for _f in re.split(r'([A-Z\$]+)', addr) if _f]
    # R<row>C<col> format
    elif re.match(r'^R\d+C\d+$', addr):
        row, col = addr.split('C')
        row = row[1:]
    # R[<row>]C[<col>] format
    elif re.match(r'^R\[\d+\]C\[\d+\]$', addr):
        row, col = addr.split('C')
        row = row[2:-1]
        col = col[2:-1]
    # [<row>] format
    elif re.match(r'^[\d\$]+$', addr):
        row = addr
        col = None
    # [<col>] format
    elif re.match(r'^[A-Z\$]$', addr):
        row = None
        col = addr
    else:
        raise Exception('Invalid address format ' + addr)

    split_address_cache[address] = (sheet, col, row)
    return (sheet, col, row)
resolve_range_cache = {}
def resolve_range(rng, should_flatten = False, sheet=''):
    """Expand a range reference into its individual cell addresses.

    Returns a tuple ``(cells, nrows, ncols)``.  ``cells`` is a flat list for
    single-row/column ranges (and, when ``should_flatten`` is True, for
    rectangular ones too); otherwise it is a list of row lists.  Handles
    'A1:B2' as well as whole-row '1:1' and whole-column 'A:A' styles.
    Results are memoized in ``resolve_range_cache``; a reference without ':'
    yields an ExcelError('#REF!') value instead of raising.
    """
    # print 'RESOLVE RANGE splitting', rng
    if ':' not in rng:
        if '!' in rng:
            rng = rng.split('!')
        return ExcelError('#REF!', info = '%s is not a regular range, nor a named_range' % rng)
    sh, start, end = split_range(rng)
    # reconcile the sheet embedded in the range with the sheet argument;
    # the surviving sheet name is normalized to carry a trailing '!'
    if sh and sheet:
        if sh != sheet:
            raise Exception("Mismatched sheets %s and %s" % (sh,sheet))
        else:
            sheet += '!'
    elif sh and not sheet:
        sheet = sh + "!"
    elif sheet and not sh:
        sheet += "!"
    else:
        pass
    # `unicode` != `str` in Python2. See `from openpyxl.compat import unicode`
    if type(sheet) == str and str != unicode:
        sheet = unicode(sheet, 'utf-8')
    if type(rng) == str and str != unicode:
        rng = unicode(rng, 'utf-8')
    key = rng+str(should_flatten)+sheet
    if key in resolve_range_cache:
        return resolve_range_cache[key]
    else:
        if not is_range(rng):  return ([sheet + rng],1,1)
        # single cell, no range
        if start.isdigit() and end.isdigit():
            # This copes with 1:1 style ranges (whole rows, columns A..XFD)
            start_col = "A"
            start_row = start
            end_col = "XFD"
            end_row = end
        elif start.isalpha() and end.isalpha():
            # This copes with A:A style ranges (whole columns, rows 1..2**20)
            start_col = start
            start_row = 1
            end_col = end
            end_row = 2**20
        else:
            sh, start_col, start_row = split_address(start)
            sh, end_col, end_row = split_address(end)
        start_col_idx = col2num(start_col)
        end_col_idx = col2num(end_col);
        start_row = int(start_row)
        end_row = int(end_row)
        # (a large commented-out numpy implementation used to live here;
        # removed as dead code)
        # single column
        if start_col == end_col:
            nrows = end_row - start_row + 1
            data = [ "%s%s%s" % (s,c,r) for (s,c,r) in zip([sheet]*nrows,[start_col]*nrows,list(range(start_row,end_row+1)))]
            output = data,len(data),1
        # single row
        elif start_row == end_row:
            ncols = end_col_idx - start_col_idx + 1
            data = [ "%s%s%s" % (s,num2col(c),r) for (s,c,r) in zip([sheet]*ncols,list(range(start_col_idx,end_col_idx+1)),[start_row]*ncols)]
            output = data,1,len(data)
        # rectangular range
        else:
            cells = []
            for r in range(start_row,end_row+1):
                row = []
                for c in range(start_col_idx,end_col_idx+1):
                    row.append(sheet + num2col(c) + str(r))
                cells.append(row)
            if should_flatten:
                # flatten into one list
                l = list(flatten(cells, only_lists = True))
                output = l,len(cells), len(cells[0])
            else:
                output = cells, len(cells), len(cells[0])
        resolve_range_cache[key] = output
        return output
col2num_cache = {}
# e.g., convert BA -> 53
def col2num(col):
    """Convert a column label to its 1-based index (e.g. 'BA' -> 53).

    '$' anchors are ignored; results are memoized in ``col2num_cache``.
    The previous body had a dead ``if c == '$': continue`` inside the loop -
    the list comprehension already filters '$' out - which is removed here.

    :raises Exception: when *col* is empty.
    """
    if col in col2num_cache:
        return col2num_cache[col]

    if not col:
        raise Exception("Column may not be empty")

    # base-26 positional value, scanning from the rightmost letter
    tot = 0
    for i, c in enumerate([c for c in col[::-1] if c != "$"]):
        tot += (ord(c) - 64) * 26 ** i

    col2num_cache[col] = tot
    return tot
num2col_cache = {}
# convert back
def num2col(num):
    """Convert a 1-based column index back to its letter label (e.g. 53 -> 'BA').

    Results are memoized in ``num2col_cache``.

    :raises Exception: when *num* is smaller than 1.
    """
    if num in num2col_cache:
        return num2col_cache[num]

    if num < 1:
        raise Exception("Number must be larger than 0: %s" % num)

    letters = ''
    remaining = num
    while remaining > 0:
        remaining, digit = divmod(remaining, 26)
        if digit == 0:
            # 'Z' is the 26th "digit"; borrow one from the next position
            remaining -= 1
            digit = 26
        letters = chr(64 + digit) + letters

    num2col_cache[num] = letters
    return letters
def address2index(a):
    """Translate a cell address like 'B3' into a (column_index, row_index) pair."""
    sheet, col, row = split_address(a)
    return (col2num(col), int(row))
def index2addres(c, r, sheet=None):
    """Build an 'A1'-style address (optionally 'Sheet!A1') from column/row indexes."""
    prefix = sheet + "!" if sheet else ""
    return "%s%s%s" % (prefix, num2col(c), r)
def get_linest_degree(excel,cl):
    """Infer the polynomial degree and coefficient position of a LINEST cell.

    Scans left/right (then up/down when nothing is found horizontally) for
    contiguous cells sharing the same formula as *cl*; the run length minus
    one is the degree, and *cl*'s offset inside the run is which coefficient
    it holds (leftmost = highest power).  Returns ``(degree, coef)`` with
    degree at least 1 (a single LINEST cell means a linear regression).

    :param excel: workbook wrapper providing ``get_formula_from_range``.
    :param cl: the cell whose LINEST context is analysed.
    """
    # TODO: assumes a row or column of linest formulas & that all coefficients are needed
    sh,c,r,ci = cl.address_parts()
    # figure out where we are in the row
    # to the left
    i = ci - 1
    while i > 0:
        f = excel.get_formula_from_range(index2addres(i,r))
        if f is None or f != cl.formula:
            break
        else:
            i = i - 1
    # to the right
    j = ci + 1
    while True:
        f = excel.get_formula_from_range(index2addres(j,r))
        if f is None or f != cl.formula:
            break
        else:
            j = j + 1
    # assume the degree is the number of linest's
    degree = (j - i - 1) - 1 #last -1 is because an n degree polynomial has n+1 coefs
    # which coef are we (left most coef is the coef for the highest power)
    coef = ci - i
    # no linests left or right, try looking up/down
    if degree == 0:
        # up
        i = r - 1
        while i > 0:
            f = excel.get_formula_from_range("%s%s" % (c,i))
            if f is None or f != cl.formula:
                break
            else:
                i = i - 1
        # down
        j = r + 1
        while True:
            f = excel.get_formula_from_range("%s%s" % (c,j))
            if f is None or f != cl.formula:
                break
            else:
                j = j + 1
        degree = (j - i - 1) - 1
        coef = r - i
    # if degree is zero -> only one linest formula -> linear regression -> degree should be one
    return (max(degree,1),coef)
def flatten(l, only_lists = False):
    """Recursively yield the leaves of a nested iterable.

    Strings are always treated as leaves.  With ``only_lists=True`` only
    ``list`` instances are descended into (tuples, generators, ... stay
    leaves), which resolve_range relies on.
    """
    if only_lists:
        instance = list
    else:
        # collections.Iterable moved to collections.abc and was removed from
        # the collections top level in Python 3.10
        try:
            from collections.abc import Iterable as instance
        except ImportError:  # Python 2
            from collections import Iterable as instance
    for el in l:
        if isinstance(el, instance) and not isinstance(el, string_types):
            for sub in flatten(el, only_lists = only_lists):
                yield sub
        else:
            yield el
def uniqueify(seq):
    """Return *seq* as a list with duplicates removed, keeping first-seen order."""
    seen = set()
    result = []
    for item in seq:
        if item not in seen:
            seen.add(item)
            result.append(item)
    return result
def is_number(s): # http://stackoverflow.com/questions/354038/how-do-i-check-if-a-string-is-a-number-float-in-python
    """Return True when *s* converts to a float (numbers, numeric strings, booleans)."""
    try:
        float(s)
    except Exception:
        return False
    return True
def is_leap_year(year):
    """Return True for leap years under Excel's calendar (1900 counts as leap).

    :raises TypeError: when *year* is not a number or not strictly positive.
    """
    if not is_number(year):
        raise TypeError("%s must be a number" % str(year))
    if year <= 0:
        raise TypeError("%s must be strictly positive" % str(year))
    # Excel wrongly treats 1900 as a leap year => https://support.microsoft.com/en-us/kb/214326
    if year == 1900:
        return True
    return year % 4 == 0 and year % 100 != 0 or year % 400 == 0
def get_max_days_in_month(month, year):
    """Return the number of days in *month* of *year* (Excel calendar, so 1900 is leap).

    :raises TypeError: when either input is not a number or not strictly positive.
    """
    if not is_number(year) or not is_number(month):
        raise TypeError("All inputs must be a number")
    if year <= 0 or month <= 0:
        raise TypeError("All inputs must be strictly positive")

    if month == 2:
        return 29 if is_leap_year(year) else 28
    if month in (4, 6, 9, 11):
        return 30
    return 31
def normalize_year(y, m, d):
    """Normalize a (year, month, day) triple so the month falls in 1..12 and
    the day falls inside its month, carrying overflow into the next
    month/year (Excel calendar, so 1900 is treated as a leap year).

    The old ``m <= 0`` branch made a recursive call whose result was
    discarded; since m is already in 1..12 at that point and the call had no
    side effects, it is removed here (behaviour unchanged, wasted work gone).
    """
    if m <= 0:
        # borrow whole years until the month is back in 1..12
        y -= int(abs(m) / 12 + 1)
        m = 12 - (abs(m) % 12)
    elif m > 12:
        y += int(m / 12)
        m = m % 12
    if d <= 0:
        # borrow days from the previous month, then renormalize
        d += get_max_days_in_month(m, y)
        m -= 1
        y, m, d = normalize_year(y, m, d)
    else:
        if m in (4, 6, 9, 11) and d > 30:
            m += 1
            d -= 30
            y, m, d = normalize_year(y, m, d)
        elif m == 2:
            if (is_leap_year(y)) and d > 29:
                m += 1
                d -= 29
                y, m, d = normalize_year(y, m, d)
            elif (not is_leap_year(y)) and d > 28:
                m += 1
                d -= 28
                y, m, d = normalize_year(y, m, d)
        elif d > 31:
            m += 1
            d -= 31
            y, m, d = normalize_year(y, m, d)
    return (y, m, d)
def date_from_int(nb):
    """Convert an Excel date serial number into a (year, month, day) tuple.

    Walks forward from the origin of the Excel 1900 date system, which -
    like Excel - treats 1900 as a leap year.

    :param nb: Excel serial number (days since 1900-01-00).
    :raises TypeError: when *nb* is not a number.
    """
    if not is_number(nb):
        raise TypeError("%s is not a number" % str(nb))
    # origin of the Excel date system
    current_year = 1900
    current_month = 0
    current_day = 0
    # consume whole years first, then whole months, then the leftover days
    while(nb > 0):
        if not is_leap_year(current_year) and nb > 365:
            current_year += 1
            nb -= 365
        elif is_leap_year(current_year) and nb > 366:
            current_year += 1
            nb -= 366
        else:
            current_month += 1
            max_days = get_max_days_in_month(current_month, current_year)
            if nb > max_days:
                nb -= max_days
            else:
                current_day = int(nb)
                nb = 0
    return (current_year, current_month, current_day)
def int_from_date(date):
    """Convert a datetime.date (or datetime) to its Excel 1900-system serial number."""
    # Not Dec 31st: the 30th accounts for Excel's fictitious 1900-02-29
    excel_origin = dt.date(1899, 12, 30)
    delta = date - excel_origin
    return float(delta.days) + (float(delta.seconds) / 86400)
def criteria_parser(criteria):
    """Build a predicate function from an Excel-style criteria value.

    Numeric criteria test numeric equality.  String criteria may start with
    one of <, >, >=, <=, <> or = followed by a value; anything else is
    compared case-insensitively as a plain string.  Comparison operators
    return False for non-numeric cell values, mirroring Excel.

    :return: a one-argument predicate ``check(x)``.
    :raises Exception: when *criteria* is neither a number nor a string.
    """
    if is_number(criteria):
        def check(x):
            try:
                x = float(x)
            except:
                return False
            return x == float(criteria) #and type(x) == type(criteria)
    elif type(criteria) == str:
        # split the leading operator characters from the comparison value
        search = re.search('(\W*)(.*)', criteria.lower()).group
        operator = search(1)
        value = search(2)
        value = float(value) if is_number(value) else str(value)
        if operator == '<':
            def check(x):
                if not is_number(x):
                    return False # Excel returns False when a string is compared with a value
                return x < value
        elif operator == '>':
            def check(x):
                if not is_number(x):
                    return False # Excel returns False when a string is compared with a value
                return x > value
        elif operator == '>=':
            def check(x):
                if not is_number(x):
                    return False # Excel returns False when a string is compared with a value
                return x >= value
        elif operator == '<=':
            def check(x):
                if not is_number(x):
                    return False # Excel returns False when a string is compared with a value
                return x <= value
        elif operator == '<>':
            def check(x):
                if not is_number(x):
                    return False # Excel returns False when a string is compared with a value
                return x != value
        elif operator == '=' and is_number(value):
            def check(x):
                if not is_number(x):
                    return False # Excel returns False when a string is compared with a value
                return x == value
        elif operator == '=':
            def check(x):
                return str(x).lower() == str(value)
        else:
            # no recognised operator: plain case-insensitive string equality
            def check(x):
                return str(x).lower() == criteria.lower()
    else:
        raise Exception('Could\'t parse criteria %s' % criteria)
    return check
def find_corresponding_index(list, criteria):
    """Return the indexes of every item in *list* satisfying *criteria*
    (see :func:`criteria_parser` for the accepted criteria syntax)."""
    # parse criteria
    check = criteria_parser(criteria)
    return [index for index, item in enumerate(list) if check(item)]
def check_length(range1, range2):
    """Return *range2* when both ranges hold the same number of values.

    :raises ValueError: when the lengths differ.
    """
    if len(range1.values) == len(range2.values):
        return range2
    raise ValueError('Ranges don\'t have the same size')
def extract_numeric_values(*args):
    """Collect the numeric, non-boolean values found in *args*.

    Range-like iterables (iterable but not list/tuple/str) are read through
    their ``.values`` attribute; plain lists and tuples are scanned directly;
    numeric scalars are kept as-is.

    NOTE(review): ``collections.Iterable`` was removed from the collections
    top level in Python 3.10 (moved to collections.abc) - confirm the
    supported interpreter versions.
    """
    values = []
    for arg in args:
        if isinstance(arg, collections.Iterable) and type(arg) != list and type(arg) != tuple and type(arg) != str and type(arg) != unicode: # does not work fo other Iterable than RangeCore, but can t import RangeCore here for circular reference issues
            for x in arg.values:
                if is_number(x) and type(x) is not bool: # excludes booleans from nested ranges
                    values.append(x)
        elif type(arg) is tuple or type(arg) is list:
            for x in arg:
                if is_number(x) and type(x) is not bool: # excludes booleans from nested ranges
                    values.append(x)
        elif is_number(arg):
            values.append(arg)
    return values
def old_div(a, b):
    """
    Equivalent to ``a / b`` on Python 2 without ``from __future__ import
    division``: floor division when both operands are integral, true
    division otherwise.
    Copied from:
    https://github.com/PythonCharmers/python-future/blob/master/src/past/utils/__init__.py
    """
    both_integral = isinstance(a, numbers.Integral) and isinstance(b, numbers.Integral)
    return a // b if both_integral else a / b
if __name__ == '__main__':
    pass  # library-only module: nothing to run from the command line
|
anthill/koala | koala/excellib.py | rows | python | def rows(array):
if isinstance(array, (float, int)):
rows = 1 # special case for A1:A1 type ranges which for some reason only return an int/float
elif array is None:
rows = 1 # some A1:A1 ranges return None (issue with ref cell)
else:
rows = len(array.values)
return rows | Function to find the number of rows in an array.
Excel reference: https://support.office.com/en-ie/article/rows-function-b592593e-3fc2-47f2-bec1-bda493811597
:param array: the array of which the rows should be counted.
:return: the number of rows. | train | https://github.com/anthill/koala/blob/393089fe081380506e73235db18a32b4e078d222/koala/excellib.py#L410-L426 | null | # cython: profile=True
'''
Python equivalents of various excel functions
'''
# source: https://github.com/dgorissen/pycel/blob/master/src/pycel/excellib.py
from __future__ import absolute_import, division
import numpy as np
import scipy.optimize
import datetime
from math import log, ceil
from decimal import Decimal, ROUND_UP, ROUND_HALF_UP
from calendar import monthrange
from dateutil.relativedelta import relativedelta
from openpyxl.compat import unicode
from koala.utils import *
from koala.Range import RangeCore as Range
from koala.ExcelError import *
from functools import reduce
######################################################################################
# A dictionary that maps excel function names onto python equivalents. You should
# only add an entry to this map if the python name is different to the excel name
# (which it may need to be to prevent conflicts with existing python functions
# with that name, e.g., max).
# So if excel defines a function foobar(), all you have to do is add a function
# called foobar to this module. You only need to add it to the function map,
# if you want to use a different name in the python code.
# Note: some functions (if, pi, atan2, and, or, array, ...) are already taken care of
# in the FunctionNode code, so adding them here will have no effect.
# Maps Excel function names to their python implementations when the names
# differ (the duplicate "min" entry the literal used to carry is removed;
# in a dict literal the later duplicate silently won anyway).
FUNCTION_MAP = {
    "ln": "xlog",
    "min": "xmin",
    "max": "xmax",
    "sum": "xsum",
    "gammaln": "lgamma",
    "round": "xround"
}
# Excel functions recognised by the compiler; used as a membership set.
# The original list repeated several names (SUM, MIN, MATCH, LOOKUP, INDEX,
# AVERAGE, SUMPRODUCT, IRR) - duplicates are removed, first-seen order kept.
IND_FUN = [
    "SUM",
    "MIN",
    "IF",
    "TAN",
    "ATAN2",
    "PI",
    "ARRAY",
    "ARRAYROW",
    "AND",
    "OR",
    "ALL",
    "VALUE",
    "LOG",
    "MAX",
    "SUMPRODUCT",
    "IRR",
    "CHOOSE",
    "SUMIF",
    "AVERAGE",
    "RIGHT",
    "INDEX",
    "LOOKUP",
    "LINEST",
    "NPV",
    "MATCH",
    "MOD",
    "COUNT",
    "COUNTA",
    "COUNTIF",
    "COUNTIFS",
    "SUMIFS",
    "ROUND",
    "ROWS",
    "COLUMNS",
    "MID",
    "DATE",
    "YEARFRAC",
    "ISNA",
    "ISBLANK",
    "ISTEXT",
    "OFFSET",
    "IFERROR",
    "XIRR",
    "VLOOKUP",
    "VDB",
    "SLN",
    "XNPV",
    "PMT",
    "ROUNDUP",
    "POWER",
    "SQRT",
    "TODAY",
    "YEAR",
    "MONTH",
    "EOMONTH",
]
# Excel's hard limit on the number of characters a single cell may contain
CELL_CHARACTER_LIMIT = 32767
# first day of the Excel 1900 date system
EXCEL_EPOCH = datetime.datetime.strptime("1900-01-01", '%Y-%m-%d').date()
######################################################################################
# List of excel equivalent functions
# TODO: needs unit testing
def value(text):
    """Convert a textual number into an int or float; '12%'-style strings
    become fractions (VALUE-like coercion).

    Percentage handling now comes before the decimal-point check so that
    '5.5%' works (the old order crashed on ``float('5.5%')``), and
    ``'.' in text`` replaces ``find('.') > 0`` so that '.5' parses instead
    of failing in ``int('.5')``.
    """
    if text.endswith('%'):
        return float(text.replace('%', '')) / 100
    elif '.' in text:
        return float(text)
    else:
        return int(text)
def xlog(a):
    """Natural logarithm; applied element-wise when given a list/tuple/ndarray."""
    if isinstance(a, (list, tuple, np.ndarray)):
        return [log(element) for element in flatten(a)]
    return log(a)
def xmax(*args): # Excel reference: https://support.office.com/en-us/article/MAX-function-e0012414-9ac8-4b34-9a47-73e662c08098
    """Excel MAX over the numeric, non-boolean values in *args*; 0 when none exist."""
    values = extract_numeric_values(*args)
    if not values:
        # Excel returns zero when no numeric cell is found
        return 0
    return max(values)
def xmin(*args): # Excel reference: https://support.office.com/en-us/article/MIN-function-61635d12-920f-4ce2-a70f-96f202dcc152
    """Excel MIN over the numeric, non-boolean values in *args*; 0 when none exist."""
    values = extract_numeric_values(*args)
    if not values:
        # Excel returns zero when no numeric cell is found
        return 0
    return min(values)
def xsum(*args): # Excel reference: https://support.office.com/en-us/article/SUM-function-043e1c7d-7726-4e80-8f32-07b23e057f89
    """Excel SUM over the numeric, non-boolean values in *args*; 0 when none exist."""
    values = extract_numeric_values(*args)
    if not values:
        # Excel returns zero when no numeric cell is found
        return 0
    return sum(values)
def choose(index_num, *values): # Excel reference: https://support.office.com/en-us/article/CHOOSE-function-fc5c184f-cb62-4ec7-a46e-38653b98f5bc
    """Excel CHOOSE: return values[index_num - 1] (index truncated to int).

    Yields #VALUE! when the index is outside 1..254 or beyond *values*.
    """
    position = int(index_num)
    if not 1 <= position <= 254:
        return ExcelError('#VALUE!', '%s must be between 1 and 254' % str(index_num))
    if position > len(values):
        return ExcelError('#VALUE!', '%s must not be larger than the number of values: %s' % (str(index_num), len(values)))
    return values[position - 1]
def sumif(range, criteria, sum_range = None): # Excel reference: https://support.office.com/en-us/article/SUMIF-function-169b8c99-c05c-4483-a712-1697a653039b
    """Excel SUMIF: sum the cells of *sum_range* (or *range* itself) whose
    matching cell in *range* satisfies *criteria*.

    Note that type failures are *returned* as TypeError instances rather
    than raised, following the graph's error-as-value convention.
    """
    # WARNING:
    # - wildcards not supported
    # - doesn't really follow 2nd remark about sum_range length
    if not isinstance(range, Range):
        return TypeError('%s must be a Range' % str(range))
    # a Range used as criteria is treated as no match at all
    if isinstance(criteria, Range) and not isinstance(criteria , (str, bool)): # ugly...
        return 0
    indexes = find_corresponding_index(range.values, criteria)
    if sum_range:
        if not isinstance(sum_range, Range):
            return TypeError('%s must be a Range' % str(sum_range))
        # out-of-bounds positions in sum_range contribute 0, per Excel
        def f(x):
            return sum_range.values[x] if x < sum_range.length else 0
        return sum(map(f, indexes))
    else:
        return sum([range.values[x] for x in indexes])
def sumifs(*args):
    """Excel SUMIFS: sum *sum_range* cells whose positions satisfy every
    (criteria_range, criterion) pair.

    Arguments are ``(sum_range, criteria_range1, criterion1, ...)``; the
    surviving index set is the intersection of the per-criterion matches.
    Argument-count failures are returned as TypeError instances (error-as-
    value convention), not raised.
    """
    # Excel reference: https://support.office.com/en-us/article/
    # sumifs-function-c9e748f5-7ea7-455d-9406-611cebce642b
    nb_criteria = (len(args)-1) / 2
    args = list(args)
    # input checks
    if nb_criteria == 0:
        return TypeError('At least one criteria and criteria range should be provided.')
    if int(nb_criteria) != nb_criteria:
        return TypeError('Number of criteria an criteria ranges should be equal.')
    nb_criteria = int(nb_criteria)
    # separate arguments
    sum_range = args[0]
    criteria_ranges = args[1::2]
    criteria = args[2::2]
    # start from all positions and narrow by intersecting each criterion's matches
    index = list(range(0, len(sum_range)))
    for i in range(nb_criteria):
        criteria_range = criteria_ranges[i]
        criterion = str(criteria[i])
        index_tmp = find_corresponding_index(criteria_range.values, criterion)
        index = np.intersect1d(index, index_tmp)
    sum_select = [sum_range.values[i] for i in index]
    res = sum(sum_select)
    return res
def average(*args): # Excel reference: https://support.office.com/en-us/article/AVERAGE-function-047bac88-d466-426c-a32b-8f33eb960cf6
    """Excel AVERAGE over the numeric, non-boolean values found in *args*.

    Previously this raised ZeroDivisionError when no numeric value was
    present; Excel shows #DIV/0! in that case, so an ExcelError is returned
    instead, matching the file's error-as-value convention.
    """
    # ignore non numeric cells and boolean cells
    values = extract_numeric_values(*args)
    if len(values) == 0:
        return ExcelError('#DIV/0!', 'AVERAGE of no numeric values')
    return sum(values) / len(values)
def right(text, n):
    """Return the last *n* characters of *text*; numeric input is truncated
    to int and rendered as a string first."""
    # TODO: hack to deal with naca section numbers
    if isinstance(text, unicode) or isinstance(text, str):
        return text[-n:]
    # TODO: get rid of the decimal
    return str(int(text))[-n:]
def index(my_range, row, col = None): # Excel reference: https://support.office.com/en-us/article/INDEX-function-a5dcf0dd-996d-40a4-a822-b56b061328bd
    """Excel INDEX: value (or whole row/column when the other index is 0)
    at 1-based (row, col) inside *my_range*.

    Accepts either a Range instance or a ``(cells, nrows, ncols)`` tuple;
    errors propagate unchanged and invalid indexes yield #VALUE! errors.
    """
    for i in [my_range, row, col]:
        if isinstance(i, ExcelError) or i in ErrorCodes:
            return i
    row = int(row) if row is not None else row
    col = int(col) if col is not None else col
    if isinstance(my_range, Range):
        cells = my_range.addresses
        nr = my_range.nrows
        nc = my_range.ncols
    else:
        cells, nr, nc = my_range
    # 2-D cell layouts are flattened to a single row-major list
    if nr > 1 or nc > 1:
        a = np.array(cells)
        cells = a.flatten().tolist()
    nr = int(nr)
    nc = int(nc)
    if type(cells) != list:
        return ExcelError('#VALUE!', '%s must be a list' % str(cells))
    if row is not None and not is_number(row):
        return ExcelError('#VALUE!', '%s must be a number' % str(row))
    if row == 0 and col == 0:
        return ExcelError('#VALUE!', 'No index asked for Range')
    if col is None and nr == 1 and row <= nc:
        # special case where index is matched on row, and the second row input can be used as a col
        col = row
        row = None
    if row is not None and row > nr:
        return ExcelError('#VALUE!', 'Index %i out of range' % row)
    if nr == 1:
        col = row if col is None else col
        return cells[int(col) - 1]
    if nc == 1:
        return cells[int(row) - 1]
    else: # could be optimised
        if col is None or row is None:
            return ExcelError('#VALUE!', 'Range is 2 dimensional, can not reach value with 1 arg as None')
        if not is_number(col):
            return ExcelError('#VALUE!', '%s must be a number' % str(col))
        if col > nc:
            return ExcelError('#VALUE!', 'Index %i out of range' % col)
        indices = list(range(len(cells)))
        if row == 0: # get column
            filtered_indices = [x for x in indices if x % nc == col - 1]
            filtered_cells = [cells[i] for i in filtered_indices]
            return filtered_cells
        elif col == 0: # get row
            filtered_indices = [x for x in indices if int(x / nc) == row - 1]
            filtered_cells = [cells[i] for i in filtered_indices]
            return filtered_cells
        else:
            # row-major offset into the flattened cell list
            return cells[(row - 1)* nc + (col - 1)]
def lookup(value, lookup_range, result_range = None): # Excel reference: https://support.office.com/en-us/article/LOOKUP-function-446d94af-663b-451d-8251-369d5e3864cb
    """Excel LOOKUP (vector form): find the largest numeric entry of
    *lookup_range* that is <= *value* and return the value at the matching
    position of *result_range* (or of *lookup_range* itself).

    Only numeric lookups are supported; failures come back as returned
    Exception/ExcelError values, not raised exceptions.
    NOTE(review): if lookup_range.values is empty, ``i`` is never bound and
    the code below would raise NameError - confirm ranges are non-empty.
    """
    # TODO
    if not isinstance(value,(int,float)):
        return Exception("Non numeric lookups (%s) not supported" % value)
    # TODO: note, may return the last equal value
    # index of the last numeric value
    lastnum = -1
    for i,v in enumerate(lookup_range.values):
        if isinstance(v,(int,float)):
            if v > value:
                break
            else:
                lastnum = i
    output_range = result_range.values if result_range is not None else lookup_range.values
    if lastnum < 0:
        return ExcelError('#VALUE!', 'No numeric data found in the lookup range')
    else:
        if i == 0:
            return ExcelError('#VALUE!', 'All values in the lookup range are bigger than %s' % value)
        else:
            if i >= len(lookup_range)-1:
                # return the biggest number smaller than value
                return output_range[lastnum]
            else:
                return output_range[i-1]
# NEEDS TEST
def linest(*args, **kwargs): # Excel reference: https://support.office.com/en-us/article/LINEST-function-84d7d0d9-6e50-4101-977a-fa7abf772b6d
    """Least-squares polynomial fit of Y (args[0]) against X (args[1]).

    An optional third argument disables the intercept when false(y);
    ``degree`` (keyword, default 1) sets the polynomial degree.  Returns the
    coefficient vector, highest power first.

    NOTE(review): ``args[0].values()`` is *called* here while every other
    function in this module reads ``.values`` as an attribute - confirm
    which Range API is intended before relying on this path.
    """
    Y = list(args[0].values())
    X = list(args[1].values())
    if len(args) == 3:
        const = args[2]
        if isinstance(const,str):
            const = (const.lower() == "true")
    else:
        const = True
    degree = kwargs.get('degree',1)
    # build the vandermonde matrix
    A = np.vander(X, degree+1)
    if not const:
        # force the intercept to zero
        A[:,-1] = np.zeros((1,len(X)))
    # perform the fit
    (coefs, residuals, rank, sing_vals) = np.linalg.lstsq(A, Y)
    return coefs
# NEEDS TEST
def npv(*args): # Excel reference: https://support.office.com/en-us/article/NPV-function-8672cb67-2576-4d07-b67b-ac28acf2a568
    """Net present value of the cashflow (args[1], Range or sequence)
    discounted at args[0]; the first amount is discounted one full period."""
    discount_rate = args[0]
    cashflow = args[1]
    if isinstance(cashflow, Range):
        cashflow = cashflow.values
    factor = 1 + discount_rate
    return sum(float(amount) * factor ** -(period + 1) for (period, amount) in enumerate(cashflow))
def columns(array):
    """
    Function to find the number of columns in an array.
    Excel reference: https://support.office.com/en-us/article/columns-function-4e8e7b4e-e603-43e8-b177-956088fa48ca
    :param array: the array of which the columns should be counted.
    :return: the number of columns.

    NOTE(review): this delegates straight to rows(), i.e. it returns
    len(array.values) - only correct when the Range layout makes both
    measures coincide; verify against RangeCore for 2-D ranges.
    """
    return rows(array)
def match(lookup_value, lookup_range, match_type=1): # Excel reference: https://support.office.com/en-us/article/MATCH-function-e8dffd45-c762-47d6-bf89-533f4a37673a
    """Excel MATCH: 1-based position of *lookup_value* within *lookup_range*.

    match_type 1 requires ascending values and returns the last position
    whose value is <= lookup_value; 0 requires an exact match (numbers are
    compared as floats, strings case-insensitively); -1 requires descending
    values and returns the last position whose value is >= lookup_value.
    None entries are filtered first, so positions are relative to the
    filtered values.  Failures come back as ExcelError values.
    """
    if not isinstance(lookup_range, Range):
        return ExcelError('#VALUE!', 'Lookup_range is not a Range')
    # normalize for comparison: lowercase strings, float ints, None -> 0
    def type_convert(value):
        if type(value) == str:
            value = value.lower()
        elif type(value) == int:
            value = float(value)
        elif value is None:
            value = 0
        return value;
    def type_convert_float(value):
        if is_number(value):
            value = float(value)
        else:
            value = None
        return value
    lookup_value = type_convert(lookup_value)
    range_values = [x for x in lookup_range.values if x is not None] # filter None values to avoid asc/desc order errors
    range_length = len(range_values)
    if match_type == 1:
        # Verify ascending sort
        posMax = -1
        for i in range(range_length):
            current = type_convert(range_values[i])
            if i < range_length - 1:
                if current > type_convert(range_values[i + 1]):
                    return ExcelError('#VALUE!', 'for match_type 1, lookup_range must be sorted ascending')
            if current <= lookup_value:
                posMax = i
        if posMax == -1:
            return ExcelError('#VALUE!','no result in lookup_range for match_type 1')
        return posMax +1 #Excel starts at 1
    elif match_type == 0:
        # No string wildcard
        try:
            if is_number(lookup_value):
                lookup_value = float(lookup_value)
                output = [type_convert_float(x) for x in range_values].index(lookup_value) + 1
            else:
                output = [str(x).lower() for x in range_values].index(lookup_value) + 1
            return output
        except:
            return ExcelError('#VALUE!', '%s not found' % lookup_value)
    elif match_type == -1:
        # Verify descending sort
        posMin = -1
        for i in range((range_length)):
            current = type_convert(range_values[i])
            # NOTE(review): `i is not range_length-1` relies on CPython small-int
            # identity caching; `i != range_length - 1` is what is meant here
            if i is not range_length-1 and current < type_convert(range_values[i+1]):
                return ExcelError('#VALUE!','for match_type -1, lookup_range must be sorted descending')
            if current >= lookup_value:
                posMin = i
        if posMin == -1:
            return ExcelError('#VALUE!', 'no result in lookup_range for match_type -1')
        return posMin +1 #Excel starts at 1
def mod(nb, q): # Excel Reference: https://support.office.com/en-us/article/MOD-function-9b6cd169-b6ee-406a-a97b-edf2a9dc24f3
    """Remainder of nb divided by q (integers only; Python's % sign rules apply)."""
    if not isinstance(nb, int):
        return ExcelError('#VALUE!', '%s is not an integer' % str(nb))
    if not isinstance(q, int):
        return ExcelError('#VALUE!', '%s is not an integer' % str(q))
    return nb % q
def eomonth(start_date, months): # Excel reference: https://support.office.com/en-us/article/eomonth-function-7314ffa1-2bc9-4005-9d66-f49db127d628
    """Serial number of the last day of the month *months* away from *start_date*."""
    if not is_number(start_date):
        return ExcelError('#VALUE!', 'start_date %s must be a number' % str(start_date))
    if start_date < 0:
        return ExcelError('#VALUE!', 'start_date %s must be positive' % str(start_date))
    if not is_number(months):
        return ExcelError('#VALUE!', 'months %s must be a number' % str(months))

    y1, m1, d1 = date_from_int(start_date)
    shifted = datetime.date(year=y1, month=m1, day=d1) + relativedelta(months=months)
    # last calendar day of the shifted month
    last_day = monthrange(shifted.year, shifted.month)[1]
    return int(int_from_date(datetime.date(shifted.year, shifted.month, last_day)))
def year(serial_number): # Excel reference: https://support.office.com/en-us/article/year-function-c64f017a-1354-490d-981f-578e8ec8d3b9
    """Return the year component of an Excel date serial number."""
    if not is_number(serial_number):
        return ExcelError('#VALUE!', 'start_date %s must be a number' % str(serial_number))
    if serial_number < 0:
        return ExcelError('#VALUE!', 'start_date %s must be positive' % str(serial_number))
    return date_from_int(serial_number)[0]
def month(serial_number): # Excel reference: https://support.office.com/en-us/article/month-function-579a2881-199b-48b2-ab90-ddba0eba86e8
    """Return the month component of an Excel date serial number."""
    if not is_number(serial_number):
        return ExcelError('#VALUE!', 'start_date %s must be a number' % str(serial_number))
    if serial_number < 0:
        return ExcelError('#VALUE!', 'start_date %s must be positive' % str(serial_number))
    return date_from_int(serial_number)[1]
def count(*args): # Excel reference: https://support.office.com/en-us/article/COUNT-function-a59cd7fc-b623-4d93-87a4-d23bf411294c
    """Count the numeric values in *args*; booleans inside Ranges are excluded."""
    total = 0
    for arg in args:
        if isinstance(arg, Range):
            # count numeric, non-boolean cells inside the range
            total += sum(1 for x in arg.values if is_number(x) and type(x) is not bool)
        elif is_number(arg): # int() is used for text representation of numbers
            total += 1
    return total
def counta(range):
    """Excel COUNTA: number of non-empty cells in *range*.

    A #NULL error counts as zero; any other ExcelError is returned unchanged
    so it propagates to the caller.
    NOTE(review): when *range* is a plain error-code string from ErrorCodes
    the ``range.value`` access below would fail - confirm only ExcelError
    instances reach this branch.
    """
    if isinstance(range, ExcelError) or range in ErrorCodes:
        if range.value == '#NULL':
            return 0
        else:
            return range # return the Excel Error
        # raise Exception('ExcelError other than #NULL passed to excellib.counta()')
    else:
        return len([x for x in range.values if x != None])
def countif(range, criteria): # Excel reference: https://support.office.com/en-us/article/COUNTIF-function-e0de10c6-f885-4e71-abb4-1f464816df34
    """Count the cells of *range* satisfying *criteria*."""
    # WARNING:
    # - wildcards not supported
    # - support of strings with >, <, <=, =>, <> not provided
    matching = find_corresponding_index(range.values, criteria)
    return len(matching)
def countifs(*args): # Excel reference: https://support.office.com/en-us/article/COUNTIFS-function-dda3dc6e-f74e-4aee-88bc-aa8c2a866842
    """Excel COUNTIFS: count positions satisfying every (range, criteria) pair.

    Works recursively: the first pair selects matching indexes, the
    remaining ranges are filtered down to those indexes and passed to the
    next layer; ``float('inf')`` is the base case so the final ``min``
    yields the innermost (smallest) count.
    """
    arg_list = list(args)
    l = len(arg_list)
    if l % 2 != 0:
        return ExcelError('#VALUE!', 'excellib.countifs() must have a pair number of arguments, here %d' % l)
    if l >= 2:
        indexes = find_corresponding_index(args[0].values, args[1]) # find indexes that match first layer of countif
        remaining_ranges = [elem for i, elem in enumerate(arg_list[2:]) if i % 2 == 0] # get only ranges
        remaining_criteria = [elem for i, elem in enumerate(arg_list[2:]) if i % 2 == 1] # get only criteria
        # (a commented-out block verifying that all Ranges are associated
        # used to live here; it never worked because of recursion issues)
        filtered_remaining_ranges = []
        for range in remaining_ranges: # filter items in remaining_ranges that match valid indexes from first countif layer
            filtered_remaining_cells = []
            filtered_remaining_range = []
            for index, item in enumerate(range.values):
                if index in indexes:
                    filtered_remaining_cells.append(range.addresses[index]) # reconstructing cells from indexes
                    filtered_remaining_range.append(item) # reconstructing values from indexes
            # WARNING HERE
            filtered_remaining_ranges.append(Range(filtered_remaining_cells, filtered_remaining_range))
        new_tuple = ()
        for index, range in enumerate(filtered_remaining_ranges): # rebuild the tuple that will be the argument of next layer
            new_tuple += (range, remaining_criteria[index])
        return min(countifs(*new_tuple), len(indexes)) # only consider the minimum number across all layer responses
    else:
        return float('inf')
def xround(number, num_digits = 0): # Excel reference: https://support.office.com/en-us/article/ROUND-function-c018c5d8-40fb-4053-90b1-b3e7f61a213c
    """Excel ROUND: round half away from zero to *num_digits* decimal places.

    Negative *num_digits* rounds to the left of the decimal point.  The old
    negative branch used Python's round(), which is banker's rounding on
    Python 3 (round(25, -1) == 20); Excel wants 30, so both branches now go
    through Decimal with ROUND_HALF_UP.
    """
    if not is_number(number):
        return ExcelError('#VALUE!', '%s is not a number' % str(number))
    if not is_number(num_digits):
        return ExcelError('#VALUE!', '%s is not a number' % str(num_digits))

    number = float(number) # if you don't Spreadsheet.dump/load, you might end up with Long numbers, which Decimal doesn't accept
    # Decimal('1E+2') quantizes to hundreds, Decimal('1E-2') to two decimals
    quantum = Decimal(1).scaleb(-int(num_digits))
    return float(Decimal(repr(number)).quantize(quantum, rounding=ROUND_HALF_UP))
def roundup(number, num_digits = 0): # Excel reference: https://support.office.com/en-us/article/ROUNDUP-function-f8bc9b23-e795-47db-8703-db171d0c42a7
    """Excel ROUNDUP: round *number* away from zero to *num_digits* places.

    The old negative-digits branch used math.ceil, which rounds toward
    +infinity and so returned ROUNDUP(-12.1, -1) == -10 instead of Excel's
    -20.  Decimal's ROUND_UP mode rounds away from zero, matching Excel in
    both branches.
    """
    if not is_number(number):
        return ExcelError('#VALUE!', '%s is not a number' % str(number))
    if not is_number(num_digits):
        return ExcelError('#VALUE!', '%s is not a number' % str(num_digits))

    number = float(number) # if you don't Spreadsheet.dump/load, you might end up with Long numbers, which Decimal doesn't accept
    quantum = Decimal(1).scaleb(-int(num_digits))
    return float(Decimal(repr(number)).quantize(quantum, rounding=ROUND_UP))
def mid(text, start_num, num_chars): # Excel reference: https://support.office.com/en-us/article/MID-MIDB-functions-d5f9e25c-d7d6-472e-b568-4ecb12433028
    """Excel MID: substring of *text* starting at 1-based *start_num*,
    *num_chars* characters long; invalid arguments yield #VALUE! errors."""
    text = str(text)

    if len(text) > CELL_CHARACTER_LIMIT:
        return ExcelError('#VALUE!', 'text is too long. Is %s needs to be %s or less.' % (len(text), CELL_CHARACTER_LIMIT))
    if type(start_num) != int:
        return ExcelError('#VALUE!', '%s is not an integer' % str(start_num))
    if type(num_chars) != int:
        return ExcelError('#VALUE!', '%s is not an integer' % str(num_chars))
    if start_num < 1:
        return ExcelError('#VALUE!', '%s is < 1' % str(start_num))
    if num_chars < 0:
        return ExcelError('#VALUE!', '%s is < 0' % str(num_chars))

    begin = start_num - 1
    return text[begin: begin + num_chars]
def date(year, month, day): # Excel reference: https://support.office.com/en-us/article/DATE-function-e36c0c8c-4104-49da-ab83-82328b832349
    """Excel DATE: serial number of year/month/day in the 1900 date system.

    Out-of-range month/day values are normalized (carried over) like Excel,
    and years below 1900 are interpreted as offsets from 1900.  The unused
    local ``date = datetime.datetime(...)`` that shadowed this function is
    removed (the same datetime is constructed again on the next line).
    """
    if type(year) != int:
        return ExcelError('#VALUE!', '%s is not an integer' % str(year))
    if type(month) != int:
        return ExcelError('#VALUE!', '%s is not an integer' % str(month))
    if type(day) != int:
        return ExcelError('#VALUE!', '%s is not an integer' % str(day))
    if year < 0 or year > 9999:
        return ExcelError('#VALUE!', 'Year must be between 1 and 9999, instead %s' % str(year))

    if year < 1900:
        year = 1900 + year

    year, month, day = normalize_year(year, month, day) # taking into account negative month and day values

    # +2 bridges the day difference to Excel serials (Excel counts from
    # 1900-01-00 and includes the fictitious 1900-02-29)
    date_0 = datetime.datetime(1900, 1, 1)
    result = (datetime.datetime(year, month, day) - date_0).days + 2

    if result <= 0:
        return ExcelError('#VALUE!', 'Date result is negative')
    else:
        return result
def yearfrac(start_date, end_date, basis = 0): # Excel reference: https://support.office.com/en-us/article/YEARFRAC-function-3844141e-c76d-4143-82b6-208454ddc6a8
    """Fraction of a year between two Excel serial dates under a day-count basis.

    basis: 0 = US 30/360 (default), 1 = actual/actual, 2 = actual/360,
           3 = actual/365, 4 = Eurobond 30/360.
    Dates are swapped if start_date > end_date; errors come back as ExcelError.
    """
    def actual_nb_days_ISDA(start, end): # needed to separate days_in_leap_year from days_not_leap_year
        # split the actual day count into (days in non-leap years, days in leap years)
        y1, m1, d1 = start
        y2, m2, d2 = end
        days_in_leap_year = 0
        days_not_in_leap_year = 0
        year_range = list(range(y1, y2 + 1))
        for y in year_range:
            if y == y1 and y == y2:
                nb_days = date(y2, m2, d2) - date(y1, m1, d1)
            elif y == y1:
                nb_days = date(y1 + 1, 1, 1) - date(y1, m1, d1)
            elif y == y2:
                nb_days = date(y2, m2, d2) - date(y2, 1, 1)
            else:
                nb_days = 366 if is_leap_year(y) else 365
            if is_leap_year(y):
                days_in_leap_year += nb_days
            else:
                days_not_in_leap_year += nb_days
        return (days_not_in_leap_year, days_in_leap_year)
    def actual_nb_days_AFB_alter(start, end): # http://svn.finmath.net/finmath%20lib/trunk/src/main/java/net/finmath/time/daycount/DayCountConvention_ACT_ACT_YEARFRAC.java
        # actual/actual with AFB-style denominator
        y1, m1, d1 = start
        y2, m2, d2 = end
        delta = date(*end) - date(*start)
        if delta <= 365:
            # span of at most one year: denominator is 366 when a Feb 29 is covered
            if is_leap_year(y1) and is_leap_year(y2):
                denom = 366
            elif is_leap_year(y1) and date(y1, m1, d1) <= date(y1, 2, 29):
                denom = 366
            elif is_leap_year(y2) and date(y2, m2, d2) >= date(y2, 2, 29):
                denom = 366
            else:
                denom = 365
        else:
            # multi-year span: denominator is the average year length over the range
            year_range = list(range(y1, y2 + 1))
            nb = 0
            for y in year_range:
                nb += 366 if is_leap_year(y) else 365
            denom = nb / len(year_range)
        return delta / denom
    if not is_number(start_date):
        return ExcelError('#VALUE!', 'start_date %s must be a number' % str(start_date))
    if not is_number(end_date):
        return ExcelError('#VALUE!', 'end_date %s must be number' % str(end_date))
    if start_date < 0:
        return ExcelError('#VALUE!', 'start_date %s must be positive' % str(start_date))
    if end_date < 0:
        return ExcelError('#VALUE!', 'end_date %s must be positive' % str(end_date))
    if start_date > end_date: # switch dates if start_date > end_date
        temp = end_date
        end_date = start_date
        start_date = temp
    y1, m1, d1 = date_from_int(start_date)
    y2, m2, d2 = date_from_int(end_date)
    if basis == 0: # US 30/360
        d2 = 30 if d2 == 31 and (d1 == 31 or d1 == 30) else min(d2, 31)
        d1 = 30 if d1 == 31 else d1
        count = 360 * (y2 - y1) + 30 * (m2 - m1) + (d2 - d1)
        result = count / 360
    elif basis == 1: # Actual/actual
        result = actual_nb_days_AFB_alter((y1, m1, d1), (y2, m2, d2))
    elif basis == 2: # Actual/360
        result = (end_date - start_date) / 360
    elif basis == 3: # Actual/365
        result = (end_date - start_date) / 365
    elif basis == 4: # Eurobond 30/360
        d2 = 30 if d2 == 31 else d2
        d1 = 30 if d1 == 31 else d1
        count = 360 * (y2 - y1) + 30 * (m2 - m1) + (d2 - d1)
        result = count / 360
    else:
        return ExcelError('#VALUE!', '%d must be 0, 1, 2, 3 or 4' % basis)
    return result
def isna(value):
    """Excel ISNA (approximation): True when `value` cannot be evaluated."""
    # This function might need more solid testing
    # NOTE: relies on eval(); any expression that fails to evaluate counts as N/A
    result = True
    try:
        eval(value)
        result = False
    except:  # mirror the original bare-except semantics
        pass
    return result
def isblank(value):
    """Excel ISBLANK: a cell is blank exactly when it holds None."""
    blank = value is None
    return blank
def istext(value):
    """Excel ISTEXT: True only when the value's type is exactly str."""
    return type(value) is str
def offset(reference, rows, cols, height=None, width=None): # Excel reference: https://support.office.com/en-us/article/OFFSET-function-c8de19ae-dd79-4b9b-a14e-b4d906d11b66
    """Excel OFFSET: return the address (string) `rows`/`cols` away from
    `reference`, optionally sized `height` x `width` (a range address).

    BUG FIXES vs previous version:
    - non-int height/width error messages said "must not be integer" and
      %d-formatted the (proven non-int) value, raising TypeError;
    - the "passed together" ExcelError put the message in the error-value slot.
    """
    # This function accepts a list of addresses
    # Maybe think of passing a Range as first argument
    for i in [reference, rows, cols, height, width]:
        if isinstance(i, ExcelError) or i in ErrorCodes:
            return i  # propagate upstream errors unchanged
    rows = int(rows)
    cols = int(cols)
    # get first cell address of reference
    if is_range(reference):
        ref = resolve_range(reference, should_flatten = True)[0][0]
    else:
        ref = reference
    ref_sheet = ''
    end_address = ''
    if '!' in ref:
        ref_sheet = ref.split('!')[0] + '!'
    found = re.search(CELL_REF_RE, ref)
    new_col = col2num(found.group(1)) + cols
    new_row = int(found.group(2)) + rows
    if new_row <= 0 or new_col <= 0:
        return ExcelError('#VALUE!', 'Offset is out of bounds')
    start_address = str(num2col(new_col)) + str(new_row)
    if (height is not None and width is not None):
        if type(height) != int:
            return ExcelError('#VALUE!', '%s must be an integer' % str(height))
        if type(width) != int:
            return ExcelError('#VALUE!', '%s must be an integer' % str(width))
        if height > 0:
            end_row = new_row + height - 1
        else:
            return ExcelError('#VALUE!', '%d must be strictly positive' % height)
        if width > 0:
            end_col = new_col + width - 1
        else:
            return ExcelError('#VALUE!', '%d must be strictly positive' % width)
        end_address = ':' + str(num2col(end_col)) + str(end_row)
    elif height and not width or not height and width:
        return ExcelError('#VALUE!', 'Height and width must be passed together')
    return ref_sheet + start_address + end_address
def sumproduct(*ranges): # Excel reference: https://support.office.com/en-us/article/SUMPRODUCT-function-16753e75-9f68-4874-94ac-4d2145a2fd2e
    """Excel SUMPRODUCT: multiply the ranges element-wise, then sum the products."""
    range_list = list(ranges)
    for r in range_list: # if a range has no values (i.e if it's empty)
        if len(r.values) == 0:
            return 0
    for range in range_list:  # NOTE(review): loop variable shadows the builtin `range`
        for item in range.values:
            # If there is an ExcelError inside a Range, sumproduct should output an ExcelError
            if isinstance(item, ExcelError):
                return ExcelError("#N/A", "ExcelErrors are present in the sumproduct items")
    reduce(check_length, range_list) # check that all ranges have the same size
    # element-wise multiply all ranges, then sum the resulting values
    return reduce(lambda X, Y: X + Y, reduce(lambda x, y: Range.apply_all('multiply', x, y), range_list).values)
def iferror(value, value_if_error): # Excel reference: https://support.office.com/en-us/article/IFERROR-function-c526fd07-caeb-47b8-8bb6-63f3e417f611
    """Excel IFERROR: substitute value_if_error when value is an error."""
    is_error = isinstance(value, ExcelError) or value in ErrorCodes
    return value_if_error if is_error else value
def irr(values, guess = None):
    """
    Function to calculate the internal rate of return (IRR) using payments and periodic dates. It resembles the
    excel function IRR().

    Excel reference: https://support.office.com/en-us/article/IRR-function-64925eaa-9988-495b-b290-3ad0c163c1bc

    :param values: the payments of which at least one has to be negative.
    :param guess: an initial guess which is required by Excel but isn't used by this function.
    :return: a float being the IRR.

    NOTE(review): np.irr was removed from NumPy 1.20 (moved to the separate
    `numpy_financial` package) — confirm the pinned NumPy version provides it.
    """
    if isinstance(values, Range):
        values = values.values
    if guess is not None and guess != 0:
        # guess is accepted for Excel signature compatibility but never used
        raise ValueError('guess value for excellib.irr() is %s and not 0' % guess)
    else:
        try:
            return np.irr(values)
        except Exception as e:
            return ExcelError('#NUM!', e)
def xirr(values, dates, guess=0):
    """
    Function to calculate the internal rate of return (IRR) using payments and non-periodic dates. It resembles the
    excel function XIRR().

    Excel reference: https://support.office.com/en-ie/article/xirr-function-de1242ec-6477-445b-b11b-a303ad9adc9d

    :param values: the payments of which at least one has to be negative.
    :param dates: the dates as excel dates (e.g. 43571 for 16/04/2019).
    :param guess: an initial guess which is required by Excel but isn't used by this function.
    :return: a float being the IRR.
    """
    if isinstance(values, Range):
        values = values.values
    if isinstance(dates, Range):
        dates = dates.values
    if guess is not None and guess != 0:
        # guess is accepted for Excel signature compatibility but never used
        raise ValueError('guess value for excellib.irr() is %s and not 0' % guess)
    else:
        try:
            # Newton's method on the XNPV root; rate unbounded here (lim_rate=False)
            return scipy.optimize.newton(lambda r: xnpv(r, values, dates, lim_rate=False), 0.0)
        except RuntimeError: # Failed to converge?
            # fall back to a bracketed root search over a wide rate interval
            return scipy.optimize.brentq(lambda r: xnpv(r, values, dates, lim_rate=False), -1.0, 1e10)
def vlookup(lookup_value, table_array, col_index_num, range_lookup = True): # https://support.office.com/en-us/article/VLOOKUP-function-0bbc8083-26fe-4963-8ab8-93a18ad188a1
    """Excel VLOOKUP over a Range: find lookup_value in the first column and
    return the value from column `col_index_num` of the same row.
    range_lookup=False requires an exact match; True does an approximate
    (last value <= lookup_value) match, which assumes an ascending first column."""
    if not isinstance(table_array, Range):
        return ExcelError('#VALUE', 'table_array should be a Range')
    if col_index_num > table_array.ncols:
        return ExcelError('#VALUE', 'col_index_num is greater than the number of cols in table_array')
    first_column = table_array.get(0, 1)  # lookup keys
    result_column = table_array.get(0, col_index_num)
    if not range_lookup:
        if lookup_value not in first_column.values:
            return ExcelError('#N/A', 'lookup_value not in first column of table_array')
        else:
            i = first_column.values.index(lookup_value)  # NOTE(review): first duplicate wins
            ref = first_column.order[i]
    else:
        i = None
        for v in first_column.values:
            if lookup_value >= v:
                i = first_column.values.index(v)  # NOTE(review): .index finds the first occurrence of v
                ref = first_column.order[i]
            else:
                break
        if i is None:
            return ExcelError('#N/A', 'lookup_value smaller than all values of table_array')
    return Range.find_associated_value(ref, result_column)
def sln(cost, salvage, life): # Excel reference: https://support.office.com/en-us/article/SLN-function-cdb666e5-c1c6-40a7-806a-e695edc2f1c8
    """Excel SLN: straight-line depreciation of an asset for one period."""
    for arg in (cost, salvage, life):
        if isinstance(arg, ExcelError) or arg in ErrorCodes:
            return arg  # propagate upstream errors unchanged
    return (cost - salvage) / life
def vdb(cost, salvage, life, start_period, end_period, factor = 2, no_switch = False): # Excel reference: https://support.office.com/en-us/article/VDB-function-dde4e207-f3fa-488d-91d2-66d55e861d73
    """Variable declining-balance depreciation between start_period and end_period.

    Uses a declining-balance rate of factor / life and, unless no_switch is
    True, switches to straight-line once that would depreciate more.
    """
    for arg in [cost, salvage, life, start_period, end_period, factor, no_switch]:
        if isinstance(arg, ExcelError) or arg in ErrorCodes:
            return arg  # propagate upstream errors unchanged
    for arg in [cost, salvage, life, start_period, end_period, factor]:
        if not isinstance(arg, (float, int)):
            return ExcelError('#VALUE', 'Arg %s should be an int, float or long, instead: %s' % (arg, type(arg)))
    start_period = start_period  # NOTE(review): no-op assignments kept from original
    end_period = end_period
    sln_depr = sln(cost, salvage, life)  # NOTE(review): overwritten with 0 below before any use
    depr_rate = factor / life
    acc_depr = 0
    depr = 0
    switch_to_sln = False
    sln_depr = 0
    result = 0
    start_life = 0
    delta_life = life % 1
    if delta_life > 0: # to handle cases when life is not an integer
        end_life = int(life + 1)
    else:
        end_life = int(life)
    periods = list(range(start_life, end_life))
    if int(start_period) != start_period:
        # fractional start period: depreciate the partial first period up front
        delta_start = abs(int(start_period) - start_period)
        depr = (cost - acc_depr) * depr_rate * delta_start
        acc_depr += depr
        start_life = 1
        periods = [x + 0.5 for x in periods]
    for index, current_year in enumerate(periods):  # NOTE(review): `index` shadows the module-level index()
        if not no_switch: # no_switch = False (Default Case)
            if switch_to_sln:
                depr = sln_depr
            else:
                depr = (cost - acc_depr) * depr_rate
                acc_depr += depr
                temp_sln_depr = sln(cost, salvage, life)
                if depr < temp_sln_depr:
                    # declining balance fell below straight-line: switch over
                    switch_to_sln = True
                    fixed_remaining_years = life - current_year - 1
                    fixed_remaining_cost = cost - acc_depr
                    # we need to check future sln: current depr should never be smaller than sln to come
                    sln_depr = sln(fixed_remaining_cost, salvage, fixed_remaining_years)
                    if sln_depr > depr: # if it's the case, we switch to sln earlier than the regular case
                        # cancel what has been done
                        acc_depr -= depr
                        fixed_remaining_years += 1
                        fixed_remaining_cost = cost - acc_depr
                        # recalculate depreciation
                        sln_depr = sln(fixed_remaining_cost, salvage, fixed_remaining_years)
                        depr = sln_depr
                        acc_depr += depr
        else: # no_switch = True
            depr = (cost - acc_depr) * depr_rate
            acc_depr += depr
        # accumulate only the slice of depreciation inside [start_period, end_period)
        delta_start = abs(current_year - start_period)
        if delta_start < 1 and delta_start != 0:
            result += depr * (1 - delta_start)
        elif current_year >= start_period and current_year < end_period:
            delta_end = abs(end_period - current_year)
            if delta_end < 1 and delta_end != 0:
                result += depr * delta_end
            else:
                result += depr
    return result
def xnpv(rate, values, dates, lim_rate = True): # Excel reference: https://support.office.com/en-us/article/XNPV-function-1b42bbf6-370f-4532-a0eb-d67c16b664b7
    """
    Function to calculate the net present value (NPV) using payments and non-periodic dates. It resembles the excel function XPNV().

    :param rate: the discount rate.
    :param values: the payments of which at least one has to be negative.
    :param dates: the dates as excel dates (e.g. 43571 for 16/04/2019).
    :param lim_rate: when True, reject negative rates (Excel behaviour).
    :return: a float being the NPV, or an ExcelError on bad input.
    """
    if isinstance(values, Range):
        values = values.values
    if isinstance(dates, Range):
        dates = dates.values
    if len(values) != len(dates):
        return ExcelError('#NUM!', '`values` range must be the same length as `dates` range in XNPV, %s != %s' % (len(values), len(dates)))
    if lim_rate and rate < 0:
        # BUG FIX: the message had no placeholders but was %-formatted with a
        # tuple, so triggering this branch raised TypeError instead of returning.
        return ExcelError('#NUM!', 'excel cannot handle a negative `rate`')
    result = 0
    for v, d in zip(values, dates):
        # discount each flow by its distance (in years) from the first date
        result += v / np.power(1.0 + rate, (d - dates[0]) / 365)
    return result
def pmt(*args): # Excel reference: https://support.office.com/en-us/article/PMT-function-0214da64-9a63-4996-bc20-214433fa6441
    """Excel PMT: constant periodic payment for a loan.

    WARNING: fv & type arguments are accepted positionally but ignored —
    both are assumed to be their defaults (0).
    """
    rate, num_payments, present_value = args[0], args[1], args[2]
    return -present_value * rate / (1 - np.power(1 + rate, -num_payments))
# https://support.office.com/en-us/article/POWER-function-D3F2908B-56F4-4C3F-895A-07FB519C362A
def power(number, power): # Excel reference: https://support.office.com/en-us/article/POWER-function-D3F2908B-56F4-4C3F-895A-07FB519C362A
    """Excel POWER: number raised to power. 0^0 and fractional powers of
    negative numbers are #NUM! errors."""
    if number == power == 0:
        # Really excel? What were you thinking?
        # BUG FIX: the message was %-formatted with no placeholder, raising TypeError.
        return ExcelError('#NUM!', 'number and power cannot both be zero')
    if power < 1 and number < 0:
        return ExcelError('#NUM!', '%s must be non-negative' % str(number))
    return np.power(number, power)
# https://support.office.com/en-ie/article/sqrt-function-654975c2-05c4-4831-9a24-2c65e4040fdf
def sqrt(number): # Excel reference: https://support.office.com/en-ie/article/sqrt-function-654975c2-05c4-4831-9a24-2c65e4040fdf
    """Excel SQRT: square root; negative input is a #NUM! error."""
    if number < 0:
        # BUG FIX: previously referenced the undefined name `index_num` (NameError).
        return ExcelError('#NUM!', '%s must be non-negative' % str(number))
    return np.sqrt(number)
# https://support.office.com/en-ie/article/today-function-5eb3078d-a82c-4736-8930-2f51a028fdd9
def today(): # Excel reference: https://support.office.com/en-ie/article/today-function-5eb3078d-a82c-4736-8930-2f51a028fdd9
    """Excel TODAY(): the Excel serial number of the current date.

    Excel stores dates as sequential serial numbers: January 1, 1900 is
    serial number 1. The +2 offset accounts for 1-based counting plus the
    phantom 1900-02-29 that Excel's 1900 date system includes.
    """
    elapsed = datetime.datetime.today().date() - EXCEL_EPOCH
    return elapsed.days + 2
# https://support.office.com/en-us/article/concat-function-9b1a9a3f-94ff-41af-9736-694cbd6b4ca2
def concat(*args): # Excel reference: https://support.office.com/en-us/article/concat-function-9b1a9a3f-94ff-41af-9736-694cbd6b4ca2
    """Excel CONCAT: flatten all arguments, then delegate to concatenate()."""
    flat_args = tuple(flatten(args))
    return concatenate(*flat_args)
# https://support.office.com/en-us/article/CONCATENATE-function-8F8AE884-2CA8-4F7A-B093-75D702BEA31D
# Important: In Excel 2016, Excel Mobile, and Excel Online, this function has
# been replaced with the CONCAT function. Although the CONCATENATE function is
# still available for backward compatibility, you should consider using CONCAT
# from now on. This is because CONCATENATE may not be available in future
# versions of Excel.
#
# BE AWARE; there are functional differences between CONACTENATE AND CONCAT
#
def concatenate(*args): # Excel reference: https://support.office.com/en-us/article/CONCATENATE-function-8F8AE884-2CA8-4F7A-B093-75D702BEA31D
    """Excel CONCATENATE (legacy; superseded by CONCAT — see comment above).

    Rejects nested/range arguments and results longer than Excel's cell
    character limit; otherwise joins the string forms of all arguments.
    """
    if tuple(flatten(args)) != args:
        # nested iterables are not accepted by CONCATENATE (unlike CONCAT)
        return ExcelError('#VALUE', 'Could not process arguments %s' % (args))
    cat_string = ''.join(str(a) for a in args)
    if len(cat_string) > CELL_CHARACTER_LIMIT:
        # BUG FIX: previously referenced undefined `cat_String` (NameError) and
        # misspelled "concatenated" in the message.
        return ExcelError('#VALUE', 'Too long. concatenated string should be no longer than %s but is %s' % (CELL_CHARACTER_LIMIT, len(cat_string)))
    return cat_string
if __name__ == '__main__':
    # Library module: no command-line behaviour.
    pass
|
anthill/koala | koala/excellib.py | irr | python | def irr(values, guess = None):
if isinstance(values, Range):
values = values.values
if guess is not None and guess != 0:
raise ValueError('guess value for excellib.irr() is %s and not 0' % guess)
else:
try:
return np.irr(values)
except Exception as e:
return ExcelError('#NUM!', e) | Function to calculate the internal rate of return (IRR) using payments and periodic dates. It resembles the
excel function IRR().
Excel reference: https://support.office.com/en-us/article/IRR-function-64925eaa-9988-495b-b290-3ad0c163c1bc
:param values: the payments of which at least one has to be negative.
:param guess: an initial guess which is required by Excel but isn't used by this function.
:return: a float being the IRR. | train | https://github.com/anthill/koala/blob/393089fe081380506e73235db18a32b4e078d222/koala/excellib.py#L947-L967 | null | # cython: profile=True
'''
Python equivalents of various excel functions
'''
# source: https://github.com/dgorissen/pycel/blob/master/src/pycel/excellib.py
from __future__ import absolute_import, division
import numpy as np
import scipy.optimize
import datetime
from math import log, ceil
from decimal import Decimal, ROUND_UP, ROUND_HALF_UP
from calendar import monthrange
from dateutil.relativedelta import relativedelta
from openpyxl.compat import unicode
from koala.utils import *
from koala.Range import RangeCore as Range
from koala.ExcelError import *
from functools import reduce
######################################################################################
# A dictionary that maps excel function names onto python equivalents. You should
# only add an entry to this map if the python name is different to the excel name
# (which it may need to be to prevent conflicts with existing python functions
# with that name, e.g., max).
# So if excel defines a function foobar(), all you have to do is add a function
# called foobar to this module. You only need to add it to the function map,
# if you want to use a different name in the python code.
# Note: some functions (if, pi, atan2, and, or, array, ...) are already taken care of
# in the FunctionNode code, so adding them here will have no effect.
# Maps Excel function names onto differently-named python implementations.
# BUG FIX: the "min" key appeared twice (duplicate dict keys silently collapse).
FUNCTION_MAP = {
    "ln": "xlog",
    "min": "xmin",
    "max": "xmax",
    "sum": "xsum",
    "gammaln": "lgamma",
    "round": "xround",
}
# Excel functions handled by the evaluator. Membership-tested elsewhere, so
# duplicates were harmless but misleading — this list is now duplicate-free
# (first-occurrence order preserved).
IND_FUN = [
    "SUM",
    "MIN",
    "IF",
    "TAN",
    "ATAN2",
    "PI",
    "ARRAY",
    "ARRAYROW",
    "AND",
    "OR",
    "ALL",
    "VALUE",
    "LOG",
    "MAX",
    "SUMPRODUCT",
    "IRR",
    "CHOOSE",
    "SUMIF",
    "AVERAGE",
    "RIGHT",
    "INDEX",
    "LOOKUP",
    "LINEST",
    "NPV",
    "MATCH",
    "MOD",
    "COUNT",
    "COUNTA",
    "COUNTIF",
    "COUNTIFS",
    "SUMIFS",
    "ROUND",
    "ROWS",
    "COLUMNS",
    "MID",
    "DATE",
    "YEARFRAC",
    "ISNA",
    "ISBLANK",
    "ISTEXT",
    "OFFSET",
    "IFERROR",
    "XIRR",
    "VLOOKUP",
    "VDB",
    "SLN",
    "XNPV",
    "PMT",
    "ROUNDUP",
    "POWER",
    "SQRT",
    "TODAY",
    "YEAR",
    "MONTH",
    "EOMONTH",
]
# Maximum number of characters Excel allows in a single cell.
CELL_CHARACTER_LIMIT = 32767

# Day zero of the 1900 date system used by Excel serial dates.
EXCEL_EPOCH = datetime.datetime.strptime("1900-01-01", '%Y-%m-%d').date()
######################################################################################
# List of excel equivalent functions
# TODO: needs unit testing
def value(text):
    """Excel VALUE: convert a textual number to int or float.

    A '.' after the first character means float; a trailing '%' means a
    percentage; anything else is parsed as int.
    """
    # make the distinction for naca numbers
    has_decimal_point = text.find('.') > 0
    if has_decimal_point:
        return float(text)
    if text.endswith('%'):
        return float(text.replace('%', '')) / 100
    return int(text)
def xlog(a):
    """Natural logarithm; applied element-wise over lists/tuples/arrays."""
    if not isinstance(a, (list, tuple, np.ndarray)):
        return log(a)
    return [log(x) for x in flatten(a)]
def xmax(*args): # Excel reference: https://support.office.com/en-us/article/MAX-function-e0012414-9ac8-4b34-9a47-73e662c08098
    """Excel MAX: largest numeric value; non-numeric/boolean cells ignored."""
    # ignore non numeric cells and boolean cells
    values = extract_numeric_values(*args)
    # if there are no numeric cells, return zero (is what excel does)
    return max(values) if values else 0
def xmin(*args): # Excel reference: https://support.office.com/en-us/article/MIN-function-61635d12-920f-4ce2-a70f-96f202dcc152
    """Excel MIN: smallest numeric value; non-numeric/boolean cells ignored."""
    # ignore non numeric cells and boolean cells
    values = extract_numeric_values(*args)
    # if there are no numeric cells, return zero (is what excel does)
    return min(values) if values else 0
def xsum(*args): # Excel reference: https://support.office.com/en-us/article/SUM-function-043e1c7d-7726-4e80-8f32-07b23e057f89
    """Excel SUM: total of the numeric values; non-numeric/boolean cells ignored."""
    # ignore non numeric cells and boolean cells
    values = extract_numeric_values(*args)
    # if there are no numeric cells, return zero (is what excel does)
    return sum(values) if values else 0
def choose(index_num, *values): # Excel reference: https://support.office.com/en-us/article/CHOOSE-function-fc5c184f-cb62-4ec7-a46e-38653b98f5bc
    """Excel CHOOSE: pick the index_num-th (1-based) value from the list."""
    index = int(index_num)
    if not (1 <= index <= 254):
        return ExcelError('#VALUE!', '%s must be between 1 and 254' % str(index_num))
    if index > len(values):
        return ExcelError('#VALUE!', '%s must not be larger than the number of values: %s' % (str(index_num), len(values)))
    return values[index - 1]
def sumif(range, criteria, sum_range = None): # Excel reference: https://support.office.com/en-us/article/SUMIF-function-169b8c99-c05c-4483-a712-1697a653039b
    """Excel SUMIF: sum the cells (of sum_range, or of range itself) whose
    corresponding `range` value matches `criteria`."""
    # WARNING:
    # - wildcards not supported
    # - doesn't really follow 2nd remark about sum_range length
    # NOTE(review): parameter name shadows the builtin `range`
    if not isinstance(range, Range):
        return TypeError('%s must be a Range' % str(range))  # NOTE(review): returned, not raised
    if isinstance(criteria, Range) and not isinstance(criteria , (str, bool)): # ugly...
        return 0
    indexes = find_corresponding_index(range.values, criteria)
    if sum_range:
        if not isinstance(sum_range, Range):
            return TypeError('%s must be a Range' % str(sum_range))
        def f(x):
            # indexes beyond sum_range contribute 0 (sum_range shorter than range)
            return sum_range.values[x] if x < sum_range.length else 0
        return sum(map(f, indexes))
    else:
        return sum([range.values[x] for x in indexes])
def sumifs(*args):
    # Excel reference: https://support.office.com/en-us/article/
    # sumifs-function-c9e748f5-7ea7-455d-9406-611cebce642b
    """Excel SUMIFS: sum sum_range entries whose positions satisfy every
    (criteria_range, criterion) pair. args = (sum_range, cr1, c1, cr2, c2, ...)."""
    nb_criteria = (len(args)-1) / 2
    args = list(args)

    # input checks
    if nb_criteria == 0:
        return TypeError('At least one criteria and criteria range should be provided.')  # NOTE(review): returned, not raised
    if int(nb_criteria) != nb_criteria:
        return TypeError('Number of criteria an criteria ranges should be equal.')
    nb_criteria = int(nb_criteria)

    # separate arguments
    sum_range = args[0]
    criteria_ranges = args[1::2]
    criteria = args[2::2]

    index = list(range(0, len(sum_range)))
    for i in range(nb_criteria):
        criteria_range = criteria_ranges[i]
        criterion = str(criteria[i])
        index_tmp = find_corresponding_index(criteria_range.values, criterion)
        # keep only positions matching ALL criteria seen so far
        index = np.intersect1d(index, index_tmp)

    sum_select = [sum_range.values[i] for i in index]
    res = sum(sum_select)
    return res
def average(*args): # Excel reference: https://support.office.com/en-us/article/AVERAGE-function-047bac88-d466-426c-a32b-8f33eb960cf6
    """Excel AVERAGE: arithmetic mean of the numeric cells.

    Non-numeric and boolean cells are ignored. BUG FIX: with no numeric
    values this raised ZeroDivisionError; Excel reports #DIV/0! instead.
    """
    # ignore non numeric cells and boolean cells
    values = extract_numeric_values(*args)
    if not values:
        return ExcelError('#DIV/0!', 'no numeric values to average')
    return sum(values) / len(values)
def right(text, n):
    """Excel RIGHT: last n characters of text; numbers are truncated to int
    and rendered as text first."""
    #TODO: hack to deal with naca section numbers
    if isinstance(text, (unicode, str)):
        return text[-n:]
    # TODO: get rid of the decimal
    return str(int(text))[-n:]
def index(my_range, row, col = None): # Excel reference: https://support.office.com/en-us/article/INDEX-function-a5dcf0dd-996d-40a4-a822-b56b061328bd
    """Excel INDEX: value (or whole row/column when the other index is 0)
    at 1-based (row, col) of a Range or a (cells, nrows, ncols) triple."""
    for i in [my_range, row, col]:
        if isinstance(i, ExcelError) or i in ErrorCodes:
            return i
    row = int(row) if row is not None else row
    col = int(col) if col is not None else col
    if isinstance(my_range, Range):
        cells = my_range.addresses
        nr = my_range.nrows
        nc = my_range.ncols
    else:
        # assumes a (cells, nrows, ncols) triple — TODO confirm against callers
        cells, nr, nc = my_range

        if nr > 1 or nc > 1:
            a = np.array(cells)
            cells = a.flatten().tolist()

        nr = int(nr)
        nc = int(nc)
    if type(cells) != list:
        return ExcelError('#VALUE!', '%s must be a list' % str(cells))
    if row is not None and not is_number(row):
        return ExcelError('#VALUE!', '%s must be a number' % str(row))
    if row == 0 and col == 0:
        return ExcelError('#VALUE!', 'No index asked for Range')
    if col is None and nr == 1 and row <= nc:
        # special case where index is matched on row, and the second row input can be used as a col
        col = row
        row = None
    if row is not None and row > nr:
        return ExcelError('#VALUE!', 'Index %i out of range' % row)
    if nr == 1:
        col = row if col is None else col
        return cells[int(col) - 1]
    if nc == 1:
        return cells[int(row) - 1]
    else: # could be optimised
        if col is None or row is None:
            return ExcelError('#VALUE!', 'Range is 2 dimensional, can not reach value with 1 arg as None')
        if not is_number(col):
            return ExcelError('#VALUE!', '%s must be a number' % str(col))
        if col > nc:
            return ExcelError('#VALUE!', 'Index %i out of range' % col)
        indices = list(range(len(cells)))
        if row == 0: # get column
            filtered_indices = [x for x in indices if x % nc == col - 1]
            filtered_cells = [cells[i] for i in filtered_indices]
            return filtered_cells
        elif col == 0: # get row
            filtered_indices = [x for x in indices if int(x / nc) == row - 1]
            filtered_cells = [cells[i] for i in filtered_indices]
            return filtered_cells
        else:
            return cells[(row - 1)* nc + (col - 1)]
def lookup(value, lookup_range, result_range = None): # Excel reference: https://support.office.com/en-us/article/LOOKUP-function-446d94af-663b-451d-8251-369d5e3864cb
    """Excel LOOKUP: approximate-match search in lookup_range; results come
    from result_range when given, else from lookup_range itself."""
    # TODO
    if not isinstance(value,(int,float)):
        # NOTE(review): returns (not raises) a plain Exception; the file's
        # convention elsewhere is to return an ExcelError
        return Exception("Non numeric lookups (%s) not supported" % value)
    # TODO: note, may return the last equal value
    # index of the last numeric value
    lastnum = -1
    for i,v in enumerate(lookup_range.values):
        if isinstance(v,(int,float)):
            if v > value:
                break
            else:
                lastnum = i
    output_range = result_range.values if result_range is not None else lookup_range.values
    if lastnum < 0:
        return ExcelError('#VALUE!', 'No numeric data found in the lookup range')
    else:
        if i == 0:
            return ExcelError('#VALUE!', 'All values in the lookup range are bigger than %s' % value)
        else:
            if i >= len(lookup_range)-1:
                # return the biggest number smaller than value
                return output_range[lastnum]
            else:
                return output_range[i-1]
# NEEDS TEST
def linest(*args, **kwargs): # Excel reference: https://support.office.com/en-us/article/LINEST-function-84d7d0d9-6e50-4101-977a-fa7abf772b6d
    """Least-squares polynomial fit of Y (args[0]) against X (args[1]).

    Optional args[2] toggles the intercept; kwargs['degree'] sets the
    polynomial degree (default 1, i.e. a straight line).
    """
    # NOTE(review): calls .values() as a method; Range exposes .values as a
    # property elsewhere in this file — confirm the expected argument type
    Y = list(args[0].values())
    X = list(args[1].values())
    if len(args) == 3:
        const = args[2]
        if isinstance(const,str):
            const = (const.lower() == "true")
    else:
        const = True
    degree = kwargs.get('degree',1)
    # build the vandermonde matrix
    A = np.vander(X, degree+1)
    if not const:
        # force the intercept to zero
        A[:,-1] = np.zeros((1,len(X)))
    # perform the fit
    (coefs, residuals, rank, sing_vals) = np.linalg.lstsq(A, Y)
    return coefs
# NEEDS TEST
def npv(*args): # Excel reference: https://support.office.com/en-us/article/NPV-function-8672cb67-2576-4d07-b67b-ac28acf2a568
    """Excel NPV: discount the cashflows at the given rate; the first flow
    is assumed to occur at the end of period 1."""
    discount_rate = args[0]
    cashflow = args[1]
    if isinstance(cashflow, Range):
        cashflow = cashflow.values
    return sum(float(flow) * (1 + discount_rate) ** -(period + 1)
               for (period, flow) in enumerate(cashflow))
def rows(array):
    """
    Function to find the number of rows in an array.

    Excel reference: https://support.office.com/en-ie/article/rows-function-b592593e-3fc2-47f2-bec1-bda493811597

    :param array: the array of which the rows should be counted.
    :return: the number of rows.
    """
    # scalars and None both come from A1:A1-type references and count as 1 row
    if isinstance(array, (float, int)) or array is None:
        return 1
    return len(array.values)
def columns(array):
    """
    Function to find the number of columns in an array.

    Excel reference: https://support.office.com/en-us/article/columns-function-4e8e7b4e-e603-43e8-b177-956088fa48ca

    :param array: the array of which the columns should be counted.
    :return: the number of columns.
    """
    # NOTE(review): delegates to rows(); the counts coincide for flat/1-D
    # ranges — confirm this is correct for genuinely 2-D ranges
    return rows(array)
def match(lookup_value, lookup_range, match_type=1): # Excel reference: https://support.office.com/en-us/article/MATCH-function-e8dffd45-c762-47d6-bf89-533f4a37673a
    """Excel MATCH: 1-based position of lookup_value in lookup_range.

    match_type 1 = largest value <= lookup_value (range must be ascending),
    0 = exact match, -1 = smallest value >= lookup_value (descending).
    """
    if not isinstance(lookup_range, Range):
        return ExcelError('#VALUE!', 'Lookup_range is not a Range')

    def type_convert(value):
        # normalise for comparison: strings lowercased, ints -> float, None -> 0
        if type(value) == str:
            value = value.lower()
        elif type(value) == int:
            value = float(value)
        elif value is None:
            value = 0
        return value;

    def type_convert_float(value):
        # numeric -> float; anything else -> None (never matches a float)
        if is_number(value):
            value = float(value)
        else:
            value = None
        return value

    lookup_value = type_convert(lookup_value)
    range_values = [x for x in lookup_range.values if x is not None] # filter None values to avoid asc/desc order errors
    range_length = len(range_values)
    if match_type == 1:
        # Verify ascending sort
        posMax = -1
        for i in range(range_length):
            current = type_convert(range_values[i])
            if i < range_length - 1:
                if current > type_convert(range_values[i + 1]):
                    return ExcelError('#VALUE!', 'for match_type 1, lookup_range must be sorted ascending')
            if current <= lookup_value:
                posMax = i
        if posMax == -1:
            return ExcelError('#VALUE!','no result in lookup_range for match_type 1')
        return posMax +1 #Excel starts at 1
    elif match_type == 0:
        # No string wildcard
        try:
            if is_number(lookup_value):
                lookup_value = float(lookup_value)
                output = [type_convert_float(x) for x in range_values].index(lookup_value) + 1
            else:
                output = [str(x).lower() for x in range_values].index(lookup_value) + 1
            return output
        except:
            return ExcelError('#VALUE!', '%s not found' % lookup_value)
    elif match_type == -1:
        # Verify descending sort
        posMin = -1
        for i in range((range_length)):
            current = type_convert(range_values[i])
            # NOTE(review): `is not` here is an identity (not value) comparison on ints
            if i is not range_length-1 and current < type_convert(range_values[i+1]):
                return ExcelError('#VALUE!','for match_type -1, lookup_range must be sorted descending')
            if current >= lookup_value:
                posMin = i
        if posMin == -1:
            return ExcelError('#VALUE!', 'no result in lookup_range for match_type -1')
        return posMin +1 #Excel starts at 1
def mod(nb, q): # Excel Reference: https://support.office.com/en-us/article/MOD-function-9b6cd169-b6ee-406a-a97b-edf2a9dc24f3
    """Excel MOD for integers; uses Python's % (sign follows the divisor)."""
    for arg in (nb, q):
        if not isinstance(arg, int):
            return ExcelError('#VALUE!', '%s is not an integer' % str(arg))
    return nb % q
def eomonth(start_date, months): # Excel reference: https://support.office.com/en-us/article/eomonth-function-7314ffa1-2bc9-4005-9d66-f49db127d628
    """Excel EOMONTH: serial date of the last day of the month that is
    `months` months away from start_date (an Excel serial date)."""
    if not is_number(start_date):
        return ExcelError('#VALUE!', 'start_date %s must be a number' % str(start_date))
    if start_date < 0:
        return ExcelError('#VALUE!', 'start_date %s must be positive' % str(start_date))
    if not is_number(months):
        return ExcelError('#VALUE!', 'months %s must be a number' % str(months))
    y1, m1, d1 = date_from_int(start_date)
    start_date_d = datetime.date(year=y1, month=m1, day=d1)
    end_date_d = start_date_d + relativedelta(months=months)
    y2 = end_date_d.year
    m2 = end_date_d.month
    d2 = monthrange(y2, m2)[1]  # last day of the target month
    res = int(int_from_date(datetime.date(y2, m2, d2)))
    return res
def year(serial_number): # Excel reference: https://support.office.com/en-us/article/year-function-c64f017a-1354-490d-981f-578e8ec8d3b9
    """Excel YEAR: the year component of an Excel serial date."""
    if not is_number(serial_number):
        return ExcelError('#VALUE!', 'start_date %s must be a number' % str(serial_number))
    if serial_number < 0:
        return ExcelError('#VALUE!', 'start_date %s must be positive' % str(serial_number))
    return date_from_int(serial_number)[0]
def month(serial_number): # Excel reference: https://support.office.com/en-us/article/month-function-579a2881-199b-48b2-ab90-ddba0eba86e8
    """Excel MONTH: the month component of an Excel serial date."""
    if not is_number(serial_number):
        return ExcelError('#VALUE!', 'start_date %s must be a number' % str(serial_number))
    if serial_number < 0:
        return ExcelError('#VALUE!', 'start_date %s must be positive' % str(serial_number))
    return date_from_int(serial_number)[1]
def count(*args): # Excel reference: https://support.office.com/en-us/article/COUNT-function-a59cd7fc-b623-4d93-87a4-d23bf411294c
    """Excel COUNT: how many of the supplied cells hold numbers (booleans excluded)."""
    total = 0
    for arg in list(args):
        if isinstance(arg, Range):
            # count numeric, non-boolean entries inside the range
            total += sum(1 for x in arg.values if is_number(x) and type(x) is not bool)
        elif is_number(arg): # int() is used for text representation of numbers
            total += 1
    return total
def counta(range):
    """Excel COUNTA: number of non-empty cells; a #NULL error counts as 0,
    any other ExcelError is propagated."""
    # NOTE(review): parameter name shadows the builtin `range`
    if isinstance(range, ExcelError) or range in ErrorCodes:
        if range.value == '#NULL':
            return 0
        else:
            return range # return the Excel Error
        # raise Exception('ExcelError other than #NULL passed to excellib.counta()')
    else:
        return len([x for x in range.values if x != None])
def countif(range, criteria): # Excel reference: https://support.office.com/en-us/article/COUNTIF-function-e0de10c6-f885-4e71-abb4-1f464816df34
    """Excel COUNTIF: number of cells in `range` matching `criteria`."""
    # WARNING:
    # - wildcards not supported
    # - support of strings with >, <, <=, =>, <> not provided
    # NOTE(review): parameter name shadows the builtin `range`
    valid = find_corresponding_index(range.values, criteria)
    return len(valid)
def countifs(*args): # Excel reference: https://support.office.com/en-us/article/COUNTIFS-function-dda3dc6e-f74e-4aee-88bc-aa8c2a866842
    """Count rows that satisfy every (range, criteria) pair.

    Arguments alternate range/criteria. The first pair is evaluated with
    find_corresponding_index; the surviving rows are carried into a
    recursive call for the remaining pairs, and the minimum match count
    across all layers is returned. The recursion terminates by returning
    +inf for an empty argument list, which never wins the min().
    """
    arg_list = list(args)
    l = len(arg_list)
    if l % 2 != 0:
        return ExcelError('#VALUE!', 'excellib.countifs() must have a pair number of arguments, here %d' % l)
    if l >= 2:
        indexes = find_corresponding_index(args[0].values, args[1]) # find indexes that match first layer of countif
        remaining_ranges = [elem for i, elem in enumerate(arg_list[2:]) if i % 2 == 0] # get only ranges
        remaining_criteria = [elem for i, elem in enumerate(arg_list[2:]) if i % 2 == 1] # get only criteria
        # verif that all Ranges are associated COULDNT MAKE THIS WORK CORRECTLY BECAUSE OF RECURSION
        # association_type = None
        # temp = [args[0]] + remaining_ranges
        # for index, range in enumerate(temp): # THIS IS SHIT, but works ok
        #     if type(range) == Range and index < len(temp) - 1:
        #         asso_type = range.is_associated(temp[index + 1])
        #         print 'asso', asso_type
        #         if association_type is None:
        #             association_type = asso_type
        #         elif associated_type != asso_type:
        #             association_type = None
        #             break
        # print 'ASSO', association_type
        # if association_type is None:
        #     return ValueError('All items must be Ranges and associated')
        filtered_remaining_ranges = []
        for range in remaining_ranges: # filter items in remaining_ranges that match valid indexes from first countif layer
            filtered_remaining_cells = []
            filtered_remaining_range = []
            for index, item in enumerate(range.values):
                if index in indexes:
                    filtered_remaining_cells.append(range.addresses[index]) # reconstructing cells from indexes
                    filtered_remaining_range.append(item) # reconstructing values from indexes
            # WARNING HERE
            filtered_remaining_ranges.append(Range(filtered_remaining_cells, filtered_remaining_range))
        new_tuple = ()
        for index, range in enumerate(filtered_remaining_ranges): # rebuild the tuple that will be the argument of next layer
            new_tuple += (range, remaining_criteria[index])
        return min(countifs(*new_tuple), len(indexes)) # only consider the minimum number across all layer responses
    else:
        # Base case of the recursion: no pairs left, so do not constrain min().
        return float('inf')
def xround(number, num_digits = 0): # Excel reference: https://support.office.com/en-us/article/ROUND-function-c018c5d8-40fb-4053-90b1-b3e7f61a213c
    """Round `number` to `num_digits` decimals, Excel-style (half away from zero)."""
    if not is_number(number):
        return ExcelError('#VALUE!', '%s is not a number' % str(number))
    if not is_number(num_digits):
        return ExcelError('#VALUE!', '%s is not a number' % str(num_digits))
    # float() protects against long-style numbers (pre-dump/load) that
    # Decimal does not accept directly.
    value = float(number)
    if num_digits < 0:
        # Rounding to the left of the decimal point: Python's round suffices.
        # see https://docs.python.org/2/library/functions.html#round
        # and https://gist.github.com/ejamesc/cedc886c5f36e2d075c5
        return round(value, num_digits)
    # Decimal + ROUND_HALF_UP reproduces Excel's "half away from zero"
    # behaviour, which differs from Python 3's banker's rounding.
    quantum = Decimal(repr(pow(10, -num_digits)))
    return float(Decimal(repr(value)).quantize(quantum, rounding=ROUND_HALF_UP))
def roundup(number, num_digits = 0): # Excel reference: https://support.office.com/en-us/article/ROUNDUP-function-f8bc9b23-e795-47db-8703-db171d0c42a7
    """Round `number` up (via ROUND_UP/ceil) to `num_digits` decimals."""
    if not is_number(number):
        return ExcelError('#VALUE!', '%s is not a number' % str(number))
    if not is_number(num_digits):
        return ExcelError('#VALUE!', '%s is not a number' % str(num_digits))
    # float() protects against long-style numbers (pre-dump/load) that
    # Decimal does not accept directly.
    value = float(number)
    if num_digits < 0:
        # Rounding to the left of the decimal point uses ceil on the
        # scaled value.
        scale = pow(10, -num_digits)
        return ceil(value / scale) * scale
    # see https://docs.python.org/2/library/functions.html#round
    # and https://gist.github.com/ejamesc/cedc886c5f36e2d075c5
    quantum = Decimal(repr(pow(10, -num_digits)))
    return float(Decimal(repr(value)).quantize(quantum, rounding=ROUND_UP))
def mid(text, start_num, num_chars): # Excel reference: https://support.office.com/en-us/article/MID-MIDB-functions-d5f9e25c-d7d6-472e-b568-4ecb12433028
    """Return `num_chars` characters of `text` starting at 1-based `start_num`."""
    text = str(text)
    if len(text) > CELL_CHARACTER_LIMIT:
        return ExcelError('#VALUE!', 'text is too long. Is %s needs to be %s or less.' % (len(text), CELL_CHARACTER_LIMIT))
    if type(start_num) != int:
        return ExcelError('#VALUE!', '%s is not an integer' % str(start_num))
    if type(num_chars) != int:
        return ExcelError('#VALUE!', '%s is not an integer' % str(num_chars))
    if start_num < 1:
        return ExcelError('#VALUE!', '%s is < 1' % str(start_num))
    if num_chars < 0:
        return ExcelError('#VALUE!', '%s is < 0' % str(num_chars))
    # Convert Excel's 1-based position to a Python slice.
    begin = start_num - 1
    return text[begin:begin + num_chars]
def date(year, month, day): # Excel reference: https://support.office.com/en-us/article/DATE-function-e36c0c8c-4104-49da-ab83-82328b832349
    """Build an Excel date serial number from year/month/day integers."""
    if type(year) != int:
        return ExcelError('#VALUE!', '%s is not an integer' % str(year))
    if type(month) != int:
        return ExcelError('#VALUE!', '%s is not an integer' % str(month))
    if type(day) != int:
        return ExcelError('#VALUE!', '%s is not an integer' % str(day))
    if year < 0 or year > 9999:
        return ExcelError('#VALUE!', 'Year must be between 1 and 9999, instead %s' % str(year))
    if year < 1900:
        # Excel interprets small year values as offsets from 1900.
        year = 1900 + year
    year, month, day = normalize_year(year, month, day) # taking into account negative month and day values
    date_0 = datetime.datetime(1900, 1, 1)
    date = datetime.datetime(year, month, day)
    # +2: serials are 1-based from 1900-01-01, and Excel also counts the
    # non-existent 1900-02-29 — NOTE(review): confirm behaviour for dates
    # before 1900-03-01, where Excel's leap-year bug shifts serials by one.
    result = (datetime.datetime(year, month, day) - date_0).days + 2
    if result <= 0:
        return ExcelError('#VALUE!', 'Date result is negative')
    else:
        return result
def yearfrac(start_date, end_date, basis = 0): # Excel reference: https://support.office.com/en-us/article/YEARFRAC-function-3844141e-c76d-4143-82b6-208454ddc6a8
    """Fraction of a year between two Excel date serials.

    basis: 0 = US 30/360, 1 = actual/actual, 2 = actual/360,
    3 = actual/365, 4 = Eurobond 30/360. Dates are swapped when
    start_date > end_date, matching Excel.
    """
    def actual_nb_days_ISDA(start, end): # needed to separate days_in_leap_year from days_not_leap_year
        # NOTE(review): defined but not called by any branch below —
        # presumably kept for a future ISDA actual/actual basis; confirm.
        y1, m1, d1 = start
        y2, m2, d2 = end
        days_in_leap_year = 0
        days_not_in_leap_year = 0
        year_range = list(range(y1, y2 + 1))
        for y in year_range:
            if y == y1 and y == y2:
                nb_days = date(y2, m2, d2) - date(y1, m1, d1)
            elif y == y1:
                nb_days = date(y1 + 1, 1, 1) - date(y1, m1, d1)
            elif y == y2:
                nb_days = date(y2, m2, d2) - date(y2, 1, 1)
            else:
                nb_days = 366 if is_leap_year(y) else 365
            if is_leap_year(y):
                days_in_leap_year += nb_days
            else:
                days_not_in_leap_year += nb_days
        return (days_not_in_leap_year, days_in_leap_year)
    def actual_nb_days_AFB_alter(start, end): # http://svn.finmath.net/finmath%20lib/trunk/src/main/java/net/finmath/time/daycount/DayCountConvention_ACT_ACT_YEARFRAC.java
        """Actual/actual day count following the AFB-style convention above."""
        y1, m1, d1 = start
        y2, m2, d2 = end
        delta = date(*end) - date(*start)
        if delta <= 365:
            # Within one year: the denominator is 366 when the span touches
            # a Feb 29, else 365.
            if is_leap_year(y1) and is_leap_year(y2):
                denom = 366
            elif is_leap_year(y1) and date(y1, m1, d1) <= date(y1, 2, 29):
                denom = 366
            elif is_leap_year(y2) and date(y2, m2, d2) >= date(y2, 2, 29):
                denom = 366
            else:
                denom = 365
        else:
            # Over a year: average year length across the covered years.
            year_range = list(range(y1, y2 + 1))
            nb = 0
            for y in year_range:
                nb += 366 if is_leap_year(y) else 365
            denom = nb / len(year_range)
        return delta / denom
    if not is_number(start_date):
        return ExcelError('#VALUE!', 'start_date %s must be a number' % str(start_date))
    if not is_number(end_date):
        return ExcelError('#VALUE!', 'end_date %s must be number' % str(end_date))
    if start_date < 0:
        return ExcelError('#VALUE!', 'start_date %s must be positive' % str(start_date))
    if end_date < 0:
        return ExcelError('#VALUE!', 'end_date %s must be positive' % str(end_date))
    if start_date > end_date: # switch dates if start_date > end_date
        temp = end_date
        end_date = start_date
        start_date = temp
    y1, m1, d1 = date_from_int(start_date)
    y2, m2, d2 = date_from_int(end_date)
    if basis == 0: # US 30/360
        d2 = 30 if d2 == 31 and (d1 == 31 or d1 == 30) else min(d2, 31)
        d1 = 30 if d1 == 31 else d1
        count = 360 * (y2 - y1) + 30 * (m2 - m1) + (d2 - d1)
        result = count / 360
    elif basis == 1: # Actual/actual
        result = actual_nb_days_AFB_alter((y1, m1, d1), (y2, m2, d2))
    elif basis == 2: # Actual/360
        result = (end_date - start_date) / 360
    elif basis == 3: # Actual/365
        result = (end_date - start_date) / 365
    elif basis == 4: # Eurobond 30/360
        d2 = 30 if d2 == 31 else d2
        d1 = 30 if d1 == 31 else d1
        count = 360 * (y2 - y1) + 30 * (m2 - m1) + (d2 - d1)
        result = count / 360
    else:
        return ExcelError('#VALUE!', '%d must be 0, 1, 2, 3 or 4' % basis)
    return result
def isna(value):
    """Best-effort ISNA: True when `value` cannot be evaluated.

    NOTE(review): relies on eval() of the cell content — a security risk
    on untrusted input — and the bare except swallows every error type.
    """
    # This function might need more solid testing
    try:
        eval(value)
        return False
    except:
        return True
def isblank(value):
    """Excel ISBLANK: True only when the cell holds no value at all (None).

    Zero and the empty string are NOT blank, matching Excel.
    """
    return value is None
def istext(value):
    """Excel ISTEXT: True when `value` is a text (string) value.

    Uses isinstance() instead of a type() equality test so that str
    subclasses are also recognised as text.
    """
    return isinstance(value, str)
def offset(reference, rows, cols, height=None, width=None): # Excel reference: https://support.office.com/en-us/article/OFFSET-function-c8de19ae-dd79-4b9b-a14e-b4d906d11b66
    """Return the address string of a range shifted from `reference`.

    `rows`/`cols` move the anchor cell; optional `height`/`width` (both
    required together) produce an `A1:B2`-style range. Errors found in
    any argument are propagated unchanged.
    """
    # This function accepts a list of addresses
    # Maybe think of passing a Range as first argument
    for i in [reference, rows, cols, height, width]:
        if isinstance(i, ExcelError) or i in ErrorCodes:
            return i
    rows = int(rows)
    cols = int(cols)
    # get first cell address of reference
    if is_range(reference):
        ref = resolve_range(reference, should_flatten = True)[0][0]
    else:
        ref = reference
    ref_sheet = ''
    end_address = ''
    if '!' in ref:
        ref_sheet = ref.split('!')[0] + '!'
        ref_cell = ref.split('!')[1]
    else:
        ref_cell = ref
    found = re.search(CELL_REF_RE, ref)
    new_col = col2num(found.group(1)) + cols
    new_row = int(found.group(2)) + rows
    if new_row <= 0 or new_col <= 0:
        return ExcelError('#VALUE!', 'Offset is out of bounds')
    start_address = str(num2col(new_col)) + str(new_row)
    if (height is not None and width is not None):
        # BUG FIX: these messages previously read "%d must not be integer",
        # which inverted the meaning and, via %d, could itself raise on
        # non-numeric input instead of returning the ExcelError.
        if type(height) != int:
            return ExcelError('#VALUE!', '%s must be an integer' % height)
        if type(width) != int:
            return ExcelError('#VALUE!', '%s must be an integer' % width)
        if height > 0:
            end_row = new_row + height - 1
        else:
            return ExcelError('#VALUE!', '%d must be strictly positive' % height)
        if width > 0:
            end_col = new_col + width - 1
        else:
            return ExcelError('#VALUE!', '%d must be strictly positive' % width)
        end_address = ':' + str(num2col(end_col)) + str(end_row)
    elif height and not width or not height and width:
        # NOTE(review): single-argument ExcelError call — confirm ExcelError
        # accepts one positional argument.
        return ExcelError('Height and width must be passed together')
    return ref_sheet + start_address + end_address
def sumproduct(*ranges): # Excel reference: https://support.office.com/en-us/article/SUMPRODUCT-function-16753e75-9f68-4874-94ac-4d2145a2fd2e
    """Sum of the element-wise products of the given Ranges."""
    range_list = list(ranges)
    for r in range_list: # if a range has no values (i.e if it's empty)
        if len(r.values) == 0:
            return 0
    for range in range_list:
        for item in range.values:
            # If there is an ExcelError inside a Range, sumproduct should output an ExcelError
            if isinstance(item, ExcelError):
                return ExcelError("#N/A", "ExcelErrors are present in the sumproduct items")
    reduce(check_length, range_list) # check that all ranges have the same size
    # Multiply all ranges element-wise, then sum the resulting values.
    return reduce(lambda X, Y: X + Y, reduce(lambda x, y: Range.apply_all('multiply', x, y), range_list).values)
def iferror(value, value_if_error): # Excel reference: https://support.office.com/en-us/article/IFERROR-function-c526fd07-caeb-47b8-8bb6-63f3e417f611
    """Return `value_if_error` when `value` is an Excel error, else `value`."""
    is_error = isinstance(value, ExcelError) or value in ErrorCodes
    return value_if_error if is_error else value
def xirr(values, dates, guess=0):
    """
    Function to calculate the internal rate of return (IRR) using payments and non-periodic dates. It resembles the
    excel function XIRR().

    Excel reference: https://support.office.com/en-ie/article/xirr-function-de1242ec-6477-445b-b11b-a303ad9adc9d

    :param values: the payments of which at least one has to be negative.
    :param dates: the dates as excel dates (e.g. 43571 for 16/04/2019).
    :param guess: an initial guess which is required by Excel but isn't used by this function.
    :return: a float being the IRR.
    """
    # Accept either Range objects or plain lists for both inputs.
    if isinstance(values, Range):
        values = values.values
    if isinstance(dates, Range):
        dates = dates.values

    if guess is not None and guess != 0:
        raise ValueError('guess value for excellib.irr() is %s and not 0' % guess)
    else:
        try:
            # Find the rate where XNPV is zero, starting Newton's method at 0.
            return scipy.optimize.newton(lambda r: xnpv(r, values, dates, lim_rate=False), 0.0)
        except RuntimeError:    # Failed to converge?
            # Fall back to bracketed root finding over a wide rate interval.
            return scipy.optimize.brentq(lambda r: xnpv(r, values, dates, lim_rate=False), -1.0, 1e10)
def vlookup(lookup_value, table_array, col_index_num, range_lookup = True): # https://support.office.com/en-us/article/VLOOKUP-function-0bbc8083-26fe-4963-8ab8-93a18ad188a1
    """Look up `lookup_value` in the first column of `table_array` and return
    the value from column `col_index_num` (1-based) of the matching row.

    range_lookup=False requires an exact match; True performs an
    approximate match — assumes the first column is sorted ascending
    (TODO confirm: unsorted data silently returns the last value that is
    <= lookup_value before a larger one is seen).
    """
    if not isinstance(table_array, Range):
        return ExcelError('#VALUE', 'table_array should be a Range')
    if col_index_num > table_array.ncols:
        return ExcelError('#VALUE', 'col_index_num is greater than the number of cols in table_array')
    first_column = table_array.get(0, 1)
    result_column = table_array.get(0, col_index_num)
    if not range_lookup:
        # Exact-match mode.
        if lookup_value not in first_column.values:
            return ExcelError('#N/A', 'lookup_value not in first column of table_array')
        else:
            i = first_column.values.index(lookup_value)
            ref = first_column.order[i]
    else:
        # Approximate-match mode: keep the last value <= lookup_value.
        i = None
        for v in first_column.values:
            if lookup_value >= v:
                i = first_column.values.index(v)
                ref = first_column.order[i]
            else:
                break
        if i is None:
            return ExcelError('#N/A', 'lookup_value smaller than all values of table_array')
    return Range.find_associated_value(ref, result_column)
def sln(cost, salvage, life): # Excel reference: https://support.office.com/en-us/article/SLN-function-cdb666e5-c1c6-40a7-806a-e695edc2f1c8
    """Straight-line depreciation of an asset for one period."""
    # Propagate any Excel error found among the arguments.
    for candidate in (cost, salvage, life):
        if isinstance(candidate, ExcelError) or candidate in ErrorCodes:
            return candidate
    return (cost - salvage) / life
def vdb(cost, salvage, life, start_period, end_period, factor = 2, no_switch = False): # Excel reference: https://support.office.com/en-us/article/VDB-function-dde4e207-f3fa-488d-91d2-66d55e861d73
    """Variable declining-balance depreciation between two periods.

    Uses double-declining balance (or `factor`) per period; unless
    `no_switch`, switches to straight-line once that yields a larger
    depreciation. Fractional start/end periods are prorated.
    """
    # Propagate any Excel error found among the arguments.
    for arg in [cost, salvage, life, start_period, end_period, factor, no_switch]:
        if isinstance(arg, ExcelError) or arg in ErrorCodes:
            return arg
    for arg in [cost, salvage, life, start_period, end_period, factor]:
        if not isinstance(arg, (float, int)):
            return ExcelError('#VALUE', 'Arg %s should be an int, float or long, instead: %s' % (arg, type(arg)))
    start_period = start_period
    end_period = end_period
    sln_depr = sln(cost, salvage, life)
    depr_rate = factor / life
    acc_depr = 0
    depr = 0
    switch_to_sln = False
    sln_depr = 0
    result = 0
    start_life = 0
    delta_life = life % 1
    if delta_life > 0: # to handle cases when life is not an integer
        end_life = int(life + 1)
    else:
        end_life = int(life)
    periods = list(range(start_life, end_life))
    # Fractional start: depreciate the partial first period up front and
    # shift the remaining period midpoints by half.
    if int(start_period) != start_period:
        delta_start = abs(int(start_period) - start_period)
        depr = (cost - acc_depr) * depr_rate * delta_start
        acc_depr += depr
        start_life = 1
        periods = [x + 0.5 for x in periods]
    for index, current_year in enumerate(periods):
        if not no_switch: # no_switch = False (Default Case)
            if switch_to_sln:
                depr = sln_depr
            else:
                depr = (cost - acc_depr) * depr_rate
                acc_depr += depr
                temp_sln_depr = sln(cost, salvage, life)
                if depr < temp_sln_depr:
                    switch_to_sln = True
                    fixed_remaining_years = life - current_year - 1
                    fixed_remaining_cost = cost - acc_depr
                    # we need to check future sln: current depr should never be smaller than sln to come
                    sln_depr = sln(fixed_remaining_cost, salvage, fixed_remaining_years)
                    if sln_depr > depr: # if it's the case, we switch to sln earlier than the regular case
                        # cancel what has been done
                        acc_depr -= depr
                        fixed_remaining_years += 1
                        fixed_remaining_cost = cost - acc_depr
                        # recalculate depreciation
                        sln_depr = sln(fixed_remaining_cost, salvage, fixed_remaining_years)
                        depr = sln_depr
                        acc_depr += depr
        else: # no_switch = True
            depr = (cost - acc_depr) * depr_rate
            acc_depr += depr
        # Accumulate only the part of this period that falls inside
        # [start_period, end_period], prorating partial overlaps.
        delta_start = abs(current_year - start_period)
        if delta_start < 1 and delta_start != 0:
            result += depr * (1 - delta_start)
        elif current_year >= start_period and current_year < end_period:
            delta_end = abs(end_period - current_year)
            if delta_end < 1 and delta_end != 0:
                result += depr * delta_end
            else:
                result += depr
    return result
def xnpv(rate, values, dates, lim_rate = True): # Excel reference: https://support.office.com/en-us/article/XNPV-function-1b42bbf6-370f-4532-a0eb-d67c16b664b7
    """
    Function to calculate the net present value (NPV) using payments and non-periodic dates. It resembles the excel function XPNV().

    :param rate: the discount rate.
    :param values: the payments of which at least one has to be negative.
    :param dates: the dates as excel dates (e.g. 43571 for 16/04/2019).
    :param lim_rate: when True, reject negative rates as Excel does.
    :return: a float being the NPV.
    """
    # Accept either Range objects or plain lists for both inputs.
    if isinstance(values, Range):
        values = values.values
    if isinstance(dates, Range):
        dates = dates.values

    if len(values) != len(dates):
        return ExcelError('#NUM!', '`values` range must be the same length as `dates` range in XNPV, %s != %s' % (len(values), len(dates)))

    if lim_rate and rate < 0:
        # BUG FIX: this message previously applied '%' to a string with no
        # placeholders, raising TypeError instead of returning the error.
        return ExcelError('#NUM!', 'Excel cannot handle a negative `rate`')

    # Discount each payment by the time elapsed (in 365-day years) since
    # the first date.
    xnpv = 0
    for v, d in zip(values, dates):
        xnpv += v / np.power(1.0 + rate, (d - dates[0]) / 365)

    return xnpv
def pmt(*args): # Excel reference: https://support.office.com/en-us/article/PMT-function-0214da64-9a63-4996-bc20-214433fa6441
    """Periodic payment for a loan with constant payments and interest rate.

    Only the first three Excel arguments (rate, nper, pv) are honoured.
    WARNING fv & type not used yet - both are assumed to be their defaults (0).
    """
    rate, num_payments, present_value = args[0], args[1], args[2]
    annuity_factor = 1 - np.power(1 + rate, -num_payments)
    return -present_value * rate / annuity_factor
# https://support.office.com/en-us/article/POWER-function-D3F2908B-56F4-4C3F-895A-07FB519C362A
def power(number, power):
    """Excel POWER: raise `number` to `power`.

    https://support.office.com/en-us/article/POWER-function-D3F2908B-56F4-4C3F-895A-07FB519C362A
    """
    if number == power == 0:
        # Really excel? What were you thinking?
        # BUG FIX: the message previously had a stray '% str(number)' on a
        # placeholder-less string, raising TypeError instead of returning
        # the ExcelError.
        return ExcelError('#NUM!', 'Number and power cannot both be zero')
    if power < 1 and number < 0:
        # NOTE(review): this also rejects negative bases with negative
        # integer exponents, which Excel accepts — confirm before changing.
        return ExcelError('#NUM!', '%s must be non-negative' % str(number))
    return np.power(number, power)
# https://support.office.com/en-ie/article/sqrt-function-654975c2-05c4-4831-9a24-2c65e4040fdf
def sqrt(number):
    """Excel SQRT: square root of a non-negative number.

    https://support.office.com/en-ie/article/sqrt-function-654975c2-05c4-4831-9a24-2c65e4040fdf
    """
    if number < 0:
        # BUG FIX: the message previously referenced the undefined name
        # 'index_num', raising NameError instead of returning the error.
        return ExcelError('#NUM!', '%s must be non-negative' % str(number))
    return np.sqrt(number)
# https://support.office.com/en-ie/article/today-function-5eb3078d-a82c-4736-8930-2f51a028fdd9
def today():
    """Excel TODAY: serial number of the current local date.

    https://support.office.com/en-ie/article/today-function-5eb3078d-a82c-4736-8930-2f51a028fdd9
    """
    reference_date = datetime.datetime.today().date()
    days_since_epoch = reference_date - EXCEL_EPOCH
    # why +2 ?
    # 1 based from 1900-01-01
    # I think it is "inclusive" / to the _end_ of the day.
    # NOTE(review): Excel also counts the non-existent 1900-02-29, which is
    # presumably the second +1 — confirm against Excel.
    # https://support.office.com/en-us/article/date-function-e36c0c8c-4104-49da-ab83-82328b832349
    """Note: Excel stores dates as sequential serial numbers so that they can be used in calculations.
    January 1, 1900 is serial number 1, and January 1, 2008 is serial number 39448 because it is 39,447 days after January 1, 1900.
    You will need to change the number format (Format Cells) in order to display a proper date."""
    return days_since_epoch.days + 2
# https://support.office.com/en-us/article/concat-function-9b1a9a3f-94ff-41af-9736-694cbd6b4ca2
def concat(*args):
    """Excel CONCAT: join all arguments, flattening nested iterables first.

    https://support.office.com/en-us/article/concat-function-9b1a9a3f-94ff-41af-9736-694cbd6b4ca2
    """
    flat_args = tuple(flatten(args))
    return concatenate(*flat_args)
# https://support.office.com/en-us/article/CONCATENATE-function-8F8AE884-2CA8-4F7A-B093-75D702BEA31D
# Important: In Excel 2016, Excel Mobile, and Excel Online, this function has
# been replaced with the CONCAT function. Although the CONCATENATE function is
# still available for backward compatibility, you should consider using CONCAT
# from now on. This is because CONCATENATE may not be available in future
# versions of Excel.
#
# BE AWARE; there are functional differences between CONACTENATE AND CONCAT
#
def concatenate(*args):
    """Excel CONCATENATE: join scalar arguments into one string.

    Unlike CONCAT, nested iterables are rejected rather than flattened.
    https://support.office.com/en-us/article/CONCATENATE-function-8F8AE884-2CA8-4F7A-B093-75D702BEA31D
    """
    # Reject arguments that flatten() would change, i.e. nested iterables.
    if tuple(flatten(args)) != args:
        return ExcelError('#VALUE', 'Could not process arguments %s' % (args))

    cat_string = ''.join(str(a) for a in args)

    if len(cat_string) > CELL_CHARACTER_LIMIT:
        # BUG FIX: this branch previously referenced the misspelled name
        # 'cat_String', raising NameError instead of returning the error.
        return ExcelError('#VALUE', 'Too long. concatentaed string should be no longer than %s but is %s' % (CELL_CHARACTER_LIMIT, len(cat_string)))

    return cat_string
if __name__ == '__main__':
    # No CLI behaviour: this module is a library of Excel-equivalent functions.
    pass
|
anthill/koala | koala/excellib.py | xirr | python | def xirr(values, dates, guess=0):
if isinstance(values, Range):
values = values.values
if isinstance(dates, Range):
dates = dates.values
if guess is not None and guess != 0:
raise ValueError('guess value for excellib.irr() is %s and not 0' % guess)
else:
try:
return scipy.optimize.newton(lambda r: xnpv(r, values, dates, lim_rate=False), 0.0)
except RuntimeError: # Failed to converge?
return scipy.optimize.brentq(lambda r: xnpv(r, values, dates, lim_rate=False), -1.0, 1e10) | Function to calculate the internal rate of return (IRR) using payments and non-periodic dates. It resembles the
excel function XIRR().
Excel reference: https://support.office.com/en-ie/article/xirr-function-de1242ec-6477-445b-b11b-a303ad9adc9d
:param values: the payments of which at least one has to be negative.
:param dates: the dates as excel dates (e.g. 43571 for 16/04/2019).
:param guess: an initial guess which is required by Excel but isn't used by this function.
:return: a float being the IRR. | train | https://github.com/anthill/koala/blob/393089fe081380506e73235db18a32b4e078d222/koala/excellib.py#L970-L995 | null | # cython: profile=True
'''
Python equivalents of various excel functions
'''
# source: https://github.com/dgorissen/pycel/blob/master/src/pycel/excellib.py
from __future__ import absolute_import, division
import numpy as np
import scipy.optimize
import datetime
from math import log, ceil
from decimal import Decimal, ROUND_UP, ROUND_HALF_UP
from calendar import monthrange
from dateutil.relativedelta import relativedelta
from openpyxl.compat import unicode
from koala.utils import *
from koala.Range import RangeCore as Range
from koala.ExcelError import *
from functools import reduce
######################################################################################
# A dictionary that maps excel function names onto python equivalents. You should
# only add an entry to this map if the python name is different to the excel name
# (which it may need to be to prevent conflicts with existing python functions
# with that name, e.g., max).
# So if excel defines a function foobar(), all you have to do is add a function
# called foobar to this module. You only need to add it to the function map,
# if you want to use a different name in the python code.
# Note: some functions (if, pi, atan2, and, or, array, ...) are already taken care of
# in the FunctionNode code, so adding them here will have no effect.
# BUG FIX: the "min" key was listed twice; duplicate dict-literal keys are
# dead code (the later one silently wins), so the duplicate is removed.
FUNCTION_MAP = {
    "ln": "xlog",
    "min": "xmin",
    "max": "xmax",
    "sum": "xsum",
    "gammaln": "lgamma",
    "round": "xround",
}
# Excel function names handled by the evaluator. The original list contained
# many duplicates (SUM, MIN, MATCH, LOOKUP, INDEX, AVERAGE, SUMPRODUCT, IRR);
# they are removed here, preserving first-occurrence order — membership
# checks behave identically.
IND_FUN = [
    "SUM",
    "MIN",
    "IF",
    "TAN",
    "ATAN2",
    "PI",
    "ARRAY",
    "ARRAYROW",
    "AND",
    "OR",
    "ALL",
    "VALUE",
    "LOG",
    "MAX",
    "SUMPRODUCT",
    "IRR",
    "CHOOSE",
    "SUMIF",
    "AVERAGE",
    "RIGHT",
    "INDEX",
    "LOOKUP",
    "LINEST",
    "NPV",
    "MATCH",
    "MOD",
    "COUNT",
    "COUNTA",
    "COUNTIF",
    "COUNTIFS",
    "SUMIFS",
    "ROUND",
    "ROWS",
    "COLUMNS",
    "MID",
    "DATE",
    "YEARFRAC",
    "ISNA",
    "ISBLANK",
    "ISTEXT",
    "OFFSET",
    "IFERROR",
    "XIRR",
    "VLOOKUP",
    "VDB",
    "SLN",
    "XNPV",
    "PMT",
    "ROUNDUP",
    "POWER",
    "SQRT",
    "TODAY",
    "YEAR",
    "MONTH",
    "EOMONTH",
]
# Excel's hard limit on the number of characters a single cell may hold.
CELL_CHARACTER_LIMIT = 32767

# Day one of Excel's date serial system (serial 1 == 1900-01-01).
# Built directly instead of round-tripping through strptime().
EXCEL_EPOCH = datetime.date(1900, 1, 1)
######################################################################################
# List of excel equivalent functions
# TODO: needs unit testing
def value(text):
    """Excel VALUE: convert a text representation of a number to a number.

    Handles plain integers, decimals (including leading-dot forms like
    '.5') and trailing-percent strings.
    """
    # BUG FIX: the old test `text.find('.') > 0` missed leading-dot
    # decimals such as '.5', which then crashed in int().
    if '.' in text:
        return float(text)
    elif text.endswith('%'):
        text = text.replace('%', '')
        return float(text) / 100
    else:
        return int(text)
def xlog(a):
    """Natural logarithm; applied element-wise when given a sequence."""
    if isinstance(a, (list, tuple, np.ndarray)):
        return [log(element) for element in flatten(a)]
    return log(a)
def xmax(*args): # Excel reference: https://support.office.com/en-us/article/MAX-function-e0012414-9ac8-4b34-9a47-73e662c08098
    """Largest numeric value among the arguments; 0 when none are numeric."""
    # extract_numeric_values drops non-numeric and boolean cells.
    values = extract_numeric_values(*args)
    # If no numeric cells remain, return zero (matching Excel).
    return max(values) if values else 0
def xmin(*args): # Excel reference: https://support.office.com/en-us/article/MIN-function-61635d12-920f-4ce2-a70f-96f202dcc152
    """Smallest numeric value among the arguments; 0 when none are numeric."""
    # extract_numeric_values drops non-numeric and boolean cells.
    values = extract_numeric_values(*args)
    # If no numeric cells remain, return zero (matching Excel).
    return min(values) if values else 0
def xsum(*args): # Excel reference: https://support.office.com/en-us/article/SUM-function-043e1c7d-7726-4e80-8f32-07b23e057f89
    """Sum of the numeric values among the arguments; 0 when none are numeric."""
    # extract_numeric_values drops non-numeric and boolean cells.
    values = extract_numeric_values(*args)
    # If no numeric cells remain, return zero (matching Excel).
    return sum(values) if values else 0
def choose(index_num, *values): # Excel reference: https://support.office.com/en-us/article/CHOOSE-function-fc5c184f-cb62-4ec7-a46e-38653b98f5bc
    """Return the value at 1-based position `index_num` among `values`."""
    position = int(index_num)
    if position <= 0 or position > 254:
        return ExcelError('#VALUE!', '%s must be between 1 and 254' % str(index_num))
    if position > len(values):
        return ExcelError('#VALUE!', '%s must not be larger than the number of values: %s' % (str(index_num), len(values)))
    return values[position - 1]
def sumif(range, criteria, sum_range = None): # Excel reference: https://support.office.com/en-us/article/SUMIF-function-169b8c99-c05c-4483-a712-1697a653039b
    """Sum the cells of `range` (or `sum_range`) whose values match `criteria`."""
    # WARNING:
    # - wildcards not supported
    # - doesn't really follow 2nd remark about sum_range length
    # NOTE(review): the TypeError objects below are returned, not raised —
    # confirm callers treat them like ExcelError values.
    if not isinstance(range, Range):
        return TypeError('%s must be a Range' % str(range))

    if isinstance(criteria, Range) and not isinstance(criteria , (str, bool)): # ugly...
        return 0

    indexes = find_corresponding_index(range.values, criteria)

    if sum_range:
        if not isinstance(sum_range, Range):
            return TypeError('%s must be a Range' % str(sum_range))

        def f(x):
            # Out-of-range indexes contribute 0 instead of raising.
            return sum_range.values[x] if x < sum_range.length else 0

        return sum(map(f, indexes))

    else:
        return sum([range.values[x] for x in indexes])
def sumifs(*args):
    """Sum cells in the first (sum) range whose corresponding cells in
    every criteria range satisfy that range's criterion.
    """
    # Excel reference: https://support.office.com/en-us/article/
    #   sumifs-function-c9e748f5-7ea7-455d-9406-611cebce642b

    nb_criteria = (len(args)-1) / 2
    args = list(args)
    # input checks
    # NOTE(review): the TypeError objects below are returned, not raised —
    # confirm callers treat them like ExcelError values.
    if nb_criteria == 0:
        return TypeError('At least one criteria and criteria range should be provided.')
    if int(nb_criteria) != nb_criteria:
        return TypeError('Number of criteria an criteria ranges should be equal.')
    nb_criteria = int(nb_criteria)

    # separate arguments: sum range first, then alternating range/criterion.
    sum_range = args[0]
    criteria_ranges = args[1::2]
    criteria = args[2::2]

    # Intersect the matching row indexes across all criteria layers.
    index = list(range(0, len(sum_range)))
    for i in range(nb_criteria):
        criteria_range = criteria_ranges[i]
        criterion = str(criteria[i])
        index_tmp = find_corresponding_index(criteria_range.values, criterion)
        index = np.intersect1d(index, index_tmp)

    sum_select = [sum_range.values[i] for i in index]
    res = sum(sum_select)

    return res
def average(*args): # Excel reference: https://support.office.com/en-us/article/AVERAGE-function-047bac88-d466-426c-a32b-8f33eb960cf6
    """Arithmetic mean of the numeric arguments.

    Non-numeric and boolean cells are ignored. Returns #DIV/0! when no
    numeric value is present, matching Excel.
    """
    values = extract_numeric_values(*args)
    if not values:
        # BUG FIX: previously raised ZeroDivisionError when every cell was
        # non-numeric; Excel returns #DIV/0! in that case.
        return ExcelError('#DIV/0!', 'AVERAGE has no numeric arguments')
    return sum(values) / len(values)
def right(text, n):
    """Return the last `n` characters of `text`.

    TODO: hack to deal with naca section numbers — numeric inputs are
    truncated to int before slicing, which drops the decimal part.
    """
    if isinstance(text, unicode) or isinstance(text, str):
        return text[-n:]
    return str(int(text))[-n:]
def index(my_range, row, col = None): # Excel reference: https://support.office.com/en-us/article/INDEX-function-a5dcf0dd-996d-40a4-a822-b56b061328bd
    """Return the cell (or whole row/column when the other index is 0)
    at 1-based (row, col) within `my_range`.

    `my_range` is either a Range or a (cells, nrows, ncols) triple.
    """
    # Propagate any Excel error found among the arguments.
    for i in [my_range, row, col]:
        if isinstance(i, ExcelError) or i in ErrorCodes:
            return i

    row = int(row) if row is not None else row
    col = int(col) if col is not None else col

    if isinstance(my_range, Range):
        cells = my_range.addresses
        nr = my_range.nrows
        nc = my_range.ncols
    else:
        cells, nr, nc = my_range
        if nr > 1 or nc > 1:
            # Flatten a 2-D cell grid into row-major order.
            a = np.array(cells)
            cells = a.flatten().tolist()
        nr = int(nr)
        nc = int(nc)

    if type(cells) != list:
        return ExcelError('#VALUE!', '%s must be a list' % str(cells))

    if row is not None and not is_number(row):
        return ExcelError('#VALUE!', '%s must be a number' % str(row))

    if row == 0 and col == 0:
        return ExcelError('#VALUE!', 'No index asked for Range')

    if col is None and nr == 1 and row <= nc:
        # special case where index is matched on row, and the second row input can be used as a col
        col = row
        row = None

    if row is not None and row > nr:
        return ExcelError('#VALUE!', 'Index %i out of range' % row)

    if nr == 1:
        col = row if col is None else col
        return cells[int(col) - 1]

    if nc == 1:
        return cells[int(row) - 1]

    else: # could be optimised
        if col is None or row is None:
            return ExcelError('#VALUE!', 'Range is 2 dimensional, can not reach value with 1 arg as None')

        if not is_number(col):
            return ExcelError('#VALUE!', '%s must be a number' % str(col))

        if col > nc:
            return ExcelError('#VALUE!', 'Index %i out of range' % col)

        indices = list(range(len(cells)))

        if row == 0: # get column
            filtered_indices = [x for x in indices if x % nc == col - 1]
            filtered_cells = [cells[i] for i in filtered_indices]

            return filtered_cells

        elif col == 0: # get row
            filtered_indices = [x for x in indices if int(x / nc) == row - 1]
            filtered_cells = [cells[i] for i in filtered_indices]

            return filtered_cells

        else:
            # Row-major offset into the flattened cell list.
            return cells[(row - 1)* nc + (col - 1)]
def lookup(value, lookup_range, result_range = None): # Excel reference: https://support.office.com/en-us/article/LOOKUP-function-446d94af-663b-451d-8251-369d5e3864cb
    """Approximate numeric lookup: find the largest value <= `value` in
    `lookup_range` and return the corresponding entry of `result_range`
    (or of `lookup_range` itself when no result range is given).
    """
    # TODO
    # NOTE(review): the Exception below is returned, not raised, and the
    # loop variable `i` is read after the loop — an empty lookup_range
    # would raise NameError. Confirm callers never pass an empty range.
    if not isinstance(value,(int,float)):
        return Exception("Non numeric lookups (%s) not supported" % value)

    # TODO: note, may return the last equal value

    # index of the last numeric value
    lastnum = -1
    for i,v in enumerate(lookup_range.values):
        if isinstance(v,(int,float)):
            if v > value:
                break
            else:
                lastnum = i

    output_range = result_range.values if result_range is not None else lookup_range.values

    if lastnum < 0:
        return ExcelError('#VALUE!', 'No numeric data found in the lookup range')
    else:
        if i == 0:
            return ExcelError('#VALUE!', 'All values in the lookup range are bigger than %s' % value)
        else:
            if i >= len(lookup_range)-1:
                # return the biggest number smaller than value
                return output_range[lastnum]
            else:
                return output_range[i-1]
# NEEDS TEST
def linest(*args, **kwargs): # Excel reference: https://support.office.com/en-us/article/LINEST-function-84d7d0d9-6e50-4101-977a-fa7abf772b6d
    """Least-squares polynomial fit of Y (args[0]) on X (args[1]).

    Optional args[2] toggles the intercept ("true"/"false" strings
    accepted); kwargs['degree'] sets the polynomial degree (default 1).

    NOTE(review): args[0].values() is *called* here, whereas Range.values
    is accessed as an attribute elsewhere in this module — confirm which
    argument type this function actually receives.
    """
    Y = list(args[0].values())
    X = list(args[1].values())

    if len(args) == 3:
        const = args[2]
        if isinstance(const,str):
            const = (const.lower() == "true")
    else:
        const = True

    degree = kwargs.get('degree',1)

    # build the vandermonde matrix
    A = np.vander(X, degree+1)

    if not const:
        # force the intercept to zero
        A[:,-1] = np.zeros((1,len(X)))

    # perform the fit
    (coefs, residuals, rank, sing_vals) = np.linalg.lstsq(A, Y)

    return coefs
# NEEDS TEST
def npv(*args): # Excel reference: https://support.office.com/en-us/article/NPV-function-8672cb67-2576-4d07-b67b-ac28acf2a568
    """Net present value of args[1] (cash flows) at discount rate args[0].

    The first cash flow is discounted one full period, as in Excel.
    """
    discount_rate = args[0]
    cashflow = args[1]
    if isinstance(cashflow, Range):
        cashflow = cashflow.values
    factor = 1 + discount_rate
    return sum(float(amount) * factor ** -(period + 1)
               for period, amount in enumerate(cashflow))
def rows(array):
    """
    Function to find the number of rows in an array.

    Excel reference: https://support.office.com/en-ie/article/rows-function-b592593e-3fc2-47f2-bec1-bda493811597

    :param array: the array of which the rows should be counted.
    :return: the number of rows.
    """
    # A1:A1-style ranges may arrive as a bare int/float, or as None
    # (issue with ref cell); both count as a single row.
    if isinstance(array, (float, int)) or array is None:
        return 1
    return len(array.values)
def columns(array):
    """
    Function to find the number of columns in an array.

    Excel reference: https://support.office.com/en-us/article/columns-function-4e8e7b4e-e603-43e8-b177-956088fa48ca

    :param array: the array of which the columns should be counted.
    :return: the number of columns.
    """
    # Delegates to rows(): both helpers only need the value count and the
    # same scalar/None special cases.
    return rows(array)
def match(lookup_value, lookup_range, match_type=1): # Excel reference: https://support.office.com/en-us/article/MATCH-function-e8dffd45-c762-47d6-bf89-533f4a37673a
    """Return the 1-based position of `lookup_value` in `lookup_range`.

    match_type 1: largest value <= lookup_value (range must be ascending);
    0: exact match (case-insensitive for strings, no wildcards);
    -1: smallest value >= lookup_value (range must be descending).
    """
    if not isinstance(lookup_range, Range):
        return ExcelError('#VALUE!', 'Lookup_range is not a Range')

    def type_convert(value):
        """Normalise for comparison: lowercase strings, int->float, None->0."""
        if type(value) == str:
            value = value.lower()
        elif type(value) == int:
            value = float(value)
        elif value is None:
            value = 0

        return value;

    def type_convert_float(value):
        """Return `value` as float when numeric, else None."""
        if is_number(value):
            value = float(value)
        else:
            value = None

        return value


    lookup_value = type_convert(lookup_value)

    range_values = [x for x in lookup_range.values if x is not None] # filter None values to avoid asc/desc order errors
    range_length = len(range_values)

    if match_type == 1:
        # Verify ascending sort
        posMax = -1
        for i in range(range_length):
            current = type_convert(range_values[i])

            if i < range_length - 1:
                if current > type_convert(range_values[i + 1]):
                    return ExcelError('#VALUE!', 'for match_type 1, lookup_range must be sorted ascending')
            if current <= lookup_value:
                posMax = i
        if posMax == -1:
            return ExcelError('#VALUE!','no result in lookup_range for match_type 1')
        return posMax +1 #Excel starts at 1

    elif match_type == 0:
        # No string wildcard
        try:
            if is_number(lookup_value):
                lookup_value = float(lookup_value)
                output = [type_convert_float(x) for x in range_values].index(lookup_value) + 1
            else:
                output = [str(x).lower() for x in range_values].index(lookup_value) + 1
            return output
        except:
            return ExcelError('#VALUE!', '%s not found' % lookup_value)

    elif match_type == -1:
        # Verify descending sort
        # NOTE(review): `i is not range_length-1` compares ints by identity —
        # relies on CPython small-int caching; confirm ranges never exceed it.
        posMin = -1
        for i in range((range_length)):
            current = type_convert(range_values[i])

            if i is not range_length-1 and current < type_convert(range_values[i+1]):
               return ExcelError('#VALUE!','for match_type -1, lookup_range must be sorted descending')
            if current >= lookup_value:
               posMin = i
        if posMin == -1:
            return ExcelError('#VALUE!', 'no result in lookup_range for match_type -1')
        return posMin +1 #Excel starts at 1
def mod(nb, q): # Excel Reference: https://support.office.com/en-us/article/MOD-function-9b6cd169-b6ee-406a-a97b-edf2a9dc24f3
    """
    Excel MOD(): remainder of nb / q, carrying the sign of the divisor
    (Python's % operator already follows that convention).

    :param nb: integer numerator.
    :param q: integer divisor.
    :return: nb modulo q, or an ExcelError for non-integer input or a
        zero divisor.
    """
    if not isinstance(nb, int):
        return ExcelError('#VALUE!', '%s is not an integer' % str(nb))
    elif not isinstance(q, int):
        return ExcelError('#VALUE!', '%s is not an integer' % str(q))
    elif q == 0:
        # BUG FIX: previously raised ZeroDivisionError; Excel yields #DIV/0!.
        return ExcelError('#DIV/0!', 'division by zero')
    else:
        return nb % q
def eomonth(start_date, months): # Excel reference: https://support.office.com/en-us/article/eomonth-function-7314ffa1-2bc9-4005-9d66-f49db127d628
    """
    Serial number of the last day of the month `months` months from start_date.

    :param start_date: Excel date serial number (must be positive).
    :param months: number of months to shift (may be negative).
    :return: Excel date serial number, or an ExcelError.
    """
    if not is_number(start_date):
        return ExcelError('#VALUE!', 'start_date %s must be a number' % str(start_date))
    if start_date < 0:
        return ExcelError('#VALUE!', 'start_date %s must be positive' % str(start_date))
    if not is_number(months):
        return ExcelError('#VALUE!', 'months %s must be a number' % str(months))
    y1, m1, d1 = date_from_int(start_date)
    start_date_d = datetime.date(year=y1, month=m1, day=d1)
    end_date_d = start_date_d + relativedelta(months=months)
    y2 = end_date_d.year
    m2 = end_date_d.month
    # monthrange returns (weekday_of_first_day, number_of_days); [1] is the
    # last day of the target month
    d2 = monthrange(y2, m2)[1]
    res = int(int_from_date(datetime.date(y2, m2, d2)))
    return res
def year(serial_number): # Excel reference: https://support.office.com/en-us/article/year-function-c64f017a-1354-490d-981f-578e8ec8d3b9
    """Return the year component of an Excel date serial number."""
    if not is_number(serial_number):
        return ExcelError('#VALUE!', 'start_date %s must be a number' % str(serial_number))
    if serial_number < 0:
        return ExcelError('#VALUE!', 'start_date %s must be positive' % str(serial_number))
    y1, m1, d1 = date_from_int(serial_number)
    return y1
def month(serial_number): # Excel reference: https://support.office.com/en-us/article/month-function-579a2881-199b-48b2-ab90-ddba0eba86e8
    """Return the month component (1-12) of an Excel date serial number."""
    if not is_number(serial_number):
        return ExcelError('#VALUE!', 'start_date %s must be a number' % str(serial_number))
    if serial_number < 0:
        return ExcelError('#VALUE!', 'start_date %s must be positive' % str(serial_number))
    y1, m1, d1 = date_from_int(serial_number)
    return m1
def count(*args): # Excel reference: https://support.office.com/en-us/article/COUNT-function-a59cd7fc-b623-4d93-87a4-d23bf411294c
    """Count the numeric values among the arguments; booleans inside Ranges are excluded."""
    l = list(args)
    total = 0
    for arg in l:
        if isinstance(arg, Range):
            total += len([x for x in arg.values if is_number(x) and type(x) is not bool]) # count inside a list
        elif is_number(arg): # int() is used for text representation of numbers
            total += 1
    return total
def counta(range):
    """
    Count the non-empty values of a Range (Excel COUNTA()).

    :param range: a Range, or an ExcelError to handle/propagate.
    :return: the count of non-None values, 0 for a #NULL error, or the
        ExcelError itself for any other error.
    """
    if isinstance(range, ExcelError) or range in ErrorCodes:
        # NOTE(review): compares against '#NULL' while Excel error codes are
        # usually spelled '#NULL!' -- verify against ErrorCodes.
        if range.value == '#NULL':
            return 0
        else:
            return range # return the Excel Error
        # raise Exception('ExcelError other than #NULL passed to excellib.counta()')
    else:
        return len([x for x in range.values if x != None])
def countif(range, criteria): # Excel reference: https://support.office.com/en-us/article/COUNTIF-function-e0de10c6-f885-4e71-abb4-1f464816df34
    """Count the cells of `range` whose value matches `criteria` (Excel COUNTIF())."""
    # WARNING:
    # - wildcards not supported
    # - support of strings with >, <, <=, =>, <> not provided
    valid = find_corresponding_index(range.values, criteria)
    return len(valid)
def countifs(*args): # Excel reference: https://support.office.com/en-us/article/COUNTIFS-function-dda3dc6e-f74e-4aee-88bc-aa8c2a866842
    """
    Excel COUNTIFS(): count items matching every (range, criteria) pair.

    args = (range1, criteria1, range2, criteria2, ...). Implemented by
    recursion: each call filters on the first pair and recurses on the
    surviving cells; the final count is the minimum across layers.
    """
    arg_list = list(args)
    l = len(arg_list)
    if l % 2 != 0:
        return ExcelError('#VALUE!', 'excellib.countifs() must have a pair number of arguments, here %d' % l)
    if l >= 2:
        indexes = find_corresponding_index(args[0].values, args[1]) # find indexes that match first layer of countif
        remaining_ranges = [elem for i, elem in enumerate(arg_list[2:]) if i % 2 == 0] # get only ranges
        remaining_criteria = [elem for i, elem in enumerate(arg_list[2:]) if i % 2 == 1] # get only criteria
        # verif that all Ranges are associated COULDNT MAKE THIS WORK CORRECTLY BECAUSE OF RECURSION
        # association_type = None
        # temp = [args[0]] + remaining_ranges
        # for index, range in enumerate(temp): # THIS IS SHIT, but works ok
        #     if type(range) == Range and index < len(temp) - 1:
        #         asso_type = range.is_associated(temp[index + 1])
        #         print 'asso', asso_type
        #         if association_type is None:
        #             association_type = asso_type
        #         elif associated_type != asso_type:
        #             association_type = None
        #             break
        # print 'ASSO', association_type
        # if association_type is None:
        #     return ValueError('All items must be Ranges and associated')
        filtered_remaining_ranges = []
        for range in remaining_ranges: # filter items in remaining_ranges that match valid indexes from first countif layer
            filtered_remaining_cells = []
            filtered_remaining_range = []
            for index, item in enumerate(range.values):
                if index in indexes:
                    filtered_remaining_cells.append(range.addresses[index]) # reconstructing cells from indexes
                    filtered_remaining_range.append(item) # reconstructing values from indexes
            # WARNING HERE
            filtered_remaining_ranges.append(Range(filtered_remaining_cells, filtered_remaining_range))
        new_tuple = ()
        for index, range in enumerate(filtered_remaining_ranges): # rebuild the tuple that will be the argument of next layer
            new_tuple += (range, remaining_criteria[index])
        return min(countifs(*new_tuple), len(indexes)) # only consider the minimum number across all layer responses
    else:
        # base case of the recursion: no pairs left, so no constraint
        return float('inf')
def xround(number, num_digits = 0): # Excel reference: https://support.office.com/en-us/article/ROUND-function-c018c5d8-40fb-4053-90b1-b3e7f61a213c
    """
    Excel ROUND(): round half away from zero to `num_digits` digits.

    :param number: the number to round.
    :param num_digits: digits right of the decimal point; negative values
        round to tens, hundreds, ...
    :return: the rounded float, or an ExcelError for non-numeric input.
    """
    if not is_number(number):
        return ExcelError('#VALUE!', '%s is not a number' % str(number))
    if not is_number(num_digits):
        return ExcelError('#VALUE!', '%s is not a number' % str(num_digits))
    number = float(number) # if you don't Spreadsheet.dump/load, you might end up with Long numbers, which Decimal doesn't accept
    if num_digits >= 0: # round to the right side of the point
        return float(Decimal(repr(number)).quantize(Decimal(repr(pow(10, -num_digits))), rounding=ROUND_HALF_UP))
    else:
        # BUG FIX: Python's round() uses banker's rounding (round(25, -1) == 20)
        # while Excel rounds halves away from zero (ROUND(25, -1) == 30).
        # Scale with Decimal and apply ROUND_HALF_UP explicitly.
        factor = Decimal(10) ** -int(num_digits)
        return float((Decimal(repr(number)) / factor).quantize(Decimal(1), rounding=ROUND_HALF_UP) * factor)
def roundup(number, num_digits = 0): # Excel reference: https://support.office.com/en-us/article/ROUNDUP-function-f8bc9b23-e795-47db-8703-db171d0c42a7
    """
    Excel ROUNDUP(): round `number` away from zero to `num_digits` digits.

    :param number: the number to round.
    :param num_digits: digits right of the decimal point; negative values
        round to tens, hundreds, ...
    :return: the rounded float, or an ExcelError for non-numeric input.
    """
    if not is_number(number):
        return ExcelError('#VALUE!', '%s is not a number' % str(number))
    if not is_number(num_digits):
        return ExcelError('#VALUE!', '%s is not a number' % str(num_digits))
    number = float(number) # if you don't Spreadsheet.dump/load, you might end up with Long numbers, which Decimal doesn't accept
    if num_digits >= 0: # round to the right side of the point
        return float(Decimal(repr(number)).quantize(Decimal(repr(pow(10, -num_digits))), rounding=ROUND_UP))
    else:
        # BUG FIX: ceil() rounds towards +inf, so negative numbers were
        # rounded towards zero (ROUNDUP(-12, -1) gave -10; Excel gives -20).
        # Decimal ROUND_UP rounds away from zero for both signs.
        factor = Decimal(10) ** -int(num_digits)
        return float((Decimal(repr(number)) / factor).quantize(Decimal(1), rounding=ROUND_UP) * factor)
def mid(text, start_num, num_chars): # Excel reference: https://support.office.com/en-us/article/MID-MIDB-functions-d5f9e25c-d7d6-472e-b568-4ecb12433028
    """
    Excel MID(): substring of `text` starting at 1-based `start_num`,
    `num_chars` characters long (shorter if the text ends first).
    """
    text = str(text)
    if len(text) > CELL_CHARACTER_LIMIT:
        return ExcelError('#VALUE!', 'text is too long. Is %s needs to be %s or less.' % (len(text), CELL_CHARACTER_LIMIT))
    if type(start_num) != int:
        return ExcelError('#VALUE!', '%s is not an integer' % str(start_num))
    if type(num_chars) != int:
        return ExcelError('#VALUE!', '%s is not an integer' % str(num_chars))
    if start_num < 1:
        return ExcelError('#VALUE!', '%s is < 1' % str(start_num))
    if num_chars < 0:
        return ExcelError('#VALUE!', '%s is < 0' % str(num_chars))
    # Python slices are 0-based; Excel's start_num is 1-based
    return text[(start_num - 1): (start_num - 1 + num_chars)]
def date(year, month, day): # Excel reference: https://support.office.com/en-us/article/DATE-function-e36c0c8c-4104-49da-ab83-82328b832349
    """
    Excel DATE(): serial number of the given date.

    Years below 1900 are interpreted as offsets from 1900; out-of-range
    months/days are normalized by normalize_year().
    """
    if type(year) != int:
        return ExcelError('#VALUE!', '%s is not an integer' % str(year))
    if type(month) != int:
        return ExcelError('#VALUE!', '%s is not an integer' % str(month))
    if type(day) != int:
        return ExcelError('#VALUE!', '%s is not an integer' % str(day))
    if year < 0 or year > 9999:
        return ExcelError('#VALUE!', 'Year must be between 1 and 9999, instead %s' % str(year))
    if year < 1900:
        year = 1900 + year
    year, month, day = normalize_year(year, month, day) # taking into account negative month and day values
    date_0 = datetime.datetime(1900, 1, 1)
    # NOTE(review): this local shadows the function name and is unused
    date = datetime.datetime(year, month, day)
    # +2: serial 1 is 1900-01-01 -- presumably also compensating for Excel's
    # phantom 1900-02-29; confirm against Excel output for early dates
    result = (datetime.datetime(year, month, day) - date_0).days + 2
    if result <= 0:
        return ExcelError('#VALUE!', 'Date result is negative')
    else:
        return result
def yearfrac(start_date, end_date, basis = 0): # Excel reference: https://support.office.com/en-us/article/YEARFRAC-function-3844141e-c76d-4143-82b6-208454ddc6a8
    """
    Fraction of a year between two Excel date serial numbers.

    :param basis: day-count convention -- 0: US 30/360, 1: actual/actual,
        2: actual/360, 3: actual/365, 4: Eurobond 30/360.
    :return: the year fraction as a float, or an ExcelError.
    """
    def actual_nb_days_ISDA(start, end): # needed to separate days_in_leap_year from days_not_leap_year
        # NOTE(review): currently unused -- basis 1 calls
        # actual_nb_days_AFB_alter instead
        y1, m1, d1 = start
        y2, m2, d2 = end
        days_in_leap_year = 0
        days_not_in_leap_year = 0
        year_range = list(range(y1, y2 + 1))
        for y in year_range:
            if y == y1 and y == y2:
                nb_days = date(y2, m2, d2) - date(y1, m1, d1)
            elif y == y1:
                nb_days = date(y1 + 1, 1, 1) - date(y1, m1, d1)
            elif y == y2:
                nb_days = date(y2, m2, d2) - date(y2, 1, 1)
            else:
                nb_days = 366 if is_leap_year(y) else 365
            if is_leap_year(y):
                days_in_leap_year += nb_days
            else:
                days_not_in_leap_year += nb_days
        return (days_not_in_leap_year, days_in_leap_year)
    def actual_nb_days_AFB_alter(start, end): # http://svn.finmath.net/finmath%20lib/trunk/src/main/java/net/finmath/time/daycount/DayCountConvention_ACT_ACT_YEARFRAC.java
        # actual/actual year fraction following the YEARFRAC variant of the
        # AFB convention (see the linked finmath reference implementation)
        y1, m1, d1 = start
        y2, m2, d2 = end
        delta = date(*end) - date(*start)
        if delta <= 365:
            # denominator is 366 when a Feb 29 falls inside the interval
            if is_leap_year(y1) and is_leap_year(y2):
                denom = 366
            elif is_leap_year(y1) and date(y1, m1, d1) <= date(y1, 2, 29):
                denom = 366
            elif is_leap_year(y2) and date(y2, m2, d2) >= date(y2, 2, 29):
                denom = 366
            else:
                denom = 365
        else:
            # longer spans use the average year length across the interval
            year_range = list(range(y1, y2 + 1))
            nb = 0
            for y in year_range:
                nb += 366 if is_leap_year(y) else 365
            denom = nb / len(year_range)
        return delta / denom
    if not is_number(start_date):
        return ExcelError('#VALUE!', 'start_date %s must be a number' % str(start_date))
    if not is_number(end_date):
        return ExcelError('#VALUE!', 'end_date %s must be number' % str(end_date))
    if start_date < 0:
        return ExcelError('#VALUE!', 'start_date %s must be positive' % str(start_date))
    if end_date < 0:
        return ExcelError('#VALUE!', 'end_date %s must be positive' % str(end_date))
    if start_date > end_date: # switch dates if start_date > end_date
        temp = end_date
        end_date = start_date
        start_date = temp
    y1, m1, d1 = date_from_int(start_date)
    y2, m2, d2 = date_from_int(end_date)
    if basis == 0: # US 30/360
        d2 = 30 if d2 == 31 and (d1 == 31 or d1 == 30) else min(d2, 31)
        d1 = 30 if d1 == 31 else d1
        count = 360 * (y2 - y1) + 30 * (m2 - m1) + (d2 - d1)
        result = count / 360
    elif basis == 1: # Actual/actual
        result = actual_nb_days_AFB_alter((y1, m1, d1), (y2, m2, d2))
    elif basis == 2: # Actual/360
        result = (end_date - start_date) / 360
    elif basis == 3: # Actual/365
        result = (end_date - start_date) / 365
    elif basis == 4: # Eurobond 30/360
        d2 = 30 if d2 == 31 else d2
        d1 = 30 if d1 == 31 else d1
        count = 360 * (y2 - y1) + 30 * (m2 - m1) + (d2 - d1)
        result = count / 360
    else:
        return ExcelError('#VALUE!', '%d must be 0, 1, 2, 3 or 4' % basis)
    return result
def isna(value):
    # This function might need more solid testing
    """
    Rough ISNA() stand-in: True when eval()-ing `value` raises.

    NOTE(review): this does not actually test for the #N/A error -- and
    eval() on arbitrary input is unsafe. Consider checking for
    ExcelError('#N/A') explicitly instead; confirm against callers.
    """
    try:
        eval(value)
        return False
    except:
        return True
def isblank(value):
    """Excel ISBLANK(): True only when the cell value is empty (None)."""
    return value is None
def istext(value):
    """Excel ISTEXT(): True when value is exactly a str (subclasses excluded)."""
    return type(value) is str
def offset(reference, rows, cols, height=None, width=None): # Excel reference: https://support.office.com/en-us/article/OFFSET-function-c8de19ae-dd79-4b9b-a14e-b4d906d11b66
    """
    Excel OFFSET(): address string of a range shifted from `reference`
    by `rows`/`cols`, optionally resized to `height` x `width`.

    This function accepts a list of addresses.
    Maybe think of passing a Range as first argument.

    :return: an address string such as 'Sheet1!B2' or 'Sheet1!B2:C4',
        or an ExcelError.
    """
    # propagate any incoming Excel error
    for i in [reference, rows, cols, height, width]:
        if isinstance(i, ExcelError) or i in ErrorCodes:
            return i
    rows = int(rows)
    cols = int(cols)
    # get first cell address of reference
    if is_range(reference):
        ref = resolve_range(reference, should_flatten = True)[0][0]
    else:
        ref = reference
    ref_sheet = ''
    end_address = ''
    if '!' in ref:
        ref_sheet = ref.split('!')[0] + '!'
        ref_cell = ref.split('!')[1]
    else:
        ref_cell = ref
    found = re.search(CELL_REF_RE, ref)
    new_col = col2num(found.group(1)) + cols
    new_row = int(found.group(2)) + rows
    if new_row <= 0 or new_col <= 0:
        return ExcelError('#VALUE!', 'Offset is out of bounds')
    start_address = str(num2col(new_col)) + str(new_row)
    if (height is not None and width is not None):
        # BUG FIX: these messages previously read '%d must not be integer',
        # the inverse of the condition being reported, and %d could itself
        # fail on non-numeric input.
        if type(height) != int:
            return ExcelError('#VALUE!', '%s must be an integer' % str(height))
        if type(width) != int:
            return ExcelError('#VALUE!', '%s must be an integer' % str(width))
        if height > 0:
            end_row = new_row + height - 1
        else:
            return ExcelError('#VALUE!', '%d must be strictly positive' % height)
        if width > 0:
            end_col = new_col + width - 1
        else:
            return ExcelError('#VALUE!', '%d must be strictly positive' % width)
        end_address = ':' + str(num2col(end_col)) + str(end_row)
    elif height and not width or not height and width:
        # BUG FIX: the ExcelError was previously built without an error code.
        return ExcelError('#VALUE!', 'Height and width must be passed together')
    return ref_sheet + start_address + end_address
def sumproduct(*ranges): # Excel reference: https://support.office.com/en-us/article/SUMPRODUCT-function-16753e75-9f68-4874-94ac-4d2145a2fd2e
    """Multiply corresponding entries of the given Ranges and sum the products."""
    range_list = list(ranges)
    for r in range_list: # if a range has no values (i.e if it's empty)
        if len(r.values) == 0:
            return 0
    for range in range_list:
        for item in range.values:
            # If there is an ExcelError inside a Range, sumproduct should output an ExcelError
            if isinstance(item, ExcelError):
                return ExcelError("#N/A", "ExcelErrors are present in the sumproduct items")
    reduce(check_length, range_list) # check that all ranges have the same size
    # element-wise multiply all ranges, then sum the resulting values
    return reduce(lambda X, Y: X + Y, reduce(lambda x, y: Range.apply_all('multiply', x, y), range_list).values)
def iferror(value, value_if_error): # Excel reference: https://support.office.com/en-us/article/IFERROR-function-c526fd07-caeb-47b8-8bb6-63f3e417f611
    """Return value_if_error when `value` is an Excel error, otherwise `value`."""
    is_err = isinstance(value, ExcelError) or value in ErrorCodes
    return value_if_error if is_err else value
def irr(values, guess = None):
    """
    Function to calculate the internal rate of return (IRR) using payments and periodic dates. It resembles the
    excel function IRR().

    Excel reference: https://support.office.com/en-us/article/IRR-function-64925eaa-9988-495b-b290-3ad0c163c1bc

    :param values: the payments of which at least one has to be negative.
    :param guess: an initial guess which is required by Excel but isn't used by this function.
    :return: a float being the IRR.
    """
    if isinstance(values, Range):
        values = values.values
    if guess is not None and guess != 0:
        raise ValueError('guess value for excellib.irr() is %s and not 0' % guess)
    else:
        try:
            # NOTE(review): np.irr was removed from NumPy 1.20+ (moved to the
            # separate numpy-financial package); on recent NumPy this raises
            # AttributeError, which the except below converts to #NUM! --
            # confirm the pinned NumPy version.
            return np.irr(values)
        except Exception as e:
            return ExcelError('#NUM!', e)
def vlookup(lookup_value, table_array, col_index_num, range_lookup = True): # https://support.office.com/en-us/article/VLOOKUP-function-0bbc8083-26fe-4963-8ab8-93a18ad188a1
    """
    Excel VLOOKUP(): look up lookup_value in the first column of
    table_array and return the value from column col_index_num.

    :param range_lookup: False for exact match only; True (default) for
        approximate match on a sorted first column.
    :return: the matched value, or an ExcelError.
    """
    if not isinstance(table_array, Range):
        return ExcelError('#VALUE', 'table_array should be a Range')
    if col_index_num > table_array.ncols:
        return ExcelError('#VALUE', 'col_index_num is greater than the number of cols in table_array')
    first_column = table_array.get(0, 1)
    result_column = table_array.get(0, col_index_num)
    if not range_lookup:
        # exact match
        if lookup_value not in first_column.values:
            return ExcelError('#N/A', 'lookup_value not in first column of table_array')
        else:
            i = first_column.values.index(lookup_value)
            ref = first_column.order[i]
    else:
        # approximate match: keep the last value <= lookup_value
        i = None
        for v in first_column.values:
            if lookup_value >= v:
                i = first_column.values.index(v)
                ref = first_column.order[i]
            else:
                break
        if i is None:
            return ExcelError('#N/A', 'lookup_value smaller than all values of table_array')
    return Range.find_associated_value(ref, result_column)
def sln(cost, salvage, life): # Excel reference: https://support.office.com/en-us/article/SLN-function-cdb666e5-c1c6-40a7-806a-e695edc2f1c8
    """Straight-line depreciation for one period: (cost - salvage) / life."""
    # propagate any Excel error passed in as an argument
    for argument in (cost, salvage, life):
        if isinstance(argument, ExcelError) or argument in ErrorCodes:
            return argument
    return (cost - salvage) / life
def vdb(cost, salvage, life, start_period, end_period, factor = 2, no_switch = False): # Excel reference: https://support.office.com/en-us/article/VDB-function-dde4e207-f3fa-488d-91d2-66d55e861d73
    """
    Excel VDB(): declining-balance depreciation accumulated between
    start_period and end_period, optionally switching to straight-line
    once that becomes larger (unless no_switch).

    :param cost: initial cost of the asset.
    :param salvage: value at the end of the depreciation.
    :param life: number of periods over which the asset is depreciated.
    :param start_period: first period (may be fractional).
    :param end_period: last period (may be fractional).
    :param factor: rate at which the balance declines (2 = double declining).
    :param no_switch: when True, never switch to straight-line depreciation.
    :return: the depreciation over the requested span, or an ExcelError.
    """
    # propagate incoming Excel errors
    for arg in [cost, salvage, life, start_period, end_period, factor, no_switch]:
        if isinstance(arg, ExcelError) or arg in ErrorCodes:
            return arg
    for arg in [cost, salvage, life, start_period, end_period, factor]:
        if not isinstance(arg, (float, int)):
            return ExcelError('#VALUE', 'Arg %s should be an int, float or long, instead: %s' % (arg, type(arg)))
    start_period = start_period
    end_period = end_period
    sln_depr = sln(cost, salvage, life)
    depr_rate = factor / life
    acc_depr = 0
    depr = 0
    switch_to_sln = False
    sln_depr = 0
    result = 0
    start_life = 0
    delta_life = life % 1
    if delta_life > 0: # to handle cases when life is not an integer
        end_life = int(life + 1)
    else:
        end_life = int(life)
    periods = list(range(start_life, end_life))
    if int(start_period) != start_period:
        # fractional start: depreciate the partial first period up-front
        delta_start = abs(int(start_period) - start_period)
        depr = (cost - acc_depr) * depr_rate * delta_start
        acc_depr += depr
        start_life = 1
        periods = [x + 0.5 for x in periods]
    for index, current_year in enumerate(periods):
        if not no_switch: # no_switch = False (Default Case)
            if switch_to_sln:
                depr = sln_depr
            else:
                depr = (cost - acc_depr) * depr_rate
                acc_depr += depr
                temp_sln_depr = sln(cost, salvage, life)
                if depr < temp_sln_depr:
                    switch_to_sln = True
                    fixed_remaining_years = life - current_year - 1
                    fixed_remaining_cost = cost - acc_depr
                    # we need to check future sln: current depr should never be smaller than sln to come
                    sln_depr = sln(fixed_remaining_cost, salvage, fixed_remaining_years)
                    if sln_depr > depr: # if it's the case, we switch to sln earlier than the regular case
                        # cancel what has been done
                        acc_depr -= depr
                        fixed_remaining_years += 1
                        fixed_remaining_cost = cost - acc_depr
                        # recalculate depreciation
                        sln_depr = sln(fixed_remaining_cost, salvage, fixed_remaining_years)
                        depr = sln_depr
                        acc_depr += depr
        else: # no_switch = True
            depr = (cost - acc_depr) * depr_rate
            acc_depr += depr
        delta_start = abs(current_year - start_period)
        if delta_start < 1 and delta_start != 0:
            # partial overlap with the start boundary
            result += depr * (1 - delta_start)
        elif current_year >= start_period and current_year < end_period:
            delta_end = abs(end_period - current_year)
            if delta_end < 1 and delta_end != 0:
                # partial overlap with the end boundary
                result += depr * delta_end
            else:
                result += depr
    return result
def xnpv(rate, values, dates, lim_rate = True): # Excel reference: https://support.office.com/en-us/article/XNPV-function-1b42bbf6-370f-4532-a0eb-d67c16b664b7
    """
    Function to calculate the net present value (NPV) using payments and non-periodic dates. It resembles the excel function XPNV().

    :param rate: the discount rate.
    :param values: the payments of which at least one has to be negative.
    :param dates: the dates as excel dates (e.g. 43571 for 16/04/2019).
    :param lim_rate: when True, reject a negative rate the way Excel does.
    :return: a float being the NPV, or an ExcelError.
    """
    if isinstance(values, Range):
        values = values.values
    if isinstance(dates, Range):
        dates = dates.values
    if len(values) != len(dates):
        return ExcelError('#NUM!', '`values` range must be the same length as `dates` range in XNPV, %s != %s' % (len(values), len(dates)))
    if lim_rate and rate < 0:
        # BUG FIX: this message used '%' with no format placeholders, which
        # raised TypeError instead of returning the Excel error.
        return ExcelError('#NUM!', 'excel cannot handle a negative `rate`')
    xnpv = 0
    # discount each payment by its day offset from the first date
    for v, d in zip(values, dates):
        xnpv += v / np.power(1.0 + rate, (d - dates[0]) / 365)
    return xnpv
def pmt(*args): # Excel reference: https://support.office.com/en-us/article/PMT-function-0214da64-9a63-4996-bc20-214433fa6441
    """
    Excel PMT(): constant periodic payment for a loan.

    args[0]: interest rate per period; args[1]: number of payments;
    args[2]: present value. The fv and type arguments are not supported
    yet and are assumed to be their Excel defaults (0).
    """
    rate, nper, pv = args[0], args[1], args[2]
    # annuity formula with fv = 0 and payments due at period end
    return -pv * rate / (1 - np.power(1 + rate, -nper))
# https://support.office.com/en-us/article/POWER-function-D3F2908B-56F4-4C3F-895A-07FB519C362A
def power(number, power):
    """
    Excel POWER(): raise `number` to the exponent `power`.

    :return: the result as a float, or an ExcelError for 0**0 or a
        negative base with a fractional exponent (no real result).
    """
    if number == power == 0:
        # BUG FIX: this message used '%' with no placeholder, raising
        # TypeError instead of returning the Excel error.
        return ExcelError('#NUM!', 'Number and power cannot both be zero')
    if number < 0 and power % 1 != 0:
        # Only a fractional exponent of a negative base is invalid;
        # the old `power < 1` test wrongly rejected e.g. POWER(-2, -1).
        return ExcelError('#NUM!', '%s must be non-negative' % str(number))
    # cast to float: np.power raises on int bases with negative int exponents
    return np.power(float(number), power)
# https://support.office.com/en-ie/article/sqrt-function-654975c2-05c4-4831-9a24-2c65e4040fdf
def sqrt(number):
    """
    Excel SQRT(): square root of a non-negative number.

    :return: the square root as a float, or #NUM! for negative input.
    """
    if number < 0:
        # BUG FIX: the message previously referenced the undefined name
        # `index_num`, raising NameError instead of returning the error.
        return ExcelError('#NUM!', '%s must be non-negative' % str(number))
    return np.sqrt(number)
# https://support.office.com/en-ie/article/today-function-5eb3078d-a82c-4736-8930-2f51a028fdd9
def today():
    """Excel TODAY(): serial number of the current date (days since the 1900 epoch)."""
    reference_date = datetime.datetime.today().date()
    days_since_epoch = reference_date - EXCEL_EPOCH
    # why +2 ?
    # 1 based from 1900-01-01
    # I think it is "inclusive" / to the _end_ of the day.
    # https://support.office.com/en-us/article/date-function-e36c0c8c-4104-49da-ab83-82328b832349
    """Note: Excel stores dates as sequential serial numbers so that they can be used in calculations.
    January 1, 1900 is serial number 1, and January 1, 2008 is serial number 39448 because it is 39,447 days after January 1, 1900.
    You will need to change the number format (Format Cells) in order to display a proper date."""
    return days_since_epoch.days + 2
# https://support.office.com/en-us/article/concat-function-9b1a9a3f-94ff-41af-9736-694cbd6b4ca2
def concat(*args):
    """Excel CONCAT(): flatten all arguments and concatenate them via concatenate()."""
    return concatenate(*tuple(flatten(args)))
# https://support.office.com/en-us/article/CONCATENATE-function-8F8AE884-2CA8-4F7A-B093-75D702BEA31D
# Important: In Excel 2016, Excel Mobile, and Excel Online, this function has
# been replaced with the CONCAT function. Although the CONCATENATE function is
# still available for backward compatibility, you should consider using CONCAT
# from now on. This is because CONCATENATE may not be available in future
# versions of Excel.
#
# BE AWARE; there are functional differences between CONACTENATE AND CONCAT
#
def concatenate(*args):
    """
    Excel CONCATENATE(): join the string forms of all (flat) arguments.

    :return: the concatenated string, or an ExcelError when arguments are
        nested or the result exceeds the cell character limit.
    """
    if tuple(flatten(args)) != args:
        return ExcelError('#VALUE', 'Could not process arguments %s' % (args))
    cat_string = ''.join(str(a) for a in args)
    if len(cat_string) > CELL_CHARACTER_LIMIT:
        # BUG FIX: the message previously referenced the misspelled name
        # `cat_String`, raising NameError on over-long results.
        return ExcelError('#VALUE', 'Too long. concatentaed string should be no longer than %s but is %s' % (CELL_CHARACTER_LIMIT, len(cat_string)))
    return cat_string
if __name__ == '__main__':
pass
|
anthill/koala | koala/excellib.py | xnpv | python | def xnpv(rate, values, dates, lim_rate = True): # Excel reference: https://support.office.com/en-us/article/XNPV-function-1b42bbf6-370f-4532-a0eb-d67c16b664b7
if isinstance(values, Range):
values = values.values
if isinstance(dates, Range):
dates = dates.values
if len(values) != len(dates):
return ExcelError('#NUM!', '`values` range must be the same length as `dates` range in XNPV, %s != %s' % (len(values), len(dates)))
if lim_rate and rate < 0:
return ExcelError('#NUM!', '`excel cannot handle a negative `rate`' % (len(values), len(dates)))
xnpv = 0
for v, d in zip(values, dates):
xnpv += v / np.power(1.0 + rate, (d - dates[0]) / 365)
return xnpv | Function to calculate the net present value (NPV) using payments and non-periodic dates. It resembles the excel function XPNV().
:param rate: the discount rate.
:param values: the payments of which at least one has to be negative.
:param dates: the dates as excel dates (e.g. 43571 for 16/04/2019).
:return: a float being the NPV. | train | https://github.com/anthill/koala/blob/393089fe081380506e73235db18a32b4e078d222/koala/excellib.py#L1131-L1156 | null | # cython: profile=True
'''
Python equivalents of various excel functions
'''
# source: https://github.com/dgorissen/pycel/blob/master/src/pycel/excellib.py
from __future__ import absolute_import, division
import numpy as np
import scipy.optimize
import datetime
from math import log, ceil
from decimal import Decimal, ROUND_UP, ROUND_HALF_UP
from calendar import monthrange
from dateutil.relativedelta import relativedelta
from openpyxl.compat import unicode
from koala.utils import *
from koala.Range import RangeCore as Range
from koala.ExcelError import *
from functools import reduce
######################################################################################
# A dictionary that maps excel function names onto python equivalents. You should
# only add an entry to this map if the python name is different to the excel name
# (which it may need to be to prevent conflicts with existing python functions
# with that name, e.g., max).
# So if excel defines a function foobar(), all you have to do is add a function
# called foobar to this module. You only need to add it to the function map,
# if you want to use a different name in the python code.
# Note: some functions (if, pi, atan2, and, or, array, ...) are already taken care of
# in the FunctionNode code, so adding them here will have no effect.
# BUG FIX: the "min" entry was duplicated in the original literal.
FUNCTION_MAP = {
    "ln": "xlog",
    "min": "xmin",
    "max": "xmax",
    "sum": "xsum",
    "gammaln": "lgamma",
    "round": "xround",
}
# Excel function names handled individually by the parser/evaluator.
# CLEANUP: duplicated entries (SUM, MIN, MATCH, LOOKUP, INDEX, AVERAGE,
# SUMPRODUCT, IRR) removed; membership semantics are unchanged.
IND_FUN = [
    "SUM",
    "MIN",
    "IF",
    "TAN",
    "ATAN2",
    "PI",
    "ARRAY",
    "ARRAYROW",
    "AND",
    "OR",
    "ALL",
    "VALUE",
    "LOG",
    "MAX",
    "SUMPRODUCT",
    "IRR",
    "CHOOSE",
    "SUMIF",
    "AVERAGE",
    "RIGHT",
    "INDEX",
    "LOOKUP",
    "LINEST",
    "NPV",
    "MATCH",
    "MOD",
    "COUNT",
    "COUNTA",
    "COUNTIF",
    "COUNTIFS",
    "SUMIFS",
    "ROUND",
    "ROWS",
    "COLUMNS",
    "MID",
    "DATE",
    "YEARFRAC",
    "ISNA",
    "ISBLANK",
    "ISTEXT",
    "OFFSET",
    "IFERROR",
    "XIRR",
    "VLOOKUP",
    "VDB",
    "SLN",
    "XNPV",
    "PMT",
    "ROUNDUP",
    "POWER",
    "SQRT",
    "TODAY",
    "YEAR",
    "MONTH",
    "EOMONTH",
]
# Excel limits cell contents to 32767 characters.
CELL_CHARACTER_LIMIT = 32767
# Base of Excel's date serial numbers (serial 1 is 1900-01-01; see today()).
EXCEL_EPOCH = datetime.datetime.strptime("1900-01-01", '%Y-%m-%d').date()
######################################################################################
# List of excel equivalent functions
# TODO: needs unit testing
def value(text):
    """
    Convert a textual number to int or float, mirroring Excel's VALUE().

    :param text: string representation of a number; may end with '%'.
    :return: int for plain integers, float for decimals and percentages.
    """
    # BUG FIX: percentages must be handled before the decimal-point test,
    # otherwise e.g. "12.5%" was fed straight to float() and crashed.
    if text.endswith('%'):
        return float(text.replace('%', '')) / 100
    # BUG FIX: '.' anywhere (including a leading '.', e.g. ".5") means a
    # float; the old `find('.') > 0` sent ".5" to int() and crashed.
    if '.' in text:
        return float(text)
    return int(text)
def xlog(a):
    """Natural logarithm; applied element-wise when given a sequence."""
    if not isinstance(a, (list, tuple, np.ndarray)):
        return log(a)
    # flatten nested sequences, then take the log of every entry
    return [log(v) for v in flatten(a)]
def xmax(*args): # Excel reference: https://support.office.com/en-us/article/MAX-function-e0012414-9ac8-4b34-9a47-73e662c08098
    """Excel MAX() over numeric cells only; 0 when nothing numeric is found."""
    values = extract_numeric_values(*args)  # drops text and boolean cells
    # Excel returns zero for an all-non-numeric argument list
    return max(values) if values else 0
def xmin(*args): # Excel reference: https://support.office.com/en-us/article/MIN-function-61635d12-920f-4ce2-a70f-96f202dcc152
    """Excel MIN() over numeric cells only; 0 when nothing numeric is found."""
    values = extract_numeric_values(*args)  # drops text and boolean cells
    # Excel returns zero for an all-non-numeric argument list
    return min(values) if values else 0
def xsum(*args): # Excel reference: https://support.office.com/en-us/article/SUM-function-043e1c7d-7726-4e80-8f32-07b23e057f89
    """Excel SUM() over numeric cells only; 0 when nothing numeric is found."""
    values = extract_numeric_values(*args)  # drops text and boolean cells
    # Excel returns zero for an all-non-numeric argument list
    return sum(values) if values else 0
def choose(index_num, *values): # Excel reference: https://support.office.com/en-us/article/CHOOSE-function-fc5c184f-cb62-4ec7-a46e-38653b98f5bc
    """Excel CHOOSE(): return values[index_num - 1], with a 1-based index."""
    idx = int(index_num)
    # Excel only accepts indexes 1..254
    if not 1 <= idx <= 254:
        return ExcelError('#VALUE!', '%s must be between 1 and 254' % str(index_num))
    if idx > len(values):
        return ExcelError('#VALUE!', '%s must not be larger than the number of values: %s' % (str(index_num), len(values)))
    return values[idx - 1]
def sumif(range, criteria, sum_range = None): # Excel reference: https://support.office.com/en-us/article/SUMIF-function-169b8c99-c05c-4483-a712-1697a653039b
    """
    Excel SUMIF(): sum the entries of `range` (or of `sum_range`, when
    given) whose corresponding `range` entry matches `criteria`.
    """
    # WARNING:
    # - wildcards not supported
    # - doesn't really follow 2nd remark about sum_range length
    if not isinstance(range, Range):
        return TypeError('%s must be a Range' % str(range))
    if isinstance(criteria, Range) and not isinstance(criteria , (str, bool)): # ugly...
        return 0
    indexes = find_corresponding_index(range.values, criteria)
    if sum_range:
        if not isinstance(sum_range, Range):
            return TypeError('%s must be a Range' % str(sum_range))
        def f(x):
            # indexes past the end of a shorter sum_range contribute 0
            return sum_range.values[x] if x < sum_range.length else 0
        return sum(map(f, indexes))
    else:
        return sum([range.values[x] for x in indexes])
def sumifs(*args):
    # Excel reference: https://support.office.com/en-us/article/
    # sumifs-function-c9e748f5-7ea7-455d-9406-611cebce642b
    """
    Excel SUMIFS(): sum sum_range entries whose criteria ranges all match.

    args = (sum_range, criteria_range1, criterion1, criteria_range2,
    criterion2, ...).
    """
    nb_criteria = (len(args)-1) / 2
    args = list(args)
    # input checks
    if nb_criteria == 0:
        return TypeError('At least one criteria and criteria range should be provided.')
    if int(nb_criteria) != nb_criteria:
        return TypeError('Number of criteria an criteria ranges should be equal.')
    nb_criteria = int(nb_criteria)
    # separate arguments
    sum_range = args[0]
    criteria_ranges = args[1::2]
    criteria = args[2::2]
    index = list(range(0, len(sum_range)))
    # intersect the matching indexes of every (criteria_range, criterion) pair
    for i in range(nb_criteria):
        criteria_range = criteria_ranges[i]
        criterion = str(criteria[i])
        index_tmp = find_corresponding_index(criteria_range.values, criterion)
        index = np.intersect1d(index, index_tmp)
    sum_select = [sum_range.values[i] for i in index]
    res = sum(sum_select)
    return res
def average(*args): # Excel reference: https://support.office.com/en-us/article/AVERAGE-function-047bac88-d466-426c-a32b-8f33eb960cf6
    """
    Excel AVERAGE() over numeric cells only (text and booleans ignored).

    :return: the arithmetic mean, or #DIV/0! when there is no numeric
        value at all (previously this raised ZeroDivisionError).
    """
    # ignore non numeric cells and boolean cells
    values = extract_numeric_values(*args)
    if len(values) == 0:
        return ExcelError('#DIV/0!', 'no numeric values to average')
    return sum(values) / len(values)
def right(text,n):
    """Excel RIGHT(): last n characters of text (numbers are truncated to int first)."""
    #TODO: hack to deal with naca section numbers
    if isinstance(text, unicode) or isinstance(text,str):
        return text[-n:]
    else:
        # TODO: get rid of the decimal
        return str(int(text))[-n:]
def index(my_range, row, col = None): # Excel reference: https://support.office.com/en-us/article/INDEX-function-a5dcf0dd-996d-40a4-a822-b56b061328bd
    """
    Excel INDEX(): value at the given 1-based (row, col) position of a
    range; a 0 row or col selects the whole column/row as a list.

    :param my_range: a Range, or a (cells, nrows, ncols) tuple.
    :param row: 1-based row index (0 selects a whole column).
    :param col: 1-based column index (0 selects a whole row).
    :return: a single cell value, a list of values, or an ExcelError.
    """
    # propagate incoming Excel errors
    for i in [my_range, row, col]:
        if isinstance(i, ExcelError) or i in ErrorCodes:
            return i
    row = int(row) if row is not None else row
    col = int(col) if col is not None else col
    if isinstance(my_range, Range):
        cells = my_range.addresses
        nr = my_range.nrows
        nc = my_range.ncols
    else:
        cells, nr, nc = my_range
    if nr > 1 or nc > 1:
        # 2-D input: work with a flat row-major list of cells
        a = np.array(cells)
        cells = a.flatten().tolist()
    nr = int(nr)
    nc = int(nc)
    if type(cells) != list:
        return ExcelError('#VALUE!', '%s must be a list' % str(cells))
    if row is not None and not is_number(row):
        return ExcelError('#VALUE!', '%s must be a number' % str(row))
    if row == 0 and col == 0:
        return ExcelError('#VALUE!', 'No index asked for Range')
    if col is None and nr == 1 and row <= nc:
        # special case where index is matched on row, and the second row input can be used as a col
        col = row
        row = None
    if row is not None and row > nr:
        return ExcelError('#VALUE!', 'Index %i out of range' % row)
    if nr == 1:
        col = row if col is None else col
        return cells[int(col) - 1]
    if nc == 1:
        return cells[int(row) - 1]
    else: # could be optimised
        if col is None or row is None:
            return ExcelError('#VALUE!', 'Range is 2 dimensional, can not reach value with 1 arg as None')
        if not is_number(col):
            return ExcelError('#VALUE!', '%s must be a number' % str(col))
        if col > nc:
            return ExcelError('#VALUE!', 'Index %i out of range' % col)
        indices = list(range(len(cells)))
        if row == 0: # get column
            filtered_indices = [x for x in indices if x % nc == col - 1]
            filtered_cells = [cells[i] for i in filtered_indices]
            return filtered_cells
        elif col == 0: # get row
            filtered_indices = [x for x in indices if int(x / nc) == row - 1]
            filtered_cells = [cells[i] for i in filtered_indices]
            return filtered_cells
        else:
            # row-major flat index of the requested cell
            return cells[(row - 1)* nc + (col - 1)]
def lookup(value, lookup_range, result_range = None): # Excel reference: https://support.office.com/en-us/article/LOOKUP-function-446d94af-663b-451d-8251-369d5e3864cb
    """LOOKUP (vector form): approximate numeric lookup in *lookup_range*,
    returning the matching entry of *result_range* (or of *lookup_range* itself).

    NOTE(review): on non-numeric input an Exception instance is RETURNED, not
    raised — callers must check for it. Also, if lookup_range is empty the
    loop variable ``i`` is never bound and the code below raises NameError —
    confirm callers never pass an empty range.
    """
    # TODO
    if not isinstance(value,(int,float)):
        return Exception("Non numeric lookups (%s) not supported" % value)
    # TODO: note, may return the last equal value
    # index of the last numeric value
    lastnum = -1
    for i,v in enumerate(lookup_range.values):
        if isinstance(v,(int,float)):
            if v > value:
                break
            else:
                lastnum = i
    output_range = result_range.values if result_range is not None else lookup_range.values
    if lastnum < 0:
        return ExcelError('#VALUE!', 'No numeric data found in the lookup range')
    else:
        if i == 0:
            return ExcelError('#VALUE!', 'All values in the lookup range are bigger than %s' % value)
        else:
            if i >= len(lookup_range)-1:
                # return the biggest number smaller than value
                return output_range[lastnum]
            else:
                return output_range[i-1]
# NEEDS TEST
def linest(*args, **kwargs): # Excel reference: https://support.office.com/en-us/article/LINEST-function-84d7d0d9-6e50-4101-977a-fa7abf772b6d
    """LINEST: least-squares polynomial fit of y on x.

    args[0]/args[1] expose .values() for the y and x data; optional args[2]
    toggles the intercept ("true"/"false" strings accepted). The polynomial
    degree comes from the ``degree`` keyword (default 1). Returns the
    coefficient vector, highest power first.
    """
    y_data = list(args[0].values())
    x_data = list(args[1].values())
    # Third positional argument controls whether an intercept is fitted.
    fit_intercept = True
    if len(args) == 3:
        flag = args[2]
        fit_intercept = (flag.lower() == "true") if isinstance(flag, str) else flag
    degree = kwargs.get('degree', 1)
    # Vandermonde design matrix; last column is the constant term.
    design = np.vander(x_data, degree + 1)
    if not fit_intercept:
        # Zero the constant column to force the fit through the origin.
        design[:, -1] = np.zeros((1, len(x_data)))
    coefs, residuals, rank, sing_vals = np.linalg.lstsq(design, y_data)
    return coefs
# NEEDS TEST
def npv(*args): # Excel reference: https://support.office.com/en-us/article/NPV-function-8672cb67-2576-4d07-b67b-ac28acf2a568
    """NPV: net present value of a cash-flow series at a fixed discount rate.

    args[0] is the per-period rate; args[1] the cash flows (list or Range).
    The first flow is discounted one full period, as in Excel.
    """
    rate = args[0]
    flows = args[1]
    if isinstance(flows, Range):
        flows = flows.values
    total = 0
    for period, flow in enumerate(flows, start=1):
        total += float(flow) * (1 + rate) ** -period
    return total
def rows(array):
    """
    Function to find the number of rows in an array.
    Excel reference: https://support.office.com/en-ie/article/rows-function-b592593e-3fc2-47f2-bec1-bda493811597
    :param array: the array of which the rows should be counted.
    :return: the number of rows.
    """
    # Single-cell (A1:A1 style) ranges may collapse to a scalar or to None
    # (ref-cell issue); both count as one row.
    if isinstance(array, (float, int)) or array is None:
        return 1
    return len(array.values)
def columns(array):
    """
    Function to find the number of columns in an array.
    Excel reference: https://support.office.com/en-us/article/columns-function-4e8e7b4e-e603-43e8-b177-956088fa48ca
    :param array: the array of which the columns should be counted.
    :return: the number of columns.

    NOTE(review): delegates to rows(), i.e. returns len(array.values) for
    Range input — correct only if .values is laid out so that its length
    equals the column count here; confirm against Range's value layout.
    """
    return rows(array)
def match(lookup_value, lookup_range, match_type=1): # Excel reference: https://support.office.com/en-us/article/MATCH-function-e8dffd45-c762-47d6-bf89-533f4a37673a
    """MATCH: 1-based position of *lookup_value* in *lookup_range*.

    match_type 1: largest value <= lookup_value (range must be sorted
    ascending); 0: exact match (no wildcard support); -1: smallest value
    >= lookup_value (range must be sorted descending).
    Returns an ExcelError on failure instead of raising.
    """
    if not isinstance(lookup_range, Range):
        return ExcelError('#VALUE!', 'Lookup_range is not a Range')
    def type_convert(value):
        # Normalise for comparison: case-fold strings, float-ify ints, None -> 0.
        if type(value) == str:
            value = value.lower()
        elif type(value) == int:
            value = float(value)
        elif value is None:
            value = 0
        return value
    def type_convert_float(value):
        if is_number(value):
            value = float(value)
        else:
            value = None
        return value
    lookup_value = type_convert(lookup_value)
    range_values = [x for x in lookup_range.values if x is not None] # filter None values to avoid asc/desc order errors
    range_length = len(range_values)
    if match_type == 1:
        # Verify ascending sort while scanning for the last value <= lookup_value.
        posMax = -1
        for i in range(range_length):
            current = type_convert(range_values[i])
            if i < range_length - 1:
                if current > type_convert(range_values[i + 1]):
                    return ExcelError('#VALUE!', 'for match_type 1, lookup_range must be sorted ascending')
            if current <= lookup_value:
                posMax = i
        if posMax == -1:
            return ExcelError('#VALUE!','no result in lookup_range for match_type 1')
        return posMax +1 #Excel starts at 1
    elif match_type == 0:
        # No string wildcard
        try:
            if is_number(lookup_value):
                lookup_value = float(lookup_value)
                output = [type_convert_float(x) for x in range_values].index(lookup_value) + 1
            else:
                output = [str(x).lower() for x in range_values].index(lookup_value) + 1
            return output
        except:
            return ExcelError('#VALUE!', '%s not found' % lookup_value)
    elif match_type == -1:
        # Verify descending sort while scanning for the last value >= lookup_value.
        posMin = -1
        for i in range((range_length)):
            current = type_convert(range_values[i])
            # BUG FIX: was `i is not range_length-1` — identity comparison on
            # ints fails outside CPython's small-int cache, so the sort check
            # could run on the final element and raise IndexError for ranges
            # longer than ~257 entries.
            if i != range_length - 1 and current < type_convert(range_values[i + 1]):
                return ExcelError('#VALUE!','for match_type -1, lookup_range must be sorted descending')
            if current >= lookup_value:
                posMin = i
        if posMin == -1:
            return ExcelError('#VALUE!', 'no result in lookup_range for match_type -1')
        return posMin +1 #Excel starts at 1
def mod(nb, q): # Excel Reference: https://support.office.com/en-us/article/MOD-function-9b6cd169-b6ee-406a-a97b-edf2a9dc24f3
    """MOD: remainder of nb/q using Python's % (result takes the sign of q,
    matching Excel's MOD). Non-integer arguments yield an ExcelError."""
    for arg in (nb, q):
        if not isinstance(arg, int):
            return ExcelError('#VALUE!', '%s is not an integer' % str(arg))
    return nb % q
def eomonth(start_date, months): # Excel reference: https://support.office.com/en-us/article/eomonth-function-7314ffa1-2bc9-4005-9d66-f49db127d628
    """EOMONTH: serial number of the last day of the month *months* months
    after *start_date* (an Excel serial). Negative *months* goes backwards."""
    if not is_number(start_date):
        return ExcelError('#VALUE!', 'start_date %s must be a number' % str(start_date))
    if start_date < 0:
        return ExcelError('#VALUE!', 'start_date %s must be positive' % str(start_date))
    if not is_number(months):
        return ExcelError('#VALUE!', 'months %s must be a number' % str(months))
    y1, m1, d1 = date_from_int(start_date)
    start_date_d = datetime.date(year=y1, month=m1, day=d1)
    end_date_d = start_date_d + relativedelta(months=months)
    y2 = end_date_d.year
    m2 = end_date_d.month
    # monthrange()[1] is the number of days in the month, i.e. its last day.
    d2 = monthrange(y2, m2)[1]
    res = int(int_from_date(datetime.date(y2, m2, d2)))
    return res
def year(serial_number): # Excel reference: https://support.office.com/en-us/article/year-function-c64f017a-1354-490d-981f-578e8ec8d3b9
    """YEAR: calendar year of an Excel serial date."""
    if not is_number(serial_number):
        return ExcelError('#VALUE!', 'start_date %s must be a number' % str(serial_number))
    if serial_number < 0:
        return ExcelError('#VALUE!', 'start_date %s must be positive' % str(serial_number))
    y1, m1, d1 = date_from_int(serial_number)
    return y1
def month(serial_number): # Excel reference: https://support.office.com/en-us/article/month-function-579a2881-199b-48b2-ab90-ddba0eba86e8
    """MONTH: calendar month (1-12) of an Excel serial date."""
    if not is_number(serial_number):
        return ExcelError('#VALUE!', 'start_date %s must be a number' % str(serial_number))
    if serial_number < 0:
        return ExcelError('#VALUE!', 'start_date %s must be positive' % str(serial_number))
    y1, m1, d1 = date_from_int(serial_number)
    return m1
def count(*args): # Excel reference: https://support.office.com/en-us/article/COUNT-function-a59cd7fc-b623-4d93-87a4-d23bf411294c
    """COUNT: number of numeric values among the arguments.

    Range arguments are counted cell by cell. Booleans are excluded even
    though they subclass int (type() check, not isinstance).
    """
    total = 0
    for item in args:
        if isinstance(item, Range):
            total += sum(1 for cell in item.values if is_number(cell) and type(cell) is not bool)
        elif is_number(item):
            total += 1
    return total
def counta(range):
    """COUNTA: number of non-empty cells in *range*.

    A '#NULL' error counts as an empty range (0); other errors pass through.
    NOTE(review): if *range* is a bare error-code string (matched via
    ErrorCodes) it has no ``.value`` attribute and this raises — confirm
    callers only pass ExcelError instances here.
    """
    if isinstance(range, ExcelError) or range in ErrorCodes:
        if range.value == '#NULL':
            return 0
        else:
            return range # return the Excel Error
        # raise Exception('ExcelError other than #NULL passed to excellib.counta()')
    else:
        return len([x for x in range.values if x != None])
def countif(range, criteria): # Excel reference: https://support.office.com/en-us/article/COUNTIF-function-e0de10c6-f885-4e71-abb4-1f464816df34
    """COUNTIF: number of cells in *range* matching *criteria*.

    Limitations (unchanged): wildcards and string comparators
    (>, <, <=, >=, <>) are not supported.
    """
    matching = find_corresponding_index(range.values, criteria)
    return len(matching)
def countifs(*args): # Excel reference: https://support.office.com/en-us/article/COUNTIFS-function-dda3dc6e-f74e-4aee-88bc-aa8c2a866842
    """COUNTIFS: count positions matching every (range, criteria) pair.

    Implemented recursively: the indexes matching the first pair filter the
    remaining ranges, and the minimum count across layers is returned.
    """
    arg_list = list(args)
    l = len(arg_list)
    if l % 2 != 0:
        return ExcelError('#VALUE!', 'excellib.countifs() must have a pair number of arguments, here %d' % l)
    if l >= 2:
        indexes = find_corresponding_index(args[0].values, args[1]) # find indexes that match first layer of countif
        remaining_ranges = [elem for i, elem in enumerate(arg_list[2:]) if i % 2 == 0] # get only ranges
        remaining_criteria = [elem for i, elem in enumerate(arg_list[2:]) if i % 2 == 1] # get only criteria
        # NOTE(review): a commented-out check that all ranges are "associated"
        # used to live here; it never worked because of the recursion.
        filtered_remaining_ranges = []
        for range in remaining_ranges: # filter items in remaining_ranges that match valid indexes from first countif layer
            filtered_remaining_cells = []
            filtered_remaining_range = []
            for index, item in enumerate(range.values):
                if index in indexes:
                    filtered_remaining_cells.append(range.addresses[index]) # reconstructing cells from indexes
                    filtered_remaining_range.append(item) # reconstructing values from indexes
            # WARNING HERE
            filtered_remaining_ranges.append(Range(filtered_remaining_cells, filtered_remaining_range))
        new_tuple = ()
        for index, range in enumerate(filtered_remaining_ranges): # rebuild the tuple that will be the argument of next layer
            new_tuple += (range, remaining_criteria[index])
        return min(countifs(*new_tuple), len(indexes)) # only consider the minimum number across all layer responses
    else:
        # Recursion base: no pairs left — inf so that min() keeps the real count.
        return float('inf')
def xround(number, num_digits = 0): # Excel reference: https://support.office.com/en-us/article/ROUND-function-c018c5d8-40fb-4053-90b1-b3e7f61a213c
    """ROUND with Excel semantics: halves round AWAY from zero.

    BUG FIX: the negative-digit branch previously used Python's round(),
    which applies banker's rounding (round(25.0, -1) == 20.0 while Excel's
    ROUND(25, -1) == 30). Both branches now use Decimal ROUND_HALF_UP.
    """
    if not is_number(number):
        return ExcelError('#VALUE!', '%s is not a number' % str(number))
    if not is_number(num_digits):
        return ExcelError('#VALUE!', '%s is not a number' % str(num_digits))
    number = float(number) # if you don't Spreadsheet.dump/load, you might end up with Long numbers, which Decimal doesn't accept
    if num_digits >= 0: # round to the right side of the point
        return float(Decimal(repr(number)).quantize(Decimal(repr(pow(10, -num_digits))), rounding=ROUND_HALF_UP))
        # see https://docs.python.org/2/library/functions.html#round
        # and https://gist.github.com/ejamesc/cedc886c5f36e2d075c5
    else:
        # Decimal(1).scaleb(k) builds 1E+k so quantize targets tens/hundreds/...
        return float(Decimal(repr(number)).quantize(Decimal(1).scaleb(-num_digits), rounding=ROUND_HALF_UP))
def roundup(number, num_digits = 0): # Excel reference: https://support.office.com/en-us/article/ROUNDUP-function-f8bc9b23-e795-47db-8703-db171d0c42a7
    """ROUNDUP: round *number* away from zero to *num_digits* digits.

    BUG FIX: for negative numbers with negative num_digits the old code used
    a bare math.ceil, which rounds TOWARD zero (roundup(-12.3, -1) gave -10
    instead of Excel's -20). The magnitude is now rounded and the sign
    restored, matching Excel for both signs.
    """
    if not is_number(number):
        return ExcelError('#VALUE!', '%s is not a number' % str(number))
    if not is_number(num_digits):
        return ExcelError('#VALUE!', '%s is not a number' % str(num_digits))
    number = float(number) # if you don't Spreadsheet.dump/load, you might end up with Long numbers, which Decimal doesn't accept
    if num_digits >= 0: # round to the right side of the point
        # Decimal ROUND_UP already rounds away from zero for both signs.
        return float(Decimal(repr(number)).quantize(Decimal(repr(pow(10, -num_digits))), rounding=ROUND_UP))
        # see https://docs.python.org/2/library/functions.html#round
        # and https://gist.github.com/ejamesc/cedc886c5f36e2d075c5
    else:
        scale = pow(10, -num_digits)
        rounded = ceil(abs(number) / scale) * scale
        return -rounded if number < 0 else rounded
def mid(text, start_num, num_chars): # Excel reference: https://support.office.com/en-us/article/MID-MIDB-functions-d5f9e25c-d7d6-472e-b568-4ecb12433028
    """MID: substring of *text* of length *num_chars* starting at the
    1-based position *start_num*. Non-string input is stringified first;
    invalid arguments yield an ExcelError."""
    text = str(text)
    if len(text) > CELL_CHARACTER_LIMIT:
        return ExcelError('#VALUE!', 'text is too long. Is %s needs to be %s or less.' % (len(text), CELL_CHARACTER_LIMIT))
    for arg in (start_num, num_chars):
        if type(arg) != int:
            return ExcelError('#VALUE!', '%s is not an integer' % str(arg))
    if start_num < 1:
        return ExcelError('#VALUE!', '%s is < 1' % str(start_num))
    if num_chars < 0:
        return ExcelError('#VALUE!', '%s is < 0' % str(num_chars))
    # Convert from Excel's 1-based position to a Python slice.
    begin = start_num - 1
    return text[begin:begin + num_chars]
def date(year, month, day): # Excel reference: https://support.office.com/en-us/article/DATE-function-e36c0c8c-4104-49da-ab83-82328b832349
    """DATE: Excel serial number for (year, month, day).

    Years below 1900 get 1900 added (Excel behaviour); out-of-range month/day
    values are normalised by normalize_year(). Returns an ExcelError for
    non-integer input or a non-positive result.
    """
    if type(year) != int:
        return ExcelError('#VALUE!', '%s is not an integer' % str(year))
    if type(month) != int:
        return ExcelError('#VALUE!', '%s is not an integer' % str(month))
    if type(day) != int:
        return ExcelError('#VALUE!', '%s is not an integer' % str(day))
    if year < 0 or year > 9999:
        return ExcelError('#VALUE!', 'Year must be between 1 and 9999, instead %s' % str(year))
    if year < 1900:
        year = 1900 + year
    year, month, day = normalize_year(year, month, day) # taking into account negative month and day values
    date_0 = datetime.datetime(1900, 1, 1)
    date = datetime.datetime(year, month, day) # NOTE(review): unused local (shadows this function); value recomputed below
    # +2 offset mirrors today(): serials are 1-based from 1900-01-01 (see the
    # comment block in today() for the rationale).
    result = (datetime.datetime(year, month, day) - date_0).days + 2
    if result <= 0:
        return ExcelError('#VALUE!', 'Date result is negative')
    else:
        return result
def yearfrac(start_date, end_date, basis = 0): # Excel reference: https://support.office.com/en-us/article/YEARFRAC-function-3844141e-c76d-4143-82b6-208454ddc6a8
    """YEARFRAC: fraction of a year between two Excel serial dates.

    basis: 0 = US (NASD) 30/360, 1 = actual/actual, 2 = actual/360,
    3 = actual/365, 4 = European 30/360. Dates are swapped when given in
    reverse order. Returns an ExcelError on invalid input.
    """
    def actual_nb_days_ISDA(start, end): # needed to separate days_in_leap_year from days_not_leap_year
        # NOTE(review): defined but never called (basis 1 uses
        # actual_nb_days_AFB_alter below) — kept for reference.
        y1, m1, d1 = start
        y2, m2, d2 = end
        days_in_leap_year = 0
        days_not_in_leap_year = 0
        year_range = list(range(y1, y2 + 1))
        for y in year_range:
            if y == y1 and y == y2:
                nb_days = date(y2, m2, d2) - date(y1, m1, d1)
            elif y == y1:
                nb_days = date(y1 + 1, 1, 1) - date(y1, m1, d1)
            elif y == y2:
                nb_days = date(y2, m2, d2) - date(y2, 1, 1)
            else:
                nb_days = 366 if is_leap_year(y) else 365
            if is_leap_year(y):
                days_in_leap_year += nb_days
            else:
                days_not_in_leap_year += nb_days
        return (days_not_in_leap_year, days_in_leap_year)
    def actual_nb_days_AFB_alter(start, end): # http://svn.finmath.net/finmath%20lib/trunk/src/main/java/net/finmath/time/daycount/DayCountConvention_ACT_ACT_YEARFRAC.java
        y1, m1, d1 = start
        y2, m2, d2 = end
        delta = date(*end) - date(*start)
        if delta <= 365:
            # Denominator is 366 when the span can include Feb 29.
            if is_leap_year(y1) and is_leap_year(y2):
                denom = 366
            elif is_leap_year(y1) and date(y1, m1, d1) <= date(y1, 2, 29):
                denom = 366
            elif is_leap_year(y2) and date(y2, m2, d2) >= date(y2, 2, 29):
                denom = 366
            else:
                denom = 365
        else:
            # Spans over a year: average the day counts of the covered years.
            year_range = list(range(y1, y2 + 1))
            nb = 0
            for y in year_range:
                nb += 366 if is_leap_year(y) else 365
            denom = nb / len(year_range)
        return delta / denom
    if not is_number(start_date):
        return ExcelError('#VALUE!', 'start_date %s must be a number' % str(start_date))
    if not is_number(end_date):
        return ExcelError('#VALUE!', 'end_date %s must be number' % str(end_date))
    if start_date < 0:
        return ExcelError('#VALUE!', 'start_date %s must be positive' % str(start_date))
    if end_date < 0:
        return ExcelError('#VALUE!', 'end_date %s must be positive' % str(end_date))
    if start_date > end_date: # switch dates if start_date > end_date
        temp = end_date
        end_date = start_date
        start_date = temp
    y1, m1, d1 = date_from_int(start_date)
    y2, m2, d2 = date_from_int(end_date)
    if basis == 0: # US 30/360
        # NOTE(review): min(d2, 31) is a no-op (day values never exceed 31).
        d2 = 30 if d2 == 31 and (d1 == 31 or d1 == 30) else min(d2, 31)
        d1 = 30 if d1 == 31 else d1
        count = 360 * (y2 - y1) + 30 * (m2 - m1) + (d2 - d1)
        result = count / 360
    elif basis == 1: # Actual/actual
        result = actual_nb_days_AFB_alter((y1, m1, d1), (y2, m2, d2))
    elif basis == 2: # Actual/360
        result = (end_date - start_date) / 360
    elif basis == 3: # Actual/365
        result = (end_date - start_date) / 365
    elif basis == 4: # Eurobond 30/360
        d2 = 30 if d2 == 31 else d2
        d1 = 30 if d1 == 31 else d1
        count = 360 * (y2 - y1) + 30 * (m2 - m1) + (d2 - d1)
        result = count / 360
    else:
        return ExcelError('#VALUE!', '%d must be 0, 1, 2, 3 or 4' % basis)
    return result
def isna(value):
    """ISNA (approximation): True when *value* cannot be evaluated.

    BUG FIX: the bare ``except:`` also swallowed SystemExit and
    KeyboardInterrupt; narrowed to Exception.
    NOTE(review): eval() on cell content is unsafe for untrusted input —
    consider replacing this heuristic entirely.
    """
    # This function might need more solid testing
    try:
        eval(value)
        return False
    except Exception:
        return True
def isblank(value):
    """ISBLANK: True when the cell value is empty (represented as None)."""
    return value is None
def istext(value):
    """ISTEXT: True only for exact ``str`` instances.

    NOTE(review): the type() check excludes str subclasses — confirm that is
    intentional before changing to isinstance().
    """
    return type(value) == str
def offset(reference, rows, cols, height=None, width=None): # Excel reference: https://support.office.com/en-us/article/OFFSET-function-c8de19ae-dd79-4b9b-a14e-b4d906d11b66
    """OFFSET: return the address string shifted from *reference* by
    (rows, cols), optionally resized to (height, width).

    Returns an ExcelError for out-of-bounds results or invalid sizes.
    """
    # This function accepts a list of addresses
    # Maybe think of passing a Range as first argument
    for i in [reference, rows, cols, height, width]:
        if isinstance(i, ExcelError) or i in ErrorCodes:
            return i
    rows = int(rows)
    cols = int(cols)
    # get first cell address of reference
    if is_range(reference):
        ref = resolve_range(reference, should_flatten = True)[0][0]
    else:
        ref = reference
    ref_sheet = ''
    end_address = ''
    if '!' in ref:
        ref_sheet = ref.split('!')[0] + '!'
        ref_cell = ref.split('!')[1]
    else:
        ref_cell = ref
    found = re.search(CELL_REF_RE, ref)
    new_col = col2num(found.group(1)) + cols
    new_row = int(found.group(2)) + rows
    if new_row <= 0 or new_col <= 0:
        return ExcelError('#VALUE!', 'Offset is out of bounds')
    start_address = str(num2col(new_col)) + str(new_row)
    if (height is not None and width is not None):
        if type(height) != int:
            # BUG FIX: message read "must not be integer", and '%d' could
            # itself crash on a non-numeric height.
            return ExcelError('#VALUE!', '%s must be an integer' % str(height))
        if type(width) != int:
            return ExcelError('#VALUE!', '%s must be an integer' % str(width))
        if height > 0:
            end_row = new_row + height - 1
        else:
            return ExcelError('#VALUE!', '%d must be strictly positive' % height)
        if width > 0:
            end_col = new_col + width - 1
        else:
            return ExcelError('#VALUE!', '%d must be strictly positive' % width)
        end_address = ':' + str(num2col(end_col)) + str(end_row)
    elif height and not width or not height and width:
        return ExcelError('Height and width must be passed together')
    return ref_sheet + start_address + end_address
def sumproduct(*ranges): # Excel reference: https://support.office.com/en-us/article/SUMPRODUCT-function-16753e75-9f68-4874-94ac-4d2145a2fd2e
    """SUMPRODUCT: sum of the element-wise product of the given ranges.

    An empty range short-circuits to 0; any ExcelError inside a range
    propagates as #N/A. check_length() enforces equal range sizes.
    """
    range_list = list(ranges)
    for r in range_list: # if a range has no values (i.e if it's empty)
        if len(r.values) == 0:
            return 0
    for range in range_list:
        for item in range.values:
            # If there is an ExcelError inside a Range, sumproduct should output an ExcelError
            if isinstance(item, ExcelError):
                return ExcelError("#N/A", "ExcelErrors are present in the sumproduct items")
    reduce(check_length, range_list) # check that all ranges have the same size
    # Multiply all ranges element-wise, then sum the resulting values.
    return reduce(lambda X, Y: X + Y, reduce(lambda x, y: Range.apply_all('multiply', x, y), range_list).values)
def iferror(value, value_if_error): # Excel reference: https://support.office.com/en-us/article/IFERROR-function-c526fd07-caeb-47b8-8bb6-63f3e417f611
    """IFERROR: substitute *value_if_error* when *value* is an Excel error."""
    is_error = isinstance(value, ExcelError) or value in ErrorCodes
    return value_if_error if is_error else value
def irr(values, guess = None):
    """
    Function to calculate the internal rate of return (IRR) using payments and periodic dates. It resembles the
    excel function IRR().
    Excel reference: https://support.office.com/en-us/article/IRR-function-64925eaa-9988-495b-b290-3ad0c163c1bc
    :param values: the payments of which at least one has to be negative.
    :param guess: an initial guess which is required by Excel but isn't used by this function.
    :return: a float being the IRR.
    """
    if isinstance(values, Range):
        values = values.values
    # NOTE(review): a non-zero guess raises ValueError while other failures
    # are returned as ExcelError — inconsistent, but callers may rely on it.
    if guess is not None and guess != 0:
        raise ValueError('guess value for excellib.irr() is %s and not 0' % guess)
    else:
        try:
            # NOTE(review): np.irr was removed in NumPy >= 1.20 (moved to the
            # numpy-financial package); on modern NumPy the AttributeError is
            # caught below and silently surfaces as #NUM!.
            return np.irr(values)
        except Exception as e:
            return ExcelError('#NUM!', e)
def xirr(values, dates, guess=0):
    """
    Function to calculate the internal rate of return (IRR) using payments and non-periodic dates. It resembles the
    excel function XIRR().
    Excel reference: https://support.office.com/en-ie/article/xirr-function-de1242ec-6477-445b-b11b-a303ad9adc9d
    :param values: the payments of which at least one has to be negative.
    :param dates: the dates as excel dates (e.g. 43571 for 16/04/2019).
    :param guess: an initial guess which is required by Excel but isn't used by this function.
    :return: a float being the IRR.
    """
    if isinstance(values, Range):
        values = values.values
    if isinstance(dates, Range):
        dates = dates.values
    if guess is not None and guess != 0:
        raise ValueError('guess value for excellib.irr() is %s and not 0' % guess)
    else:
        try:
            # Root-find the rate where xnpv() is zero, starting at 0.
            return scipy.optimize.newton(lambda r: xnpv(r, values, dates, lim_rate=False), 0.0)
        except RuntimeError: # Failed to converge?
            # Fall back to bracketed bisection over a very wide rate interval.
            return scipy.optimize.brentq(lambda r: xnpv(r, values, dates, lim_rate=False), -1.0, 1e10)
def vlookup(lookup_value, table_array, col_index_num, range_lookup = True): # https://support.office.com/en-us/article/VLOOKUP-function-0bbc8083-26fe-4963-8ab8-93a18ad188a1
    """VLOOKUP: find *lookup_value* in the first column of *table_array* and
    return the value in column *col_index_num* of the same row.

    range_lookup=False requires an exact match; True (default) performs an
    approximate match.
    """
    if not isinstance(table_array, Range):
        return ExcelError('#VALUE', 'table_array should be a Range')
    if col_index_num > table_array.ncols:
        return ExcelError('#VALUE', 'col_index_num is greater than the number of cols in table_array')
    first_column = table_array.get(0, 1)
    result_column = table_array.get(0, col_index_num)
    if not range_lookup:
        if lookup_value not in first_column.values:
            return ExcelError('#N/A', 'lookup_value not in first column of table_array')
        else:
            i = first_column.values.index(lookup_value)
            ref = first_column.order[i]
    else:
        # NOTE(review): the approximate match assumes the first column is
        # sorted ascending, and .index(v) picks the FIRST occurrence of a
        # duplicate — confirm this matches Excel for unsorted/duplicated data.
        i = None
        for v in first_column.values:
            if lookup_value >= v:
                i = first_column.values.index(v)
                ref = first_column.order[i]
            else:
                break
        if i is None:
            return ExcelError('#N/A', 'lookup_value smaller than all values of table_array')
    return Range.find_associated_value(ref, result_column)
def sln(cost, salvage, life): # Excel reference: https://support.office.com/en-us/article/SLN-function-cdb666e5-c1c6-40a7-806a-e695edc2f1c8
    """SLN: straight-line depreciation of an asset for one period."""
    for arg in (cost, salvage, life):
        if isinstance(arg, ExcelError) or arg in ErrorCodes:
            return arg
    depreciable = cost - salvage
    return depreciable / life
def vdb(cost, salvage, life, start_period, end_period, factor = 2, no_switch = False): # Excel reference: https://support.office.com/en-us/article/VDB-function-dde4e207-f3fa-488d-91d2-66d55e861d73
    """VDB: variable declining-balance depreciation between two periods.

    Depreciates at *factor*/life per period and, unless *no_switch*, switches
    to straight-line once that yields a larger deduction. Fractional
    start/end periods are pro-rated.
    """
    for arg in [cost, salvage, life, start_period, end_period, factor, no_switch]:
        if isinstance(arg, ExcelError) or arg in ErrorCodes:
            return arg
    for arg in [cost, salvage, life, start_period, end_period, factor]:
        if not isinstance(arg, (float, int)):
            return ExcelError('#VALUE', 'Arg %s should be an int, float or long, instead: %s' % (arg, type(arg)))
    start_period = start_period # NOTE(review): no-op assignment
    end_period = end_period # NOTE(review): no-op assignment
    sln_depr = sln(cost, salvage, life) # NOTE(review): overwritten to 0 below before any use
    depr_rate = factor / life
    acc_depr = 0
    depr = 0
    switch_to_sln = False
    sln_depr = 0
    result = 0
    start_life = 0
    delta_life = life % 1
    if delta_life > 0: # to handle cases when life is not an integer
        end_life = int(life + 1)
    else:
        end_life = int(life)
    periods = list(range(start_life, end_life))
    if int(start_period) != start_period:
        # Fractional start: pro-rate the first partial period up front.
        delta_start = abs(int(start_period) - start_period)
        depr = (cost - acc_depr) * depr_rate * delta_start
        acc_depr += depr
        start_life = 1
        periods = [x + 0.5 for x in periods]
    for index, current_year in enumerate(periods):
        if not no_switch: # no_switch = False (Default Case)
            if switch_to_sln:
                depr = sln_depr
            else:
                depr = (cost - acc_depr) * depr_rate
            acc_depr += depr
            temp_sln_depr = sln(cost, salvage, life)
            if depr < temp_sln_depr:
                switch_to_sln = True
                fixed_remaining_years = life - current_year - 1
                fixed_remaining_cost = cost - acc_depr
                # we need to check future sln: current depr should never be smaller than sln to come
                sln_depr = sln(fixed_remaining_cost, salvage, fixed_remaining_years)
                if sln_depr > depr: # if it's the case, we switch to sln earlier than the regular case
                    # cancel what has been done
                    acc_depr -= depr
                    fixed_remaining_years += 1
                    fixed_remaining_cost = cost - acc_depr
                    # recalculate depreciation
                    sln_depr = sln(fixed_remaining_cost, salvage, fixed_remaining_years)
                    depr = sln_depr
                    acc_depr += depr
        else: # no_switch = True
            depr = (cost - acc_depr) * depr_rate
            acc_depr += depr
        delta_start = abs(current_year - start_period)
        if delta_start < 1 and delta_start != 0:
            result += depr * (1 - delta_start)
        elif current_year >= start_period and current_year < end_period:
            delta_end = abs(end_period - current_year)
            if delta_end < 1 and delta_end != 0:
                result += depr * delta_end
            else:
                result += depr
    return result
def pmt(*args): # Excel reference: https://support.office.com/en-us/article/PMT-function-0214da64-9a63-4996-bc20-214433fa6441
    """PMT: periodic payment of an annuity.

    args = (rate, num_payments, present_value). The fv and type arguments
    are not supported yet and are assumed to be their defaults (0).
    """
    rate, num_payments, present_value = args[0], args[1], args[2]
    discount_factor = 1 - np.power(1 + rate, -num_payments)
    return -present_value * rate / discount_factor
# https://support.office.com/en-us/article/POWER-function-D3F2908B-56F4-4C3F-895A-07FB519C362A
def power(number, power):
    """POWER: *number* raised to *power*.

    BUG FIX: the 0**0 error message had a '%' applied with no placeholder,
    so triggering it raised TypeError instead of returning the ExcelError.
    NOTE(review): the `power < 1 and number < 0` guard is broader than
    Excel's rule (e.g. POWER(-2, -2) = 0.25 in Excel but errors here) —
    left unchanged to preserve existing behaviour.
    """
    if number == power == 0:
        # Really excel? What were you thinking?
        return ExcelError('#NUM!', 'Number and power cannot both be zero')
    if power < 1 and number < 0:
        return ExcelError('#NUM!', '%s must be non-negative' % str(number))
    return np.power(number, power)
# https://support.office.com/en-ie/article/sqrt-function-654975c2-05c4-4831-9a24-2c65e4040fdf
def sqrt(number):
    """SQRT: square root; negative input yields #NUM! as in Excel.

    BUG FIX: the error message referenced an undefined name ``index_num``,
    so sqrt(-1) raised NameError instead of returning the ExcelError.
    """
    if number < 0:
        return ExcelError('#NUM!', '%s must be non-negative' % str(number))
    return np.sqrt(number)
# https://support.office.com/en-ie/article/today-function-5eb3078d-a82c-4736-8930-2f51a028fdd9
def today():
    """TODAY(): the current date as an Excel serial number (1900 date system).

    Excel stores dates as sequential serial numbers: January 1, 1900 is
    serial 1, so January 1, 2008 is 39448 (39,447 days later). The +2
    offset covers the 1-based count, matching the convention used by date().
    """
    current = datetime.datetime.today().date()
    excel_epoch = datetime.date(1900, 1, 1)  # same value as module-level EXCEL_EPOCH
    return (current - excel_epoch).days + 2
# https://support.office.com/en-us/article/concat-function-9b1a9a3f-94ff-41af-9736-694cbd6b4ca2
def concat(*args):
    """CONCAT: like CONCATENATE but flattens range/list arguments first."""
    return concatenate(*tuple(flatten(args)))
# https://support.office.com/en-us/article/CONCATENATE-function-8F8AE884-2CA8-4F7A-B093-75D702BEA31D
# Important: In Excel 2016, Excel Mobile, and Excel Online, this function has
# been replaced with the CONCAT function. Although the CONCATENATE function is
# still available for backward compatibility, you should consider using CONCAT
# from now on. This is because CONCATENATE may not be available in future
# versions of Excel.
#
# BE AWARE; there are functional differences between CONACTENATE AND CONCAT
#
def concatenate(*args):
    """CONCATENATE: join the arguments into one string.

    Unlike CONCAT, arguments must already be scalars (no ranges/lists) —
    otherwise '#VALUE' is returned. Results longer than Excel's cell limit
    also return '#VALUE'.

    BUG FIX: the over-limit branch referenced ``cat_String`` (wrong case),
    raising NameError whenever the limit was actually exceeded.
    """
    if tuple(flatten(args)) != args:
        return ExcelError('#VALUE', 'Could not process arguments %s' % (args))
    cat_string = ''.join(str(a) for a in args)
    if len(cat_string) > CELL_CHARACTER_LIMIT:
        return ExcelError('#VALUE', 'Too long. concatentaed string should be no longer than %s but is %s' % (CELL_CHARACTER_LIMIT, len(cat_string)))
    return cat_string
if __name__ == '__main__':
    pass # pure library module: nothing to run from the command line
|
anthill/koala | koala/excellib.py | today | python | def today():
reference_date = datetime.datetime.today().date()
days_since_epoch = reference_date - EXCEL_EPOCH
# why +2 ?
# 1 based from 1900-01-01
# I think it is "inclusive" / to the _end_ of the day.
# https://support.office.com/en-us/article/date-function-e36c0c8c-4104-49da-ab83-82328b832349
return days_since_epoch.days + 2 | Note: Excel stores dates as sequential serial numbers so that they can be used in calculations.
January 1, 1900 is serial number 1, and January 1, 2008 is serial number 39448 because it is 39,447 days after January 1, 1900.
You will need to change the number format (Format Cells) in order to display a proper date. | train | https://github.com/anthill/koala/blob/393089fe081380506e73235db18a32b4e078d222/koala/excellib.py#L1190-L1200 | null | # cython: profile=True
'''
Python equivalents of various excel functions
'''
# source: https://github.com/dgorissen/pycel/blob/master/src/pycel/excellib.py
from __future__ import absolute_import, division
import numpy as np
import scipy.optimize
import datetime
from math import log, ceil
from decimal import Decimal, ROUND_UP, ROUND_HALF_UP
from calendar import monthrange
from dateutil.relativedelta import relativedelta
from openpyxl.compat import unicode
from koala.utils import *
from koala.Range import RangeCore as Range
from koala.ExcelError import *
from functools import reduce
######################################################################################
# A dictionary that maps excel function names onto python equivalents. You should
# only add an entry to this map if the python name is different to the excel name
# (which it may need to be to prevent conflicts with existing python functions
# with that name, e.g., max).
# So if excel defines a function foobar(), all you have to do is add a function
# called foobar to this module. You only need to add it to the function map,
# if you want to use a different name in the python code.
# Note: some functions (if, pi, atan2, and, or, array, ...) are already taken care of
# in the FunctionNode code, so adding them here will have no effect.
# Maps Excel function names to the (differently named) Python implementations
# in this module. BUG FIX: the "min" entry was duplicated in the literal.
FUNCTION_MAP = {
    "ln": "xlog",
    "min": "xmin",
    "max": "xmax",
    "sum": "xsum",
    "gammaln": "lgamma",
    "round": "xround",
}
# Excel function names handled natively by the evaluator.
# NOTE(review): several names appear more than once (SUM, MIN, MATCH, LOOKUP,
# INDEX, AVERAGE, SUMPRODUCT, IRR) — harmless if this list is only used for
# membership tests, but worth deduplicating.
IND_FUN = [
    "SUM",
    "MIN",
    "IF",
    "TAN",
    "ATAN2",
    "PI",
    "ARRAY",
    "ARRAYROW",
    "AND",
    "OR",
    "ALL",
    "VALUE",
    "LOG",
    "MAX",
    "SUMPRODUCT",
    "IRR",
    "MIN",
    "SUM",
    "CHOOSE",
    "SUMIF",
    "AVERAGE",
    "RIGHT",
    "INDEX",
    "LOOKUP",
    "LINEST",
    "NPV",
    "MATCH",
    "MOD",
    "COUNT",
    "COUNTA",
    "COUNTIF",
    "COUNTIFS",
    "MATCH",
    "LOOKUP",
    "INDEX",
    "AVERAGE",
    "SUMIFS",
    "ROUND",
    "ROWS",
    "COLUMNS",
    "MID",
    "DATE",
    "YEARFRAC",
    "ISNA",
    "ISBLANK",
    "ISTEXT",
    "OFFSET",
    "SUMPRODUCT",
    "IFERROR",
    "IRR",
    "XIRR",
    "VLOOKUP",
    "VDB",
    "SLN",
    "XNPV",
    "PMT",
    "ROUNDUP",
    "POWER",
    "SQRT",
    "TODAY",
    "YEAR",
    "MONTH",
    "EOMONTH",
]
CELL_CHARACTER_LIMIT = 32767 # Excel's maximum text length for a single cell (used by mid()/concatenate())
EXCEL_EPOCH = datetime.datetime.strptime("1900-01-01", '%Y-%m-%d').date() # base of Excel's 1900 date system (see today())
######################################################################################
# List of excel equivalent functions
# TODO: needs unit testing
def value(text):
    """VALUE: coerce a textual number to int or float.

    Trailing '%' divides by 100; a decimal point yields a float; otherwise
    an int. BUG FIX: the percent check now runs first ("12.5%" used to hit
    float("12.5%") and raise), and '.' membership replaces find('.') > 0 so
    leading-dot numbers like ".5" parse as floats instead of crashing int().
    """
    if text.endswith('%'):
        return float(text[:-1]) / 100
    if '.' in text:
        # make the distinction for naca numbers
        return float(text)
    return int(text)
def xlog(a):
    """Natural logarithm; applied element-wise when given a sequence/array."""
    if isinstance(a, (list, tuple, np.ndarray)):
        return [log(v) for v in flatten(a)]
    return log(a)
def xmax(*args): # Excel reference: https://support.office.com/en-us/article/MAX-function-e0012414-9ac8-4b34-9a47-73e662c08098
    """MAX over numeric, non-boolean cells; 0 when nothing is numeric
    (which is what Excel does)."""
    values = extract_numeric_values(*args)
    return max(values) if values else 0
def xmin(*args): # Excel reference: https://support.office.com/en-us/article/MIN-function-61635d12-920f-4ce2-a70f-96f202dcc152
    """MIN over numeric, non-boolean cells; 0 when nothing is numeric
    (which is what Excel does)."""
    values = extract_numeric_values(*args)
    return min(values) if values else 0
def xsum(*args): # Excel reference: https://support.office.com/en-us/article/SUM-function-043e1c7d-7726-4e80-8f32-07b23e057f89
    """SUM over numeric, non-boolean cells; 0 when nothing is numeric
    (which is what Excel does)."""
    values = extract_numeric_values(*args)
    return sum(values) if values else 0
def choose(index_num, *values): # Excel reference: https://support.office.com/en-us/article/CHOOSE-function-fc5c184f-cb62-4ec7-a46e-38653b98f5bc
    """CHOOSE: return values[index_num - 1] (1-based, max 254 choices)."""
    index = int(index_num)
    if not 1 <= index <= 254:
        return ExcelError('#VALUE!', '%s must be between 1 and 254' % str(index_num))
    if index > len(values):
        return ExcelError('#VALUE!', '%s must not be larger than the number of values: %s' % (str(index_num), len(values)))
    return values[index - 1]
def sumif(range, criteria, sum_range = None): # Excel reference: https://support.office.com/en-us/article/SUMIF-function-169b8c99-c05c-4483-a712-1697a653039b
    """SUMIF: sum values of *sum_range* (or of *range* itself) at positions
    where *range* matches *criteria*.

    NOTE(review): type errors are RETURNED as TypeError instances, not
    raised — callers must check for them.
    """
    # WARNING:
    # - wildcards not supported
    # - doesn't really follow 2nd remark about sum_range length
    if not isinstance(range, Range):
        return TypeError('%s must be a Range' % str(range))
    if isinstance(criteria, Range) and not isinstance(criteria , (str, bool)): # ugly...
        return 0
    indexes = find_corresponding_index(range.values, criteria)
    if sum_range:
        if not isinstance(sum_range, Range):
            return TypeError('%s must be a Range' % str(sum_range))
        def f(x):
            # Out-of-range positions contribute 0 (sum_range may be shorter).
            return sum_range.values[x] if x < sum_range.length else 0
        return sum(map(f, indexes))
    else:
        return sum([range.values[x] for x in indexes])
def sumifs(*args):
    # Excel reference: https://support.office.com/en-us/article/
    # sumifs-function-c9e748f5-7ea7-455d-9406-611cebce642b
    """SUMIFS(sum_range, criteria_range1, criteria1, [criteria_range2, criteria2], ...):
    sum the entries of sum_range whose index satisfies every criteria pair."""
    nb_criteria = (len(args)-1) / 2
    args = list(args)

    # input checks; ExcelError instead of the former `return TypeError(...)`
    # so the error type is consistent with the rest of this module
    if nb_criteria == 0:
        return ExcelError('#VALUE!', 'At least one criteria and criteria range should be provided.')
    if int(nb_criteria) != nb_criteria:
        return ExcelError('#VALUE!', 'Number of criteria and criteria ranges should be equal.')

    nb_criteria = int(nb_criteria)

    # separate arguments
    sum_range = args[0]
    criteria_ranges = args[1::2]
    criteria = args[2::2]

    # start with all indexes, then intersect with the matches of each pair
    index = list(range(0, len(sum_range)))
    for i in range(nb_criteria):
        criteria_range = criteria_ranges[i]
        criterion = str(criteria[i])
        index_tmp = find_corresponding_index(criteria_range.values, criterion)
        index = np.intersect1d(index, index_tmp)

    sum_select = [sum_range.values[i] for i in index]
    res = sum(sum_select)

    return res
def average(*args): # Excel reference: https://support.office.com/en-us/article/AVERAGE-function-047bac88-d466-426c-a32b-8f33eb960cf6
    """Arithmetic mean of the numeric, non-boolean cells in the arguments."""
    # ignore non numeric cells and boolean cells
    values = extract_numeric_values(*args)
    if len(values) < 1:
        # Excel returns #DIV/0! when nothing numeric is found; previously this
        # raised an uncaught ZeroDivisionError
        return ExcelError('#DIV/0!', 'AVERAGE has no numeric values to average')
    return sum(values) / len(values)
def right(text, n):
    """Excel RIGHT: the last `n` characters of `text`.

    Non-string inputs are truncated to int first (hack for naca section
    numbers, per the original TODO) so the decimal part is dropped.
    """
    # `isinstance(text, unicode)` raised a NameError on Python 3 (unicode was
    # removed); on Python 3, str covers all text
    if isinstance(text, str):
        return text[-n:]
    else:
        # TODO: get rid of the decimal
        return str(int(text))[-n:]
def index(my_range, row, col = None): # Excel reference: https://support.office.com/en-us/article/INDEX-function-a5dcf0dd-996d-40a4-a822-b56b061328bd
    """Excel INDEX: value at the 1-based (row, col) position of `my_range`.

    `my_range` is either a koala Range or a (cells, nrows, ncols) tuple.
    A 0 for `row` (resp. `col`) returns the whole column (resp. row).
    Excel errors received as arguments are passed through unchanged.
    """
    # propagate any Excel error received as an argument
    for i in [my_range, row, col]:
        if isinstance(i, ExcelError) or i in ErrorCodes:
            return i

    row = int(row) if row is not None else row
    col = int(col) if col is not None else col

    if isinstance(my_range, Range):
        cells = my_range.addresses
        nr = my_range.nrows
        nc = my_range.ncols
    else:
        cells, nr, nc = my_range

    # 2-D input may arrive nested; flatten so linear indexing below works
    if nr > 1 or nc > 1:
        a = np.array(cells)
        cells = a.flatten().tolist()

    nr = int(nr)
    nc = int(nc)

    if type(cells) != list:
        return ExcelError('#VALUE!', '%s must be a list' % str(cells))
    if row is not None and not is_number(row):
        return ExcelError('#VALUE!', '%s must be a number' % str(row))
    if row == 0 and col == 0:
        return ExcelError('#VALUE!', 'No index asked for Range')

    if col is None and nr == 1 and row <= nc:
        # special case where index is matched on row, and the second row input can be used as a col
        col = row
        row = None

    if row is not None and row > nr:
        return ExcelError('#VALUE!', 'Index %i out of range' % row)

    if nr == 1:
        # single row: the (possibly swapped) col picks within the row
        col = row if col is None else col
        return cells[int(col) - 1]

    if nc == 1:
        # single column: row alone picks the value
        return cells[int(row) - 1]
    else: # could be optimised
        if col is None or row is None:
            return ExcelError('#VALUE!', 'Range is 2 dimensional, can not reach value with 1 arg as None')
        if not is_number(col):
            return ExcelError('#VALUE!', '%s must be a number' % str(col))
        if col > nc:
            return ExcelError('#VALUE!', 'Index %i out of range' % col)

        indices = list(range(len(cells)))

        if row == 0: # get column
            # every nc-th cell starting at (col - 1) forms the requested column
            filtered_indices = [x for x in indices if x % nc == col - 1]
            filtered_cells = [cells[i] for i in filtered_indices]
            return filtered_cells
        elif col == 0: # get row
            # the (row - 1)-th chunk of nc consecutive cells forms the row
            filtered_indices = [x for x in indices if int(x / nc) == row - 1]
            filtered_cells = [cells[i] for i in filtered_indices]
            return filtered_cells
        else:
            # row-major linear index into the flattened cells
            return cells[(row - 1)* nc + (col - 1)]
def lookup(value, lookup_range, result_range = None): # Excel reference: https://support.office.com/en-us/article/LOOKUP-function-446d94af-663b-451d-8251-369d5e3864cb
    """Excel LOOKUP (vector form): find the largest numeric value in
    `lookup_range` that is <= `value`, and return the entry at the same
    position of `result_range` (or of `lookup_range` when absent).
    """
    # TODO
    if not isinstance(value,(int,float)):
        # NOTE(review): this RETURNS an Exception instance rather than raising
        # it or returning an ExcelError like the rest of the module — confirm
        # callers expect that
        return Exception("Non numeric lookups (%s) not supported" % value)

    # TODO: note, may return the last equal value

    # index of the last numeric value that did not exceed `value`
    lastnum = -1
    # NOTE(review): if lookup_range.values is empty, `i` below is never bound
    # and the later `if i == 0` raises NameError — confirm empty ranges cannot
    # reach this function
    for i,v in enumerate(lookup_range.values):
        if isinstance(v,(int,float)):
            if v > value:
                break
            else:
                lastnum = i

    output_range = result_range.values if result_range is not None else lookup_range.values

    if lastnum < 0:
        return ExcelError('#VALUE!', 'No numeric data found in the lookup range')
    else:
        if i == 0:
            return ExcelError('#VALUE!', 'All values in the lookup range are bigger than %s' % value)
        else:
            if i >= len(lookup_range)-1:
                # return the biggest number smaller than value
                return output_range[lastnum]
            else:
                return output_range[i-1]
# NEEDS TEST
def linest(*args, **kwargs): # Excel reference: https://support.office.com/en-us/article/LINEST-function-84d7d0d9-6e50-4101-977a-fa7abf772b6d
    """Least-squares polynomial fit of Y (args[0]) against X (args[1]).

    Optional args[2] ("TRUE"/"FALSE" or bool) controls whether an intercept is
    fitted; kwargs['degree'] sets the polynomial degree (default 1, i.e. linear).
    Returns the coefficient vector, highest degree first.
    """
    # NOTE(review): .values is called as a method here, while Range values are
    # accessed as an attribute elsewhere in this module — confirm the expected
    # argument type (a dict-like?)
    Y = list(args[0].values())
    X = list(args[1].values())

    if len(args) == 3:
        const = args[2]
        if isinstance(const,str):
            const = (const.lower() == "true")
    else:
        const = True

    degree = kwargs.get('degree',1)

    # build the vandermonde matrix
    A = np.vander(X, degree+1)

    if not const:
        # force the intercept to zero
        A[:,-1] = np.zeros((1,len(X)))

    # perform the fit
    # NOTE(review): lstsq without an explicit rcond emits a FutureWarning on
    # modern NumPy — consider passing rcond=None
    (coefs, residuals, rank, sing_vals) = np.linalg.lstsq(A, Y)

    return coefs
# NEEDS TEST
def npv(*args): # Excel reference: https://support.office.com/en-us/article/NPV-function-8672cb67-2576-4d07-b67b-ac28acf2a568
    """Net present value of args[1] (a Range or sequence of cashflows)
    discounted at args[0]; the first payment is one period out, as in Excel."""
    discount_rate = args[0]
    flows = args[1]
    if isinstance(flows, Range):
        flows = flows.values
    total = 0
    for period, amount in enumerate(flows, start=1):
        total += float(amount) * (1 + discount_rate) ** -period
    return total
def rows(array):
    """
    Function to find the number of rows in an array.

    Excel reference: https://support.office.com/en-ie/article/rows-function-b592593e-3fc2-47f2-bec1-bda493811597

    :param array: the array of which the rows should be counted.
    :return: the number of rows.
    """
    # A1:A1-style single-cell ranges arrive as a bare number, and broken ref
    # cells arrive as None; both count as a single row.
    if isinstance(array, (float, int)) or array is None:
        return 1
    return len(array.values)
def columns(array):
    """
    Function to find the number of columns in an array.

    Excel reference: https://support.office.com/en-us/article/columns-function-4e8e7b4e-e603-43e8-b177-956088fa48ca

    :param array: the array of which the columns should be counted.
    :return: the number of columns.
    """
    # NOTE(review): delegates to rows(), i.e. returns len(array.values); for a
    # genuinely 2-D range that is the total cell count, not the column count —
    # confirm only 1-D ranges reach this function.
    return rows(array)
def match(lookup_value, lookup_range, match_type=1): # Excel reference: https://support.office.com/en-us/article/MATCH-function-e8dffd45-c762-47d6-bf89-533f4a37673a
    """Excel MATCH: 1-based position of `lookup_value` in `lookup_range`.

    match_type 1: last value <= lookup_value (range must be ascending).
    match_type 0: exact match (no wildcard support).
    match_type -1: last value >= lookup_value (range must be descending).
    NOTE(review): None cells are filtered out first, so returned positions are
    relative to the filtered list, which can differ from Excel when blanks are
    interleaved — confirm.
    """
    if not isinstance(lookup_range, Range):
        return ExcelError('#VALUE!', 'Lookup_range is not a Range')

    def type_convert(value):
        # normalize for comparison: case-fold strings, widen ints, map None to 0
        if type(value) == str:
            value = value.lower()
        elif type(value) == int:
            value = float(value)
        elif value is None:
            value = 0

        return value;

    def type_convert_float(value):
        # float when numeric, else None (so exact match only hits numbers)
        if is_number(value):
            value = float(value)
        else:
            value = None

        return value

    lookup_value = type_convert(lookup_value)

    range_values = [x for x in lookup_range.values if x is not None] # filter None values to avoid asc/desc order errors
    range_length = len(range_values)

    if match_type == 1:
        # Verify ascending sort
        posMax = -1
        for i in range(range_length):
            current = type_convert(range_values[i])

            if i < range_length - 1:
                if current > type_convert(range_values[i + 1]):
                    return ExcelError('#VALUE!', 'for match_type 1, lookup_range must be sorted ascending')
            if current <= lookup_value:
                posMax = i
        if posMax == -1:
            return ExcelError('#VALUE!','no result in lookup_range for match_type 1')
        return posMax +1 #Excel starts at 1
    elif match_type == 0:
        # No string wildcard
        try:
            if is_number(lookup_value):
                lookup_value = float(lookup_value)
                output = [type_convert_float(x) for x in range_values].index(lookup_value) + 1
            else:
                output = [str(x).lower() for x in range_values].index(lookup_value) + 1
            return output
        except:
            # list.index raised ValueError: not found
            return ExcelError('#VALUE!', '%s not found' % lookup_value)
    elif match_type == -1:
        # Verify descending sort
        posMin = -1
        for i in range((range_length)):
            current = type_convert(range_values[i])

            # NOTE(review): `i is not range_length-1` compares int IDENTITY, not
            # equality; it works for CPython small ints but should be `!=`
            if i is not range_length-1 and current < type_convert(range_values[i+1]):
                return ExcelError('#VALUE!','for match_type -1, lookup_range must be sorted descending')
            if current >= lookup_value:
                posMin = i
        if posMin == -1:
            return ExcelError('#VALUE!', 'no result in lookup_range for match_type -1')
        return posMin +1 #Excel starts at 1
def mod(nb, q): # Excel Reference: https://support.office.com/en-us/article/MOD-function-9b6cd169-b6ee-406a-a97b-edf2a9dc24f3
    """Remainder of nb / q; both operands must be ints (result sign follows q)."""
    for operand in (nb, q):
        if not isinstance(operand, int):
            return ExcelError('#VALUE!', '%s is not an integer' % str(operand))
    return nb % q
def eomonth(start_date, months): # Excel reference: https://support.office.com/en-us/article/eomonth-function-7314ffa1-2bc9-4005-9d66-f49db127d628
    """Excel EOMONTH: serial number of the last day of the month that is
    `months` months after `start_date` (an Excel date serial)."""
    if not is_number(start_date):
        return ExcelError('#VALUE!', 'start_date %s must be a number' % str(start_date))
    if start_date < 0:
        return ExcelError('#VALUE!', 'start_date %s must be positive' % str(start_date))
    if not is_number(months):
        return ExcelError('#VALUE!', 'months %s must be a number' % str(months))

    # convert the serial to a calendar date, shift whole months, then clamp to
    # the last day of the target month before converting back to a serial
    y1, m1, d1 = date_from_int(start_date)
    start_date_d = datetime.date(year=y1, month=m1, day=d1)
    end_date_d = start_date_d + relativedelta(months=months)
    y2 = end_date_d.year
    m2 = end_date_d.month
    # monthrange()[1] is the number of days in (y2, m2), i.e. the month's last day
    d2 = monthrange(y2, m2)[1]
    res = int(int_from_date(datetime.date(y2, m2, d2)))
    return res
def year(serial_number): # Excel reference: https://support.office.com/en-us/article/year-function-c64f017a-1354-490d-981f-578e8ec8d3b9
    """Year component of an Excel date serial number."""
    if not is_number(serial_number):
        return ExcelError('#VALUE!', 'start_date %s must be a number' % str(serial_number))
    if serial_number < 0:
        return ExcelError('#VALUE!', 'start_date %s must be positive' % str(serial_number))
    return date_from_int(serial_number)[0]
def month(serial_number): # Excel reference: https://support.office.com/en-us/article/month-function-579a2881-199b-48b2-ab90-ddba0eba86e8
    """Month component of an Excel date serial number."""
    if not is_number(serial_number):
        return ExcelError('#VALUE!', 'start_date %s must be a number' % str(serial_number))
    if serial_number < 0:
        return ExcelError('#VALUE!', 'start_date %s must be positive' % str(serial_number))
    return date_from_int(serial_number)[1]
def count(*args): # Excel reference: https://support.office.com/en-us/article/COUNT-function-a59cd7fc-b623-4d93-87a4-d23bf411294c
    """Count numeric items; Range contents are inspected element-wise with
    booleans excluded, and bare numeric arguments each count as one."""
    total = 0
    for arg in args:
        if isinstance(arg, Range):
            # count numeric, non-boolean cells inside the range
            total += sum(1 for v in arg.values if is_number(v) and type(v) is not bool)
        elif is_number(arg): # is_number() also accepts text representations of numbers
            total += 1
    return total
def counta(range):
    """Count the non-empty cells of `range`; a #NULL error counts as zero,
    any other Excel error is propagated as-is."""
    if isinstance(range, ExcelError) or range in ErrorCodes:
        if range.value == '#NULL':
            return 0
        return range # return the Excel Error
        # raise Exception('ExcelError other than #NULL passed to excellib.counta()')
    return sum(1 for v in range.values if v != None)
def countif(range, criteria): # Excel reference: https://support.office.com/en-us/article/COUNTIF-function-e0de10c6-f885-4e71-abb4-1f464816df34
    # WARNING:
    # - wildcards not supported
    # - support of strings with >, <, <=, =>, <> not provided
    """Number of cells of `range` that satisfy `criteria`."""
    matching = find_corresponding_index(range.values, criteria)
    return len(matching)
def countifs(*args): # Excel reference: https://support.office.com/en-us/article/COUNTIFS-function-dda3dc6e-f74e-4aee-88bc-aa8c2a866842
    """Count the positions that satisfy EVERY (range, criteria) pair.

    Works recursively: match the first pair, filter the remaining ranges down
    to the matched positions, then recurse on the filtered pairs. The base
    case returns +inf so min() keeps the smallest real count found on the way.
    """
    arg_list = list(args)
    l = len(arg_list)

    if l % 2 != 0:
        return ExcelError('#VALUE!', 'excellib.countifs() must have a pair number of arguments, here %d' % l)

    if l >= 2:
        indexes = find_corresponding_index(args[0].values, args[1]) # find indexes that match first layer of countif

        remaining_ranges = [elem for i, elem in enumerate(arg_list[2:]) if i % 2 == 0] # get only ranges
        remaining_criteria = [elem for i, elem in enumerate(arg_list[2:]) if i % 2 == 1] # get only criteria

        # verif that all Ranges are associated COULDNT MAKE THIS WORK CORRECTLY BECAUSE OF RECURSION
        # association_type = None
        # temp = [args[0]] + remaining_ranges

        # for index, range in enumerate(temp): # THIS IS SHIT, but works ok
        #     if type(range) == Range and index < len(temp) - 1:
        #         asso_type = range.is_associated(temp[index + 1])
        #         print 'asso', asso_type
        #         if association_type is None:
        #             association_type = asso_type
        #         elif associated_type != asso_type:
        #             association_type = None
        #             break
        # print 'ASSO', association_type

        # if association_type is None:
        #     return ValueError('All items must be Ranges and associated')

        filtered_remaining_ranges = []

        # keep, in each remaining range, only the cells whose index matched
        # the first criteria pair
        for range in remaining_ranges: # filter items in remaining_ranges that match valid indexes from first countif layer
            filtered_remaining_cells = []
            filtered_remaining_range = []

            for index, item in enumerate(range.values):
                if index in indexes:
                    filtered_remaining_cells.append(range.addresses[index]) # reconstructing cells from indexes
                    filtered_remaining_range.append(item) # reconstructing values from indexes

            # WARNING HERE
            filtered_remaining_ranges.append(Range(filtered_remaining_cells, filtered_remaining_range))

        new_tuple = ()

        # rebuild the (range, criteria, range, criteria, ...) argument tuple
        for index, range in enumerate(filtered_remaining_ranges): # rebuild the tuple that will be the argument of next layer
            new_tuple += (range, remaining_criteria[index])

        return min(countifs(*new_tuple), len(indexes)) # only consider the minimum number across all layer responses
    else:
        # base case of the recursion: no pairs left, so no further constraint
        return float('inf')
def xround(number, num_digits = 0): # Excel reference: https://support.office.com/en-us/article/ROUND-function-c018c5d8-40fb-4053-90b1-b3e7f61a213c
    """Excel ROUND: round `number` to `num_digits`, halves away from zero."""
    if not is_number(number):
        return ExcelError('#VALUE!', '%s is not a number' % str(number))
    if not is_number(num_digits):
        return ExcelError('#VALUE!', '%s is not a number' % str(num_digits))

    number = float(number) # if you don't Spreadsheet.dump/load, you might end up with Long numbers, which Decimal doesn't accept
    if num_digits >= 0: # round to the right side of the point
        return float(Decimal(repr(number)).quantize(Decimal(repr(pow(10, -num_digits))), rounding=ROUND_HALF_UP))
        # see https://docs.python.org/2/library/functions.html#round
        # and https://gist.github.com/ejamesc/cedc886c5f36e2d075c5
    else:
        # Python's round() uses banker's rounding (round(25, -1) == 20), while
        # Excel rounds halves away from zero (ROUND(25, -1) == 30): scale down,
        # quantize with ROUND_HALF_UP, then scale back up.
        factor = pow(10, -num_digits)
        return float(Decimal(repr(number / factor)).quantize(Decimal('1'), rounding=ROUND_HALF_UP)) * factor
def roundup(number, num_digits = 0): # Excel reference: https://support.office.com/en-us/article/ROUNDUP-function-f8bc9b23-e795-47db-8703-db171d0c42a7
    """Excel ROUNDUP: round `number` away from zero to `num_digits`."""
    if not is_number(number):
        return ExcelError('#VALUE!', '%s is not a number' % str(number))
    if not is_number(num_digits):
        return ExcelError('#VALUE!', '%s is not a number' % str(num_digits))

    number = float(number) # if you don't Spreadsheet.dump/load, you might end up with Long numbers, which Decimal doesn't accept
    if num_digits >= 0: # round to the right side of the point
        return float(Decimal(repr(number)).quantize(Decimal(repr(pow(10, -num_digits))), rounding=ROUND_UP))
        # see https://docs.python.org/2/library/functions.html#round
        # and https://gist.github.com/ejamesc/cedc886c5f36e2d075c5
    else:
        # ceil() rounds toward +infinity, which is wrong for negative numbers:
        # Excel's ROUNDUP(-12345, -2) is -12400, not -12300. Scale, quantize
        # away from zero with ROUND_UP, then scale back.
        factor = pow(10, -num_digits)
        return float(Decimal(repr(number / factor)).quantize(Decimal('1'), rounding=ROUND_UP)) * factor
def mid(text, start_num, num_chars): # Excel reference: https://support.office.com/en-us/article/MID-MIDB-functions-d5f9e25c-d7d6-472e-b568-4ecb12433028
    """Excel MID: `num_chars` characters of `text` starting at the 1-based
    position `start_num`."""
    text = str(text)

    if len(text) > CELL_CHARACTER_LIMIT:
        return ExcelError('#VALUE!', 'text is too long. Is %s needs to be %s or less.' % (len(text), CELL_CHARACTER_LIMIT))
    # both indices must be plain ints (checked in argument order)
    for argument in (start_num, num_chars):
        if type(argument) != int:
            return ExcelError('#VALUE!', '%s is not an integer' % str(argument))
    if start_num < 1:
        return ExcelError('#VALUE!', '%s is < 1' % str(start_num))
    if num_chars < 0:
        return ExcelError('#VALUE!', '%s is < 0' % str(num_chars))

    begin = start_num - 1
    return text[begin:begin + num_chars]
def date(year, month, day): # Excel reference: https://support.office.com/en-us/article/DATE-function-e36c0c8c-4104-49da-ab83-82328b832349
    """Excel DATE: serial number of (year, month, day) in the 1900 date system."""
    if type(year) != int:
        return ExcelError('#VALUE!', '%s is not an integer' % str(year))
    if type(month) != int:
        return ExcelError('#VALUE!', '%s is not an integer' % str(month))
    if type(day) != int:
        return ExcelError('#VALUE!', '%s is not an integer' % str(day))
    if year < 0 or year > 9999:
        return ExcelError('#VALUE!', 'Year must be between 1 and 9999, instead %s' % str(year))

    if year < 1900:
        # Excel treats small years as offsets from 1900
        year = 1900 + year

    year, month, day = normalize_year(year, month, day) # taking into account negative month and day values

    date_0 = datetime.datetime(1900, 1, 1)
    date = datetime.datetime(year, month, day)
    # +2: serial 1 is 1900-01-01, and Excel's 1900 system counts the
    # fictitious 1900-02-29 — presumably why the offset is 2, TODO confirm
    result = (datetime.datetime(year, month, day) - date_0).days + 2

    if result <= 0:
        return ExcelError('#VALUE!', 'Date result is negative')
    else:
        return result
def yearfrac(start_date, end_date, basis = 0): # Excel reference: https://support.office.com/en-us/article/YEARFRAC-function-3844141e-c76d-4143-82b6-208454ddc6a8
    """Excel YEARFRAC: fraction of a year between two Excel date serials.

    basis: 0 = US 30/360 (default), 1 = actual/actual, 2 = actual/360,
    3 = actual/365, 4 = Eurobond 30/360.
    """
    # NOTE(review): this helper is defined but never called below — the
    # actual/actual branch uses actual_nb_days_AFB_alter instead.
    def actual_nb_days_ISDA(start, end): # needed to separate days_in_leap_year from days_not_leap_year
        y1, m1, d1 = start
        y2, m2, d2 = end

        days_in_leap_year = 0
        days_not_in_leap_year = 0

        year_range = list(range(y1, y2 + 1))

        for y in year_range:
            if y == y1 and y == y2:
                nb_days = date(y2, m2, d2) - date(y1, m1, d1)
            elif y == y1:
                nb_days = date(y1 + 1, 1, 1) - date(y1, m1, d1)
            elif y == y2:
                nb_days = date(y2, m2, d2) - date(y2, 1, 1)
            else:
                nb_days = 366 if is_leap_year(y) else 365

            if is_leap_year(y):
                days_in_leap_year += nb_days
            else:
                days_not_in_leap_year += nb_days

        return (days_not_in_leap_year, days_in_leap_year)

    def actual_nb_days_AFB_alter(start, end): # http://svn.finmath.net/finmath%20lib/trunk/src/main/java/net/finmath/time/daycount/DayCountConvention_ACT_ACT_YEARFRAC.java
        # actual/actual: actual days over a denominator of 365 or 366,
        # or the average year length when the span exceeds one year
        y1, m1, d1 = start
        y2, m2, d2 = end

        delta = date(*end) - date(*start)

        if delta <= 365:
            # one year or less: 366 if a Feb 29 falls inside the span
            if is_leap_year(y1) and is_leap_year(y2):
                denom = 366
            elif is_leap_year(y1) and date(y1, m1, d1) <= date(y1, 2, 29):
                denom = 366
            elif is_leap_year(y2) and date(y2, m2, d2) >= date(y2, 2, 29):
                denom = 366
            else:
                denom = 365
        else:
            # multi-year span: average length of the years covered
            year_range = list(range(y1, y2 + 1))
            nb = 0

            for y in year_range:
                nb += 366 if is_leap_year(y) else 365

            denom = nb / len(year_range)

        return delta / denom

    if not is_number(start_date):
        return ExcelError('#VALUE!', 'start_date %s must be a number' % str(start_date))
    if not is_number(end_date):
        return ExcelError('#VALUE!', 'end_date %s must be number' % str(end_date))
    if start_date < 0:
        return ExcelError('#VALUE!', 'start_date %s must be positive' % str(start_date))
    if end_date < 0:
        return ExcelError('#VALUE!', 'end_date %s must be positive' % str(end_date))

    if start_date > end_date: # switch dates if start_date > end_date
        temp = end_date
        end_date = start_date
        start_date = temp

    y1, m1, d1 = date_from_int(start_date)
    y2, m2, d2 = date_from_int(end_date)

    if basis == 0: # US 30/360
        # 31sts are clamped to 30 per the US (NASD) convention
        d2 = 30 if d2 == 31 and (d1 == 31 or d1 == 30) else min(d2, 31)
        d1 = 30 if d1 == 31 else d1

        count = 360 * (y2 - y1) + 30 * (m2 - m1) + (d2 - d1)
        result = count / 360
    elif basis == 1: # Actual/actual
        result = actual_nb_days_AFB_alter((y1, m1, d1), (y2, m2, d2))
    elif basis == 2: # Actual/360
        result = (end_date - start_date) / 360
    elif basis == 3: # Actual/365
        result = (end_date - start_date) / 365
    elif basis == 4: # Eurobond 30/360
        d2 = 30 if d2 == 31 else d2
        d1 = 30 if d1 == 31 else d1

        count = 360 * (y2 - y1) + 30 * (m2 - m1) + (d2 - d1)
        result = count / 360
    else:
        return ExcelError('#VALUE!', '%d must be 0, 1, 2, 3 or 4' % basis)

    return result
def isna(value):
    """True when `value` cannot be evaluated as a Python expression (used as a
    proxy for Excel's #N/A check).

    SECURITY: eval() executes arbitrary code; if `value` can come from an
    untrusted workbook this is dangerous — consider a non-executing check.
    """
    # This function might need more solid testing
    try:
        eval(value)
        return False
    except:
        # any evaluation failure (e.g. NameError on '#N/A') counts as N/A
        return True
def isblank(value):
    """Excel ISBLANK: True only for an empty (None) cell."""
    if value is None:
        return True
    return False
def istext(value):
    """Excel ISTEXT: True when the value is a string."""
    if type(value) == str:
        return True
    return False
def offset(reference, rows, cols, height=None, width=None): # Excel reference: https://support.office.com/en-us/article/OFFSET-function-c8de19ae-dd79-4b9b-a14e-b4d906d11b66
    """Return the ADDRESS (not the value) of `reference` shifted by
    `rows`/`cols`, optionally resized to `height` x `width`.

    This function accepts a list of addresses.
    Maybe think of passing a Range as first argument.
    """
    # propagate Excel errors received as arguments
    for i in [reference, rows, cols, height, width]:
        if isinstance(i, ExcelError) or i in ErrorCodes:
            return i

    rows = int(rows)
    cols = int(cols)

    # get first cell address of reference
    if is_range(reference):
        ref = resolve_range(reference, should_flatten = True)[0][0]
    else:
        ref = reference

    ref_sheet = ''
    end_address = ''

    if '!' in ref:
        ref_sheet = ref.split('!')[0] + '!'
        ref_cell = ref.split('!')[1]
    else:
        ref_cell = ref

    found = re.search(CELL_REF_RE, ref)
    new_col = col2num(found.group(1)) + cols
    new_row = int(found.group(2)) + rows

    if new_row <= 0 or new_col <= 0:
        return ExcelError('#VALUE!', 'Offset is out of bounds')

    start_address = str(num2col(new_col)) + str(new_row)

    if (height is not None and width is not None):
        # messages fixed: they previously read 'must not be integer' (inverted)
        # and used %d, which itself raises on a non-int argument
        if type(height) != int:
            return ExcelError('#VALUE!', '%s must be an integer' % height)
        if type(width) != int:
            return ExcelError('#VALUE!', '%s must be an integer' % width)

        if height > 0:
            end_row = new_row + height - 1
        else:
            return ExcelError('#VALUE!', '%d must be strictly positive' % height)
        if width > 0:
            end_col = new_col + width - 1
        else:
            return ExcelError('#VALUE!', '%d must be strictly positive' % width)

        end_address = ':' + str(num2col(end_col)) + str(end_row)
    elif height and not width or not height and width:
        # the error code was missing, so ExcelError was called with one
        # argument and raised a TypeError instead of returning the error
        return ExcelError('#VALUE!', 'Height and width must be passed together')

    return ref_sheet + start_address + end_address
def sumproduct(*ranges): # Excel reference: https://support.office.com/en-us/article/SUMPRODUCT-function-16753e75-9f68-4874-94ac-4d2145a2fd2e
    """Excel SUMPRODUCT: element-wise product of the ranges, then sum."""
    range_list = list(ranges)

    for r in range_list: # if a range has no values (i.e if it's empty)
        if len(r.values) == 0:
            return 0

    for range in range_list:
        for item in range.values:
            # If there is an ExcelError inside a Range, sumproduct should output an ExcelError
            if isinstance(item, ExcelError):
                return ExcelError("#N/A", "ExcelErrors are present in the sumproduct items")

    reduce(check_length, range_list) # check that all ranges have the same size
    # multiply all ranges pairwise, then sum the resulting range's values
    return reduce(lambda X, Y: X + Y, reduce(lambda x, y: Range.apply_all('multiply', x, y), range_list).values)
def iferror(value, value_if_error): # Excel reference: https://support.office.com/en-us/article/IFERROR-function-c526fd07-caeb-47b8-8bb6-63f3e417f611
    """Substitute `value_if_error` when `value` is an Excel error, otherwise
    pass `value` through unchanged."""
    is_error = isinstance(value, ExcelError) or value in ErrorCodes
    return value_if_error if is_error else value
def irr(values, guess = None):
    """
    Function to calculate the internal rate of return (IRR) using payments and periodic dates. It resembles the
    excel function IRR().

    Excel reference: https://support.office.com/en-us/article/IRR-function-64925eaa-9988-495b-b290-3ad0c163c1bc

    :param values: the payments of which at least one has to be negative.
    :param guess: an initial guess which is required by Excel but isn't used by this function.
    :return: a float being the IRR.
    """
    if isinstance(values, Range):
        values = values.values

    if guess is not None and guess != 0:
        raise ValueError('guess value for excellib.irr() is %s and not 0' % guess)
    else:
        try:
            # NOTE(review): np.irr was removed from NumPy (>= 1.20); on modern
            # NumPy this needs numpy_financial.irr — confirm the pinned version.
            return np.irr(values)
        except Exception as e:
            return ExcelError('#NUM!', e)
def xirr(values, dates, guess=0):
    """
    Function to calculate the internal rate of return (IRR) using payments and non-periodic dates. It resembles the
    excel function XIRR().

    Excel reference: https://support.office.com/en-ie/article/xirr-function-de1242ec-6477-445b-b11b-a303ad9adc9d

    :param values: the payments of which at least one has to be negative.
    :param dates: the dates as excel dates (e.g. 43571 for 16/04/2019).
    :param guess: an initial guess which is required by Excel but isn't used by this function.
    :return: a float being the IRR.
    """
    if isinstance(values, Range):
        values = values.values

    if isinstance(dates, Range):
        dates = dates.values

    if guess is not None and guess != 0:
        raise ValueError('guess value for excellib.irr() is %s and not 0' % guess)
    else:
        try:
            # root of the XNPV curve via Newton's method, starting at rate 0
            return scipy.optimize.newton(lambda r: xnpv(r, values, dates, lim_rate=False), 0.0)
        except RuntimeError:    # Failed to converge?
            # fall back to bracketed bisection; assumes the sign of XNPV
            # changes somewhere on [-1, 1e10]
            return scipy.optimize.brentq(lambda r: xnpv(r, values, dates, lim_rate=False), -1.0, 1e10)
def vlookup(lookup_value, table_array, col_index_num, range_lookup = True): # https://support.office.com/en-us/article/VLOOKUP-function-0bbc8083-26fe-4963-8ab8-93a18ad188a1
    """Excel VLOOKUP: find `lookup_value` in the first column of `table_array`
    and return the value in column `col_index_num` of the matching row.

    range_lookup=False requires an exact match; True takes the last first-column
    value that is <= lookup_value (scanning in order; the range is expected to
    be sorted ascending, as in Excel).
    """
    if not isinstance(table_array, Range):
        return ExcelError('#VALUE', 'table_array should be a Range')

    if col_index_num > table_array.ncols:
        return ExcelError('#VALUE', 'col_index_num is greater than the number of cols in table_array')

    first_column = table_array.get(0, 1)
    result_column = table_array.get(0, col_index_num)

    if not range_lookup:
        # exact match
        if lookup_value not in first_column.values:
            return ExcelError('#N/A', 'lookup_value not in first column of table_array')
        else:
            i = first_column.values.index(lookup_value)
            ref = first_column.order[i]
    else:
        # approximate match: keep advancing while values stay <= lookup_value
        i = None
        for v in first_column.values:
            if lookup_value >= v:
                i = first_column.values.index(v)
                ref = first_column.order[i]
            else:
                break

        if i is None:
            return ExcelError('#N/A', 'lookup_value smaller than all values of table_array')

    # map the matched first-column cell to its value in the result column
    return Range.find_associated_value(ref, result_column)
def sln(cost, salvage, life): # Excel reference: https://support.office.com/en-us/article/SLN-function-cdb666e5-c1c6-40a7-806a-e695edc2f1c8
    """Straight-line depreciation per period; Excel errors among the inputs
    are passed through unchanged."""
    for argument in (cost, salvage, life):
        if isinstance(argument, ExcelError) or argument in ErrorCodes:
            return argument
    return (cost - salvage) / life
def vdb(cost, salvage, life, start_period, end_period, factor = 2, no_switch = False): # Excel reference: https://support.office.com/en-us/article/VDB-function-dde4e207-f3fa-488d-91d2-66d55e861d73
    """Excel VDB: depreciation between start_period and end_period using the
    declining-balance method, switching to straight-line when that gives a
    larger depreciation (unless no_switch is True)."""
    # propagate Excel errors received as arguments
    for arg in [cost, salvage, life, start_period, end_period, factor, no_switch]:
        if isinstance(arg, ExcelError) or arg in ErrorCodes:
            return arg

    for arg in [cost, salvage, life, start_period, end_period, factor]:
        if not isinstance(arg, (float, int)):
            return ExcelError('#VALUE', 'Arg %s should be an int, float or long, instead: %s' % (arg, type(arg)))

    # NOTE(review): the next two assignments are no-ops, and sln_depr computed
    # here is immediately overwritten with 0 below — likely leftovers
    start_period = start_period
    end_period = end_period
    sln_depr = sln(cost, salvage, life)
    depr_rate = factor / life                  # declining-balance rate per period
    acc_depr = 0                               # accumulated depreciation so far
    depr = 0                                   # depreciation of the current period
    switch_to_sln = False                      # whether we have switched to straight-line
    sln_depr = 0                               # straight-line amount once switched
    result = 0
    start_life = 0
    delta_life = life % 1
    if delta_life > 0: # to handle cases when life is not an integer
        end_life = int(life + 1)
    else:
        end_life = int(life)

    periods = list(range(start_life, end_life))

    if int(start_period) != start_period:
        # fractional start: depreciate the fractional slice up front and shift
        # all periods by half a year
        delta_start = abs(int(start_period) - start_period)

        depr = (cost - acc_depr) * depr_rate * delta_start
        acc_depr += depr

        start_life = 1

        periods = [x + 0.5 for x in periods]

    for index, current_year in enumerate(periods):

        if not no_switch: # no_switch = False (Default Case)
            if switch_to_sln:
                depr = sln_depr
            else:
                depr = (cost - acc_depr) * depr_rate
                acc_depr += depr

                temp_sln_depr = sln(cost, salvage, life)

                if depr < temp_sln_depr:
                    switch_to_sln = True
                    fixed_remaining_years = life - current_year - 1
                    fixed_remaining_cost = cost - acc_depr

                    # we need to check future sln: current depr should never be smaller than sln to come
                    sln_depr = sln(fixed_remaining_cost, salvage, fixed_remaining_years)

                    if sln_depr > depr: # if it's the case, we switch to sln earlier than the regular case
                        # cancel what has been done
                        acc_depr -= depr
                        fixed_remaining_years += 1
                        fixed_remaining_cost = cost - acc_depr

                        # recalculate depreciation
                        sln_depr = sln(fixed_remaining_cost, salvage, fixed_remaining_years)
                        depr = sln_depr
                        acc_depr += depr
        else: # no_switch = True
            depr = (cost - acc_depr) * depr_rate
            acc_depr += depr

        # accumulate only the share of this period's depreciation that falls
        # inside [start_period, end_period]
        delta_start = abs(current_year - start_period)

        if delta_start < 1 and delta_start != 0:
            result += depr * (1 - delta_start)
        elif current_year >= start_period and current_year < end_period:

            delta_end = abs(end_period - current_year)

            if delta_end < 1 and delta_end != 0:
                result += depr * delta_end
            else:
                result += depr

    return result
def xnpv(rate, values, dates, lim_rate = True): # Excel reference: https://support.office.com/en-us/article/XNPV-function-1b42bbf6-370f-4532-a0eb-d67c16b664b7
    """
    Function to calculate the net present value (NPV) using payments and non-periodic dates. It resembles the excel function XPNV().

    :param rate: the discount rate.
    :param values: the payments of which at least one has to be negative.
    :param dates: the dates as excel dates (e.g. 43571 for 16/04/2019).
    :param lim_rate: when True, reject a negative rate like Excel does.
    :return: a float being the NPV.
    """
    if isinstance(values, Range):
        values = values.values

    if isinstance(dates, Range):
        dates = dates.values

    if len(values) != len(dates):
        return ExcelError('#NUM!', '`values` range must be the same length as `dates` range in XNPV, %s != %s' % (len(values), len(dates)))

    if lim_rate and rate < 0:
        # the former message applied % arguments to a format string with no
        # placeholders, which raised a TypeError instead of returning the error
        return ExcelError('#NUM!', 'excel cannot handle a negative `rate`')

    xnpv = 0
    for v, d in zip(values, dates):
        # discount each payment by its day offset from the first date
        xnpv += v / np.power(1.0 + rate, (d - dates[0]) / 365)

    return xnpv
def pmt(*args): # Excel reference: https://support.office.com/en-us/article/PMT-function-0214da64-9a63-4996-bc20-214433fa6441
    """Periodic loan payment for (rate, num_payments, present_value).

    WARNING fv & type not used yet - both are assumed to be their defaults (0)
    """
    rate, num_payments, present_value = args[0], args[1], args[2]
    annuity_factor = 1 - np.power(1 + rate, -num_payments)
    return -present_value * rate / annuity_factor
# https://support.office.com/en-us/article/POWER-function-D3F2908B-56F4-4C3F-895A-07FB519C362A
def power(number, power):
    """Excel POWER: `number` raised to `power`, with Excel's domain errors."""
    if number == power == 0:
        # Really excel? What were you thinking?
        # (the former message applied '% str(number)' to a format string with
        # no placeholder, raising a TypeError instead of returning the error)
        return ExcelError('#NUM!', 'Number and power cannot both be zero')
    if power < 1 and number < 0:
        return ExcelError('#NUM!', '%s must be non-negative' % str(number))
    return np.power(number, power)
# https://support.office.com/en-ie/article/sqrt-function-654975c2-05c4-4831-9a24-2c65e4040fdf
def sqrt(number):
    """Excel SQRT: square root of a non-negative number."""
    if number < 0:
        # the former message referenced an undefined name (index_num), so a
        # negative input raised a NameError instead of returning #NUM!
        return ExcelError('#NUM!', '%s must be non-negative' % str(number))
    return np.sqrt(number)
# https://support.office.com/en-ie/article/today-function-5eb3078d-a82c-4736-8930-2f51a028fdd9
# https://support.office.com/en-us/article/concat-function-9b1a9a3f-94ff-41af-9736-694cbd6b4ca2
def concat(*args):
    """Excel CONCAT: flatten the arguments, then delegate to concatenate()."""
    flat_args = tuple(flatten(args))
    return concatenate(*flat_args)
# https://support.office.com/en-us/article/CONCATENATE-function-8F8AE884-2CA8-4F7A-B093-75D702BEA31D
# Important: In Excel 2016, Excel Mobile, and Excel Online, this function has
# been replaced with the CONCAT function. Although the CONCATENATE function is
# still available for backward compatibility, you should consider using CONCAT
# from now on. This is because CONCATENATE may not be available in future
# versions of Excel.
#
# BE AWARE; there are functional differences between CONACTENATE AND CONCAT
#
def concatenate(*args):
    """Excel CONCATENATE: join the string form of every argument.

    Unlike CONCAT, the arguments must already be flat (no nested ranges),
    and the result must fit in a cell.
    """
    if tuple(flatten(args)) != args:
        return ExcelError('#VALUE', 'Could not process arguments %s' % (args))

    cat_string = ''.join(str(a) for a in args)
    if len(cat_string) > CELL_CHARACTER_LIMIT:
        # fixed NameError: the length argument previously read 'cat_String'
        return ExcelError('#VALUE', 'Too long. concatentaed string should be no longer than %s but is %s' % (CELL_CHARACTER_LIMIT, len(cat_string)))
    return cat_string
if __name__ == '__main__':
    # Library module: importing it is the only entry point; no CLI behavior.
    pass
|
def repair_central_directory(zipFile, is_file_instance): # source: https://bitbucket.org/openpyxl/openpyxl/src/93604327bce7aac5e8270674579af76d390e09c0/openpyxl/reader/excel.py?at=default&fileviewer=file-view-default
    '''Trim trailing data from a ZIP file's central directory.

    code taken from http://stackoverflow.com/a/7457686/570216, courtesy of Uri Cohen
    '''
    f = zipFile if is_file_instance else open(zipFile, 'rb+')
    data = f.read()
    pos = data.find(CENTRAL_DIRECTORY_SIGNATURE)  # End of central directory signature
    if (pos > 0):
        # truncate everything after the end-of-central-directory record and
        # hand back an in-memory copy positioned at the start
        sio = BytesIO(data)
        sio.seek(pos + 22)  # size of 'ZIP end of central directory record'
        sio.truncate()
        sio.seek(0)
        return sio
    # signature not found: rewind and return the original file object
    f.seek(0)
    return f
code taken from http://stackoverflow.com/a/7457686/570216, courtesy of Uri Cohen | train | https://github.com/anthill/koala/blob/393089fe081380506e73235db18a32b4e078d222/koala/reader.py#L41-L56 | null | from __future__ import print_function
from io import BytesIO
import re
import os
import json
from openpyxl.formula.translate import Translator
from openpyxl.cell.text import Text
from openpyxl.utils.indexed_list import IndexedList
from openpyxl.xml.functions import iterparse, fromstring, safe_iterator
try:
from xml.etree.cElementTree import ElementTree as ET
except ImportError:
from xml.etree.ElementTree import ElementTree as ET
from openpyxl.xml.constants import (
SHEET_MAIN_NS,
REL_NS,
PKG_REL_NS,
CONTYPES_NS,
ARC_CONTENT_TYPES,
ARC_WORKBOOK,
ARC_WORKBOOK_RELS,
WORKSHEET_TYPE,
SHARED_STRINGS
)
# Directory containing this module, used to locate the bundled functions.json.
curfile = os.path.abspath(os.path.dirname(__file__))

# Load the list of known Excel function names once at import time.
# (note: `file` shadows the old Python 2 builtin of the same name)
with open('%s/functions.json' % curfile, 'r') as file:
    existing = json.load(file)
from zipfile import ZipFile, ZIP_DEFLATED, BadZipfile
from koala.Cell import Cell
from koala.utils import CELL_REF_RE, col2num
FLOAT_REGEX = re.compile(r"\.|[E-e]")
CENTRAL_DIRECTORY_SIGNATURE = b'\x50\x4b\x05\x06'
def read_archive(file_name):
is_file_like = hasattr(file_name, 'read')
if is_file_like:
# fileobject must have been opened with 'rb' flag
# it is required by zipfile
if getattr(file_name, 'encoding', None) is not None:
raise IOError("File-object must be opened in binary mode")
try:
archive = ZipFile(file_name, 'r', ZIP_DEFLATED)
except BadZipfile as e:
f = repair_central_directory(file_name, is_file_like)
archive = ZipFile(f, 'r', ZIP_DEFLATED)
return archive
def _cast_number(value): # source: https://bitbucket.org/openpyxl/openpyxl/src/93604327bce7aac5e8270674579af76d390e09c0/openpyxl/cell/read_only.py?at=default&fileviewer=file-view-default
"Convert numbers as string to an int or float"
m = FLOAT_REGEX.search(value)
if m is not None:
return float(value)
return int(value)
debug = False
def read_named_ranges(archive):
root = fromstring(archive.read(ARC_WORKBOOK))
dict = {}
for name_node in safe_iterator(root, '{%s}definedName' % SHEET_MAIN_NS):
name = name_node.get('name')
# if name in dict:
# raise Exception('Named_range %s is defined in multiple sheets' % name)
if not name_node.get('hidden'):
if name_node.get('name') == 'tR':
dict[name_node.get('name')] = 'Depreciation!A1:A1000'
elif '!#REF' in name_node.text:
dict[name_node.get('name')] = '#REF!'
else:
dict[name_node.get('name')] = name_node.text.replace('$','').replace(" ","")
return dict
def read_cells(archive, ignore_sheets = [], ignore_hidden = False):
global debug
# print('___### Reading Cells from XLSX ###___')
cells = {}
functions = set()
cts = dict(read_content_types(archive))
strings_path = cts.get(SHARED_STRINGS) # source: https://bitbucket.org/openpyxl/openpyxl/src/93604327bce7aac5e8270674579af76d390e09c0/openpyxl/reader/excel.py?at=default&fileviewer=file-view-default
if strings_path is not None:
if strings_path.startswith("/"):
strings_path = strings_path[1:]
shared_strings = read_string_table(archive.read(strings_path))
else:
shared_strings = []
for sheet in detect_worksheets(archive):
sheet_name = sheet['title']
function_map = {}
if sheet_name in ignore_sheets: continue
root = fromstring(archive.read(sheet['path'])) # it is necessary to use cElementTree from xml module, otherwise root.findall doesn't work as it should
hidden_cols = False
nb_hidden = 0
if ignore_hidden:
hidden_col_min = None
hidden_col_max = None
for col in root.findall('.//{%s}cols/*' % SHEET_MAIN_NS):
if 'hidden' in col.attrib and col.attrib['hidden'] == '1':
hidden_cols = True
hidden_col_min = int(col.attrib['min'])
hidden_col_max = int(col.attrib['max'])
for c in root.findall('.//{%s}c/*/..' % SHEET_MAIN_NS):
cell_data_type = c.get('t', 'n') # if no type assigned, assign 'number'
cell_address = c.attrib['r']
skip = False
if hidden_cols:
found = re.search(CELL_REF_RE, cell_address)
col = col2num(found.group(1))
if col >= hidden_col_min and col <= hidden_col_max:
nb_hidden += 1
skip = True
if not skip:
cell = {'a': '%s!%s' % (sheet_name, cell_address), 'f': None, 'v': None}
if debug:
print('Cell', cell['a'])
for child in c:
child_data_type = child.get('t', 'n') # if no type assigned, assign 'number'
if child.tag == '{%s}f' % SHEET_MAIN_NS :
if 'ref' in child.attrib: # the first cell of a shared formula has a 'ref' attribute
if debug:
print('*** Found definition of shared formula ***', child.text, child.attrib['ref'])
if "si" in child.attrib:
function_map[child.attrib['si']] = (child.attrib['ref'], Translator(str('=' + child.text), cell_address)) # translator of openpyxl needs a unicode argument that starts with '='
# else:
# print "Encountered cell with ref but not si: ", sheet_name, child.attrib['ref']
if child_data_type == 'shared':
if debug:
print('*** Found child %s of shared formula %s ***' % (cell_address, child.attrib['si']))
ref = function_map[child.attrib['si']][0]
formula = function_map[child.attrib['si']][1]
translated = formula.translate_formula(cell_address)
cell['f'] = translated[1:] # we need to get rid of the '='
else:
cell['f'] = child.text
elif child.tag == '{%s}v' % SHEET_MAIN_NS :
if cell_data_type == 's' or cell_data_type == 'str': # value is a string
try: # if it fails, it means that cell content is a string calculated from a formula
cell['v'] = shared_strings[int(child.text)]
except:
cell['v'] = child.text
elif cell_data_type == 'b':
cell['v'] = bool(int(child.text))
elif cell_data_type == 'n':
cell['v'] = _cast_number(child.text)
elif child.text is None:
continue
if cell['f'] is not None:
pattern = re.compile(r"([A-Z][A-Z0-9]*)\(")
found = re.findall(pattern, cell['f'])
map(lambda x: functions.add(x), found)
if cell['f'] is not None or cell['v'] is not None:
should_eval = 'always' if cell['f'] is not None and 'OFFSET' in cell['f'] else 'normal'
# cleaned_formula = cell['f']
cleaned_formula = cell['f'].replace(", ", ",") if cell['f'] is not None else None
if "!" in cell_address:
cells[cell_address] = Cell(cell_address, sheet_name, value = cell['v'], formula = cleaned_formula, should_eval=should_eval)
else:
cells[sheet_name + "!" + cell_address] = Cell(cell_address, sheet_name, value = cell['v'], formula = cleaned_formula, should_eval=should_eval)
# if nb_hidden > 0:
# print('Ignored %i hidden cells in sheet %s' % (nb_hidden, sheet_name))
# print('Nb of different functions %i' % len(functions))
# print(functions)
# for f in functions:
# if f not in existing:
# print('== Missing function: %s' % f)
return cells
def read_rels(archive):
"""Read relationships for a workbook"""
xml_source = archive.read(ARC_WORKBOOK_RELS)
tree = fromstring(xml_source)
for element in safe_iterator(tree, '{%s}Relationship' % PKG_REL_NS):
rId = element.get('Id')
pth = element.get("Target")
typ = element.get('Type')
# normalise path
if pth.startswith("/xl"):
pth = pth.replace("/xl", "xl")
elif not pth.startswith("xl") and not pth.startswith(".."):
pth = "xl/" + pth
yield rId, {'path':pth, 'type':typ}
def read_content_types(archive):
"""Read content types."""
xml_source = archive.read(ARC_CONTENT_TYPES)
root = fromstring(xml_source)
contents_root = root.findall('{%s}Override' % CONTYPES_NS)
for type in contents_root:
yield type.get('ContentType'), type.get('PartName')
def read_sheets(archive):
"""Read worksheet titles and ids for a workbook"""
xml_source = archive.read(ARC_WORKBOOK)
tree = fromstring(xml_source)
for element in safe_iterator(tree, '{%s}sheet' % SHEET_MAIN_NS):
attrib = element.attrib
attrib['id'] = attrib["{%s}id" % REL_NS]
del attrib["{%s}id" % REL_NS]
if attrib['id']:
yield attrib
def detect_worksheets(archive):
"""Return a list of worksheets"""
# content types has a list of paths but no titles
# workbook has a list of titles and relIds but no paths
# workbook_rels has a list of relIds and paths but no titles
# rels = {'id':{'title':'', 'path':''} }
content_types = read_content_types(archive)
valid_sheets = dict((path, ct) for ct, path in content_types if ct == WORKSHEET_TYPE)
rels = dict(read_rels(archive))
for sheet in read_sheets(archive):
rel = rels[sheet['id']]
rel['title'] = sheet['name']
rel['sheet_id'] = sheet['sheetId']
rel['state'] = sheet.get('state', 'visible')
if ("/" + rel['path'] in valid_sheets
or "worksheets" in rel['path']): # fallback in case content type is missing
yield rel
def read_string_table(xml_source):
"""Read in all shared strings in the table"""
strings = []
src = _get_xml_iter(xml_source)
for _, node in iterparse(src):
if node.tag == '{%s}si' % SHEET_MAIN_NS:
text = Text.from_tree(node).content
text = text.replace('x005F_', '')
strings.append(text)
node.clear()
return IndexedList(strings)
def _get_xml_iter(xml_source):
"""
Possible inputs: strings, bytes, members of zipfile, temporary file
Always return a file like object
"""
if not hasattr(xml_source, 'read'):
try:
xml_source = xml_source.encode("utf-8")
except (AttributeError, UnicodeDecodeError):
pass
return BytesIO(xml_source)
else:
try:
xml_source.seek(0)
except:
# could be a zipfile
pass
return xml_source
|
anthill/koala | koala/reader.py | _cast_number | python | def _cast_number(value): # source: https://bitbucket.org/openpyxl/openpyxl/src/93604327bce7aac5e8270674579af76d390e09c0/openpyxl/cell/read_only.py?at=default&fileviewer=file-view-default
"Convert numbers as string to an int or float"
m = FLOAT_REGEX.search(value)
if m is not None:
return float(value)
return int(value) | Convert numbers as string to an int or float | train | https://github.com/anthill/koala/blob/393089fe081380506e73235db18a32b4e078d222/koala/reader.py#L74-L79 | null | from __future__ import print_function
from io import BytesIO
import re
import os
import json
from openpyxl.formula.translate import Translator
from openpyxl.cell.text import Text
from openpyxl.utils.indexed_list import IndexedList
from openpyxl.xml.functions import iterparse, fromstring, safe_iterator
try:
from xml.etree.cElementTree import ElementTree as ET
except ImportError:
from xml.etree.ElementTree import ElementTree as ET
from openpyxl.xml.constants import (
SHEET_MAIN_NS,
REL_NS,
PKG_REL_NS,
CONTYPES_NS,
ARC_CONTENT_TYPES,
ARC_WORKBOOK,
ARC_WORKBOOK_RELS,
WORKSHEET_TYPE,
SHARED_STRINGS
)
curfile = os.path.abspath(os.path.dirname(__file__))
with open('%s/functions.json' % curfile, 'r') as file:
existing = json.load(file)
from zipfile import ZipFile, ZIP_DEFLATED, BadZipfile
from koala.Cell import Cell
from koala.utils import CELL_REF_RE, col2num
FLOAT_REGEX = re.compile(r"\.|[E-e]")
CENTRAL_DIRECTORY_SIGNATURE = b'\x50\x4b\x05\x06'
def repair_central_directory(zipFile, is_file_instance): # source: https://bitbucket.org/openpyxl/openpyxl/src/93604327bce7aac5e8270674579af76d390e09c0/openpyxl/reader/excel.py?at=default&fileviewer=file-view-default
''' trims trailing data from the central directory
code taken from http://stackoverflow.com/a/7457686/570216, courtesy of Uri Cohen
'''
f = zipFile if is_file_instance else open(zipFile, 'rb+')
data = f.read()
pos = data.find(CENTRAL_DIRECTORY_SIGNATURE) # End of central directory signature
if (pos > 0):
sio = BytesIO(data)
sio.seek(pos + 22) # size of 'ZIP end of central directory record'
sio.truncate()
sio.seek(0)
return sio
f.seek(0)
return f
def read_archive(file_name):
is_file_like = hasattr(file_name, 'read')
if is_file_like:
# fileobject must have been opened with 'rb' flag
# it is required by zipfile
if getattr(file_name, 'encoding', None) is not None:
raise IOError("File-object must be opened in binary mode")
try:
archive = ZipFile(file_name, 'r', ZIP_DEFLATED)
except BadZipfile as e:
f = repair_central_directory(file_name, is_file_like)
archive = ZipFile(f, 'r', ZIP_DEFLATED)
return archive
debug = False
def read_named_ranges(archive):
root = fromstring(archive.read(ARC_WORKBOOK))
dict = {}
for name_node in safe_iterator(root, '{%s}definedName' % SHEET_MAIN_NS):
name = name_node.get('name')
# if name in dict:
# raise Exception('Named_range %s is defined in multiple sheets' % name)
if not name_node.get('hidden'):
if name_node.get('name') == 'tR':
dict[name_node.get('name')] = 'Depreciation!A1:A1000'
elif '!#REF' in name_node.text:
dict[name_node.get('name')] = '#REF!'
else:
dict[name_node.get('name')] = name_node.text.replace('$','').replace(" ","")
return dict
def read_cells(archive, ignore_sheets = [], ignore_hidden = False):
global debug
# print('___### Reading Cells from XLSX ###___')
cells = {}
functions = set()
cts = dict(read_content_types(archive))
strings_path = cts.get(SHARED_STRINGS) # source: https://bitbucket.org/openpyxl/openpyxl/src/93604327bce7aac5e8270674579af76d390e09c0/openpyxl/reader/excel.py?at=default&fileviewer=file-view-default
if strings_path is not None:
if strings_path.startswith("/"):
strings_path = strings_path[1:]
shared_strings = read_string_table(archive.read(strings_path))
else:
shared_strings = []
for sheet in detect_worksheets(archive):
sheet_name = sheet['title']
function_map = {}
if sheet_name in ignore_sheets: continue
root = fromstring(archive.read(sheet['path'])) # it is necessary to use cElementTree from xml module, otherwise root.findall doesn't work as it should
hidden_cols = False
nb_hidden = 0
if ignore_hidden:
hidden_col_min = None
hidden_col_max = None
for col in root.findall('.//{%s}cols/*' % SHEET_MAIN_NS):
if 'hidden' in col.attrib and col.attrib['hidden'] == '1':
hidden_cols = True
hidden_col_min = int(col.attrib['min'])
hidden_col_max = int(col.attrib['max'])
for c in root.findall('.//{%s}c/*/..' % SHEET_MAIN_NS):
cell_data_type = c.get('t', 'n') # if no type assigned, assign 'number'
cell_address = c.attrib['r']
skip = False
if hidden_cols:
found = re.search(CELL_REF_RE, cell_address)
col = col2num(found.group(1))
if col >= hidden_col_min and col <= hidden_col_max:
nb_hidden += 1
skip = True
if not skip:
cell = {'a': '%s!%s' % (sheet_name, cell_address), 'f': None, 'v': None}
if debug:
print('Cell', cell['a'])
for child in c:
child_data_type = child.get('t', 'n') # if no type assigned, assign 'number'
if child.tag == '{%s}f' % SHEET_MAIN_NS :
if 'ref' in child.attrib: # the first cell of a shared formula has a 'ref' attribute
if debug:
print('*** Found definition of shared formula ***', child.text, child.attrib['ref'])
if "si" in child.attrib:
function_map[child.attrib['si']] = (child.attrib['ref'], Translator(str('=' + child.text), cell_address)) # translator of openpyxl needs a unicode argument that starts with '='
# else:
# print "Encountered cell with ref but not si: ", sheet_name, child.attrib['ref']
if child_data_type == 'shared':
if debug:
print('*** Found child %s of shared formula %s ***' % (cell_address, child.attrib['si']))
ref = function_map[child.attrib['si']][0]
formula = function_map[child.attrib['si']][1]
translated = formula.translate_formula(cell_address)
cell['f'] = translated[1:] # we need to get rid of the '='
else:
cell['f'] = child.text
elif child.tag == '{%s}v' % SHEET_MAIN_NS :
if cell_data_type == 's' or cell_data_type == 'str': # value is a string
try: # if it fails, it means that cell content is a string calculated from a formula
cell['v'] = shared_strings[int(child.text)]
except:
cell['v'] = child.text
elif cell_data_type == 'b':
cell['v'] = bool(int(child.text))
elif cell_data_type == 'n':
cell['v'] = _cast_number(child.text)
elif child.text is None:
continue
if cell['f'] is not None:
pattern = re.compile(r"([A-Z][A-Z0-9]*)\(")
found = re.findall(pattern, cell['f'])
map(lambda x: functions.add(x), found)
if cell['f'] is not None or cell['v'] is not None:
should_eval = 'always' if cell['f'] is not None and 'OFFSET' in cell['f'] else 'normal'
# cleaned_formula = cell['f']
cleaned_formula = cell['f'].replace(", ", ",") if cell['f'] is not None else None
if "!" in cell_address:
cells[cell_address] = Cell(cell_address, sheet_name, value = cell['v'], formula = cleaned_formula, should_eval=should_eval)
else:
cells[sheet_name + "!" + cell_address] = Cell(cell_address, sheet_name, value = cell['v'], formula = cleaned_formula, should_eval=should_eval)
# if nb_hidden > 0:
# print('Ignored %i hidden cells in sheet %s' % (nb_hidden, sheet_name))
# print('Nb of different functions %i' % len(functions))
# print(functions)
# for f in functions:
# if f not in existing:
# print('== Missing function: %s' % f)
return cells
def read_rels(archive):
"""Read relationships for a workbook"""
xml_source = archive.read(ARC_WORKBOOK_RELS)
tree = fromstring(xml_source)
for element in safe_iterator(tree, '{%s}Relationship' % PKG_REL_NS):
rId = element.get('Id')
pth = element.get("Target")
typ = element.get('Type')
# normalise path
if pth.startswith("/xl"):
pth = pth.replace("/xl", "xl")
elif not pth.startswith("xl") and not pth.startswith(".."):
pth = "xl/" + pth
yield rId, {'path':pth, 'type':typ}
def read_content_types(archive):
"""Read content types."""
xml_source = archive.read(ARC_CONTENT_TYPES)
root = fromstring(xml_source)
contents_root = root.findall('{%s}Override' % CONTYPES_NS)
for type in contents_root:
yield type.get('ContentType'), type.get('PartName')
def read_sheets(archive):
"""Read worksheet titles and ids for a workbook"""
xml_source = archive.read(ARC_WORKBOOK)
tree = fromstring(xml_source)
for element in safe_iterator(tree, '{%s}sheet' % SHEET_MAIN_NS):
attrib = element.attrib
attrib['id'] = attrib["{%s}id" % REL_NS]
del attrib["{%s}id" % REL_NS]
if attrib['id']:
yield attrib
def detect_worksheets(archive):
"""Return a list of worksheets"""
# content types has a list of paths but no titles
# workbook has a list of titles and relIds but no paths
# workbook_rels has a list of relIds and paths but no titles
# rels = {'id':{'title':'', 'path':''} }
content_types = read_content_types(archive)
valid_sheets = dict((path, ct) for ct, path in content_types if ct == WORKSHEET_TYPE)
rels = dict(read_rels(archive))
for sheet in read_sheets(archive):
rel = rels[sheet['id']]
rel['title'] = sheet['name']
rel['sheet_id'] = sheet['sheetId']
rel['state'] = sheet.get('state', 'visible')
if ("/" + rel['path'] in valid_sheets
or "worksheets" in rel['path']): # fallback in case content type is missing
yield rel
def read_string_table(xml_source):
"""Read in all shared strings in the table"""
strings = []
src = _get_xml_iter(xml_source)
for _, node in iterparse(src):
if node.tag == '{%s}si' % SHEET_MAIN_NS:
text = Text.from_tree(node).content
text = text.replace('x005F_', '')
strings.append(text)
node.clear()
return IndexedList(strings)
def _get_xml_iter(xml_source):
"""
Possible inputs: strings, bytes, members of zipfile, temporary file
Always return a file like object
"""
if not hasattr(xml_source, 'read'):
try:
xml_source = xml_source.encode("utf-8")
except (AttributeError, UnicodeDecodeError):
pass
return BytesIO(xml_source)
else:
try:
xml_source.seek(0)
except:
# could be a zipfile
pass
return xml_source
|
anthill/koala | koala/reader.py | read_rels | python | def read_rels(archive):
xml_source = archive.read(ARC_WORKBOOK_RELS)
tree = fromstring(xml_source)
for element in safe_iterator(tree, '{%s}Relationship' % PKG_REL_NS):
rId = element.get('Id')
pth = element.get("Target")
typ = element.get('Type')
# normalise path
if pth.startswith("/xl"):
pth = pth.replace("/xl", "xl")
elif not pth.startswith("xl") and not pth.startswith(".."):
pth = "xl/" + pth
yield rId, {'path':pth, 'type':typ} | Read relationships for a workbook | train | https://github.com/anthill/koala/blob/393089fe081380506e73235db18a32b4e078d222/koala/reader.py#L231-L244 | null | from __future__ import print_function
from io import BytesIO
import re
import os
import json
from openpyxl.formula.translate import Translator
from openpyxl.cell.text import Text
from openpyxl.utils.indexed_list import IndexedList
from openpyxl.xml.functions import iterparse, fromstring, safe_iterator
try:
from xml.etree.cElementTree import ElementTree as ET
except ImportError:
from xml.etree.ElementTree import ElementTree as ET
from openpyxl.xml.constants import (
SHEET_MAIN_NS,
REL_NS,
PKG_REL_NS,
CONTYPES_NS,
ARC_CONTENT_TYPES,
ARC_WORKBOOK,
ARC_WORKBOOK_RELS,
WORKSHEET_TYPE,
SHARED_STRINGS
)
curfile = os.path.abspath(os.path.dirname(__file__))
with open('%s/functions.json' % curfile, 'r') as file:
existing = json.load(file)
from zipfile import ZipFile, ZIP_DEFLATED, BadZipfile
from koala.Cell import Cell
from koala.utils import CELL_REF_RE, col2num
FLOAT_REGEX = re.compile(r"\.|[E-e]")
CENTRAL_DIRECTORY_SIGNATURE = b'\x50\x4b\x05\x06'
def repair_central_directory(zipFile, is_file_instance): # source: https://bitbucket.org/openpyxl/openpyxl/src/93604327bce7aac5e8270674579af76d390e09c0/openpyxl/reader/excel.py?at=default&fileviewer=file-view-default
''' trims trailing data from the central directory
code taken from http://stackoverflow.com/a/7457686/570216, courtesy of Uri Cohen
'''
f = zipFile if is_file_instance else open(zipFile, 'rb+')
data = f.read()
pos = data.find(CENTRAL_DIRECTORY_SIGNATURE) # End of central directory signature
if (pos > 0):
sio = BytesIO(data)
sio.seek(pos + 22) # size of 'ZIP end of central directory record'
sio.truncate()
sio.seek(0)
return sio
f.seek(0)
return f
def read_archive(file_name):
is_file_like = hasattr(file_name, 'read')
if is_file_like:
# fileobject must have been opened with 'rb' flag
# it is required by zipfile
if getattr(file_name, 'encoding', None) is not None:
raise IOError("File-object must be opened in binary mode")
try:
archive = ZipFile(file_name, 'r', ZIP_DEFLATED)
except BadZipfile as e:
f = repair_central_directory(file_name, is_file_like)
archive = ZipFile(f, 'r', ZIP_DEFLATED)
return archive
def _cast_number(value): # source: https://bitbucket.org/openpyxl/openpyxl/src/93604327bce7aac5e8270674579af76d390e09c0/openpyxl/cell/read_only.py?at=default&fileviewer=file-view-default
"Convert numbers as string to an int or float"
m = FLOAT_REGEX.search(value)
if m is not None:
return float(value)
return int(value)
debug = False
def read_named_ranges(archive):
root = fromstring(archive.read(ARC_WORKBOOK))
dict = {}
for name_node in safe_iterator(root, '{%s}definedName' % SHEET_MAIN_NS):
name = name_node.get('name')
# if name in dict:
# raise Exception('Named_range %s is defined in multiple sheets' % name)
if not name_node.get('hidden'):
if name_node.get('name') == 'tR':
dict[name_node.get('name')] = 'Depreciation!A1:A1000'
elif '!#REF' in name_node.text:
dict[name_node.get('name')] = '#REF!'
else:
dict[name_node.get('name')] = name_node.text.replace('$','').replace(" ","")
return dict
def read_cells(archive, ignore_sheets = [], ignore_hidden = False):
global debug
# print('___### Reading Cells from XLSX ###___')
cells = {}
functions = set()
cts = dict(read_content_types(archive))
strings_path = cts.get(SHARED_STRINGS) # source: https://bitbucket.org/openpyxl/openpyxl/src/93604327bce7aac5e8270674579af76d390e09c0/openpyxl/reader/excel.py?at=default&fileviewer=file-view-default
if strings_path is not None:
if strings_path.startswith("/"):
strings_path = strings_path[1:]
shared_strings = read_string_table(archive.read(strings_path))
else:
shared_strings = []
for sheet in detect_worksheets(archive):
sheet_name = sheet['title']
function_map = {}
if sheet_name in ignore_sheets: continue
root = fromstring(archive.read(sheet['path'])) # it is necessary to use cElementTree from xml module, otherwise root.findall doesn't work as it should
hidden_cols = False
nb_hidden = 0
if ignore_hidden:
hidden_col_min = None
hidden_col_max = None
for col in root.findall('.//{%s}cols/*' % SHEET_MAIN_NS):
if 'hidden' in col.attrib and col.attrib['hidden'] == '1':
hidden_cols = True
hidden_col_min = int(col.attrib['min'])
hidden_col_max = int(col.attrib['max'])
for c in root.findall('.//{%s}c/*/..' % SHEET_MAIN_NS):
cell_data_type = c.get('t', 'n') # if no type assigned, assign 'number'
cell_address = c.attrib['r']
skip = False
if hidden_cols:
found = re.search(CELL_REF_RE, cell_address)
col = col2num(found.group(1))
if col >= hidden_col_min and col <= hidden_col_max:
nb_hidden += 1
skip = True
if not skip:
cell = {'a': '%s!%s' % (sheet_name, cell_address), 'f': None, 'v': None}
if debug:
print('Cell', cell['a'])
for child in c:
child_data_type = child.get('t', 'n') # if no type assigned, assign 'number'
if child.tag == '{%s}f' % SHEET_MAIN_NS :
if 'ref' in child.attrib: # the first cell of a shared formula has a 'ref' attribute
if debug:
print('*** Found definition of shared formula ***', child.text, child.attrib['ref'])
if "si" in child.attrib:
function_map[child.attrib['si']] = (child.attrib['ref'], Translator(str('=' + child.text), cell_address)) # translator of openpyxl needs a unicode argument that starts with '='
# else:
# print "Encountered cell with ref but not si: ", sheet_name, child.attrib['ref']
if child_data_type == 'shared':
if debug:
print('*** Found child %s of shared formula %s ***' % (cell_address, child.attrib['si']))
ref = function_map[child.attrib['si']][0]
formula = function_map[child.attrib['si']][1]
translated = formula.translate_formula(cell_address)
cell['f'] = translated[1:] # we need to get rid of the '='
else:
cell['f'] = child.text
elif child.tag == '{%s}v' % SHEET_MAIN_NS :
if cell_data_type == 's' or cell_data_type == 'str': # value is a string
try: # if it fails, it means that cell content is a string calculated from a formula
cell['v'] = shared_strings[int(child.text)]
except:
cell['v'] = child.text
elif cell_data_type == 'b':
cell['v'] = bool(int(child.text))
elif cell_data_type == 'n':
cell['v'] = _cast_number(child.text)
elif child.text is None:
continue
if cell['f'] is not None:
pattern = re.compile(r"([A-Z][A-Z0-9]*)\(")
found = re.findall(pattern, cell['f'])
map(lambda x: functions.add(x), found)
if cell['f'] is not None or cell['v'] is not None:
should_eval = 'always' if cell['f'] is not None and 'OFFSET' in cell['f'] else 'normal'
# cleaned_formula = cell['f']
cleaned_formula = cell['f'].replace(", ", ",") if cell['f'] is not None else None
if "!" in cell_address:
cells[cell_address] = Cell(cell_address, sheet_name, value = cell['v'], formula = cleaned_formula, should_eval=should_eval)
else:
cells[sheet_name + "!" + cell_address] = Cell(cell_address, sheet_name, value = cell['v'], formula = cleaned_formula, should_eval=should_eval)
# if nb_hidden > 0:
# print('Ignored %i hidden cells in sheet %s' % (nb_hidden, sheet_name))
# print('Nb of different functions %i' % len(functions))
# print(functions)
# for f in functions:
# if f not in existing:
# print('== Missing function: %s' % f)
return cells
def read_content_types(archive):
"""Read content types."""
xml_source = archive.read(ARC_CONTENT_TYPES)
root = fromstring(xml_source)
contents_root = root.findall('{%s}Override' % CONTYPES_NS)
for type in contents_root:
yield type.get('ContentType'), type.get('PartName')
def read_sheets(archive):
"""Read worksheet titles and ids for a workbook"""
xml_source = archive.read(ARC_WORKBOOK)
tree = fromstring(xml_source)
for element in safe_iterator(tree, '{%s}sheet' % SHEET_MAIN_NS):
attrib = element.attrib
attrib['id'] = attrib["{%s}id" % REL_NS]
del attrib["{%s}id" % REL_NS]
if attrib['id']:
yield attrib
def detect_worksheets(archive):
"""Return a list of worksheets"""
# content types has a list of paths but no titles
# workbook has a list of titles and relIds but no paths
# workbook_rels has a list of relIds and paths but no titles
# rels = {'id':{'title':'', 'path':''} }
content_types = read_content_types(archive)
valid_sheets = dict((path, ct) for ct, path in content_types if ct == WORKSHEET_TYPE)
rels = dict(read_rels(archive))
for sheet in read_sheets(archive):
rel = rels[sheet['id']]
rel['title'] = sheet['name']
rel['sheet_id'] = sheet['sheetId']
rel['state'] = sheet.get('state', 'visible')
if ("/" + rel['path'] in valid_sheets
or "worksheets" in rel['path']): # fallback in case content type is missing
yield rel
def read_string_table(xml_source):
"""Read in all shared strings in the table"""
strings = []
src = _get_xml_iter(xml_source)
for _, node in iterparse(src):
if node.tag == '{%s}si' % SHEET_MAIN_NS:
text = Text.from_tree(node).content
text = text.replace('x005F_', '')
strings.append(text)
node.clear()
return IndexedList(strings)
def _get_xml_iter(xml_source):
"""
Possible inputs: strings, bytes, members of zipfile, temporary file
Always return a file like object
"""
if not hasattr(xml_source, 'read'):
try:
xml_source = xml_source.encode("utf-8")
except (AttributeError, UnicodeDecodeError):
pass
return BytesIO(xml_source)
else:
try:
xml_source.seek(0)
except:
# could be a zipfile
pass
return xml_source
|
anthill/koala | koala/reader.py | read_content_types | python | def read_content_types(archive):
xml_source = archive.read(ARC_CONTENT_TYPES)
root = fromstring(xml_source)
contents_root = root.findall('{%s}Override' % CONTYPES_NS)
for type in contents_root:
yield type.get('ContentType'), type.get('PartName') | Read content types. | train | https://github.com/anthill/koala/blob/393089fe081380506e73235db18a32b4e078d222/koala/reader.py#L246-L252 | null | from __future__ import print_function
from io import BytesIO
import re
import os
import json
from openpyxl.formula.translate import Translator
from openpyxl.cell.text import Text
from openpyxl.utils.indexed_list import IndexedList
from openpyxl.xml.functions import iterparse, fromstring, safe_iterator
try:
from xml.etree.cElementTree import ElementTree as ET
except ImportError:
from xml.etree.ElementTree import ElementTree as ET
from openpyxl.xml.constants import (
SHEET_MAIN_NS,
REL_NS,
PKG_REL_NS,
CONTYPES_NS,
ARC_CONTENT_TYPES,
ARC_WORKBOOK,
ARC_WORKBOOK_RELS,
WORKSHEET_TYPE,
SHARED_STRINGS
)
curfile = os.path.abspath(os.path.dirname(__file__))
with open('%s/functions.json' % curfile, 'r') as file:
existing = json.load(file)
from zipfile import ZipFile, ZIP_DEFLATED, BadZipfile
from koala.Cell import Cell
from koala.utils import CELL_REF_RE, col2num
FLOAT_REGEX = re.compile(r"\.|[E-e]")
CENTRAL_DIRECTORY_SIGNATURE = b'\x50\x4b\x05\x06'
def repair_central_directory(zipFile, is_file_instance): # source: https://bitbucket.org/openpyxl/openpyxl/src/93604327bce7aac5e8270674579af76d390e09c0/openpyxl/reader/excel.py?at=default&fileviewer=file-view-default
''' trims trailing data from the central directory
code taken from http://stackoverflow.com/a/7457686/570216, courtesy of Uri Cohen
'''
f = zipFile if is_file_instance else open(zipFile, 'rb+')
data = f.read()
pos = data.find(CENTRAL_DIRECTORY_SIGNATURE) # End of central directory signature
if (pos > 0):
sio = BytesIO(data)
sio.seek(pos + 22) # size of 'ZIP end of central directory record'
sio.truncate()
sio.seek(0)
return sio
f.seek(0)
return f
def read_archive(file_name):
is_file_like = hasattr(file_name, 'read')
if is_file_like:
# fileobject must have been opened with 'rb' flag
# it is required by zipfile
if getattr(file_name, 'encoding', None) is not None:
raise IOError("File-object must be opened in binary mode")
try:
archive = ZipFile(file_name, 'r', ZIP_DEFLATED)
except BadZipfile as e:
f = repair_central_directory(file_name, is_file_like)
archive = ZipFile(f, 'r', ZIP_DEFLATED)
return archive
def _cast_number(value): # source: https://bitbucket.org/openpyxl/openpyxl/src/93604327bce7aac5e8270674579af76d390e09c0/openpyxl/cell/read_only.py?at=default&fileviewer=file-view-default
"Convert numbers as string to an int or float"
m = FLOAT_REGEX.search(value)
if m is not None:
return float(value)
return int(value)
debug = False
def read_named_ranges(archive):
root = fromstring(archive.read(ARC_WORKBOOK))
dict = {}
for name_node in safe_iterator(root, '{%s}definedName' % SHEET_MAIN_NS):
name = name_node.get('name')
# if name in dict:
# raise Exception('Named_range %s is defined in multiple sheets' % name)
if not name_node.get('hidden'):
if name_node.get('name') == 'tR':
dict[name_node.get('name')] = 'Depreciation!A1:A1000'
elif '!#REF' in name_node.text:
dict[name_node.get('name')] = '#REF!'
else:
dict[name_node.get('name')] = name_node.text.replace('$','').replace(" ","")
return dict
def read_cells(archive, ignore_sheets = [], ignore_hidden = False):
global debug
# print('___### Reading Cells from XLSX ###___')
cells = {}
functions = set()
cts = dict(read_content_types(archive))
strings_path = cts.get(SHARED_STRINGS) # source: https://bitbucket.org/openpyxl/openpyxl/src/93604327bce7aac5e8270674579af76d390e09c0/openpyxl/reader/excel.py?at=default&fileviewer=file-view-default
if strings_path is not None:
if strings_path.startswith("/"):
strings_path = strings_path[1:]
shared_strings = read_string_table(archive.read(strings_path))
else:
shared_strings = []
for sheet in detect_worksheets(archive):
sheet_name = sheet['title']
function_map = {}
if sheet_name in ignore_sheets: continue
root = fromstring(archive.read(sheet['path'])) # it is necessary to use cElementTree from xml module, otherwise root.findall doesn't work as it should
hidden_cols = False
nb_hidden = 0
if ignore_hidden:
hidden_col_min = None
hidden_col_max = None
for col in root.findall('.//{%s}cols/*' % SHEET_MAIN_NS):
if 'hidden' in col.attrib and col.attrib['hidden'] == '1':
hidden_cols = True
hidden_col_min = int(col.attrib['min'])
hidden_col_max = int(col.attrib['max'])
for c in root.findall('.//{%s}c/*/..' % SHEET_MAIN_NS):
cell_data_type = c.get('t', 'n') # if no type assigned, assign 'number'
cell_address = c.attrib['r']
skip = False
if hidden_cols:
found = re.search(CELL_REF_RE, cell_address)
col = col2num(found.group(1))
if col >= hidden_col_min and col <= hidden_col_max:
nb_hidden += 1
skip = True
if not skip:
cell = {'a': '%s!%s' % (sheet_name, cell_address), 'f': None, 'v': None}
if debug:
print('Cell', cell['a'])
for child in c:
child_data_type = child.get('t', 'n') # if no type assigned, assign 'number'
if child.tag == '{%s}f' % SHEET_MAIN_NS :
if 'ref' in child.attrib: # the first cell of a shared formula has a 'ref' attribute
if debug:
print('*** Found definition of shared formula ***', child.text, child.attrib['ref'])
if "si" in child.attrib:
function_map[child.attrib['si']] = (child.attrib['ref'], Translator(str('=' + child.text), cell_address)) # translator of openpyxl needs a unicode argument that starts with '='
# else:
# print "Encountered cell with ref but not si: ", sheet_name, child.attrib['ref']
if child_data_type == 'shared':
if debug:
print('*** Found child %s of shared formula %s ***' % (cell_address, child.attrib['si']))
ref = function_map[child.attrib['si']][0]
formula = function_map[child.attrib['si']][1]
translated = formula.translate_formula(cell_address)
cell['f'] = translated[1:] # we need to get rid of the '='
else:
cell['f'] = child.text
elif child.tag == '{%s}v' % SHEET_MAIN_NS :
if cell_data_type == 's' or cell_data_type == 'str': # value is a string
try: # if it fails, it means that cell content is a string calculated from a formula
cell['v'] = shared_strings[int(child.text)]
except:
cell['v'] = child.text
elif cell_data_type == 'b':
cell['v'] = bool(int(child.text))
elif cell_data_type == 'n':
cell['v'] = _cast_number(child.text)
elif child.text is None:
continue
if cell['f'] is not None:
pattern = re.compile(r"([A-Z][A-Z0-9]*)\(")
found = re.findall(pattern, cell['f'])
map(lambda x: functions.add(x), found)
if cell['f'] is not None or cell['v'] is not None:
should_eval = 'always' if cell['f'] is not None and 'OFFSET' in cell['f'] else 'normal'
# cleaned_formula = cell['f']
cleaned_formula = cell['f'].replace(", ", ",") if cell['f'] is not None else None
if "!" in cell_address:
cells[cell_address] = Cell(cell_address, sheet_name, value = cell['v'], formula = cleaned_formula, should_eval=should_eval)
else:
cells[sheet_name + "!" + cell_address] = Cell(cell_address, sheet_name, value = cell['v'], formula = cleaned_formula, should_eval=should_eval)
# if nb_hidden > 0:
# print('Ignored %i hidden cells in sheet %s' % (nb_hidden, sheet_name))
# print('Nb of different functions %i' % len(functions))
# print(functions)
# for f in functions:
# if f not in existing:
# print('== Missing function: %s' % f)
return cells
def read_rels(archive):
"""Read relationships for a workbook"""
xml_source = archive.read(ARC_WORKBOOK_RELS)
tree = fromstring(xml_source)
for element in safe_iterator(tree, '{%s}Relationship' % PKG_REL_NS):
rId = element.get('Id')
pth = element.get("Target")
typ = element.get('Type')
# normalise path
if pth.startswith("/xl"):
pth = pth.replace("/xl", "xl")
elif not pth.startswith("xl") and not pth.startswith(".."):
pth = "xl/" + pth
yield rId, {'path':pth, 'type':typ}
def read_sheets(archive):
"""Read worksheet titles and ids for a workbook"""
xml_source = archive.read(ARC_WORKBOOK)
tree = fromstring(xml_source)
for element in safe_iterator(tree, '{%s}sheet' % SHEET_MAIN_NS):
attrib = element.attrib
attrib['id'] = attrib["{%s}id" % REL_NS]
del attrib["{%s}id" % REL_NS]
if attrib['id']:
yield attrib
def detect_worksheets(archive):
"""Return a list of worksheets"""
# content types has a list of paths but no titles
# workbook has a list of titles and relIds but no paths
# workbook_rels has a list of relIds and paths but no titles
# rels = {'id':{'title':'', 'path':''} }
content_types = read_content_types(archive)
valid_sheets = dict((path, ct) for ct, path in content_types if ct == WORKSHEET_TYPE)
rels = dict(read_rels(archive))
for sheet in read_sheets(archive):
rel = rels[sheet['id']]
rel['title'] = sheet['name']
rel['sheet_id'] = sheet['sheetId']
rel['state'] = sheet.get('state', 'visible')
if ("/" + rel['path'] in valid_sheets
or "worksheets" in rel['path']): # fallback in case content type is missing
yield rel
def read_string_table(xml_source):
"""Read in all shared strings in the table"""
strings = []
src = _get_xml_iter(xml_source)
for _, node in iterparse(src):
if node.tag == '{%s}si' % SHEET_MAIN_NS:
text = Text.from_tree(node).content
text = text.replace('x005F_', '')
strings.append(text)
node.clear()
return IndexedList(strings)
def _get_xml_iter(xml_source):
"""
Possible inputs: strings, bytes, members of zipfile, temporary file
Always return a file like object
"""
if not hasattr(xml_source, 'read'):
try:
xml_source = xml_source.encode("utf-8")
except (AttributeError, UnicodeDecodeError):
pass
return BytesIO(xml_source)
else:
try:
xml_source.seek(0)
except:
# could be a zipfile
pass
return xml_source
|
anthill/koala | koala/reader.py | read_sheets | python | def read_sheets(archive):
xml_source = archive.read(ARC_WORKBOOK)
tree = fromstring(xml_source)
for element in safe_iterator(tree, '{%s}sheet' % SHEET_MAIN_NS):
attrib = element.attrib
attrib['id'] = attrib["{%s}id" % REL_NS]
del attrib["{%s}id" % REL_NS]
if attrib['id']:
yield attrib | Read worksheet titles and ids for a workbook | train | https://github.com/anthill/koala/blob/393089fe081380506e73235db18a32b4e078d222/koala/reader.py#L256-L265 | null | from __future__ import print_function
from io import BytesIO
import re
import os
import json
from openpyxl.formula.translate import Translator
from openpyxl.cell.text import Text
from openpyxl.utils.indexed_list import IndexedList
from openpyxl.xml.functions import iterparse, fromstring, safe_iterator
try:
from xml.etree.cElementTree import ElementTree as ET
except ImportError:
from xml.etree.ElementTree import ElementTree as ET
from openpyxl.xml.constants import (
SHEET_MAIN_NS,
REL_NS,
PKG_REL_NS,
CONTYPES_NS,
ARC_CONTENT_TYPES,
ARC_WORKBOOK,
ARC_WORKBOOK_RELS,
WORKSHEET_TYPE,
SHARED_STRINGS
)
curfile = os.path.abspath(os.path.dirname(__file__))
with open('%s/functions.json' % curfile, 'r') as file:
existing = json.load(file)
from zipfile import ZipFile, ZIP_DEFLATED, BadZipfile
from koala.Cell import Cell
from koala.utils import CELL_REF_RE, col2num
FLOAT_REGEX = re.compile(r"\.|[E-e]")
CENTRAL_DIRECTORY_SIGNATURE = b'\x50\x4b\x05\x06'
def repair_central_directory(zipFile, is_file_instance): # source: https://bitbucket.org/openpyxl/openpyxl/src/93604327bce7aac5e8270674579af76d390e09c0/openpyxl/reader/excel.py?at=default&fileviewer=file-view-default
''' trims trailing data from the central directory
code taken from http://stackoverflow.com/a/7457686/570216, courtesy of Uri Cohen
'''
f = zipFile if is_file_instance else open(zipFile, 'rb+')
data = f.read()
pos = data.find(CENTRAL_DIRECTORY_SIGNATURE) # End of central directory signature
if (pos > 0):
sio = BytesIO(data)
sio.seek(pos + 22) # size of 'ZIP end of central directory record'
sio.truncate()
sio.seek(0)
return sio
f.seek(0)
return f
def read_archive(file_name):
is_file_like = hasattr(file_name, 'read')
if is_file_like:
# fileobject must have been opened with 'rb' flag
# it is required by zipfile
if getattr(file_name, 'encoding', None) is not None:
raise IOError("File-object must be opened in binary mode")
try:
archive = ZipFile(file_name, 'r', ZIP_DEFLATED)
except BadZipfile as e:
f = repair_central_directory(file_name, is_file_like)
archive = ZipFile(f, 'r', ZIP_DEFLATED)
return archive
def _cast_number(value): # source: https://bitbucket.org/openpyxl/openpyxl/src/93604327bce7aac5e8270674579af76d390e09c0/openpyxl/cell/read_only.py?at=default&fileviewer=file-view-default
"Convert numbers as string to an int or float"
m = FLOAT_REGEX.search(value)
if m is not None:
return float(value)
return int(value)
debug = False
def read_named_ranges(archive):
root = fromstring(archive.read(ARC_WORKBOOK))
dict = {}
for name_node in safe_iterator(root, '{%s}definedName' % SHEET_MAIN_NS):
name = name_node.get('name')
# if name in dict:
# raise Exception('Named_range %s is defined in multiple sheets' % name)
if not name_node.get('hidden'):
if name_node.get('name') == 'tR':
dict[name_node.get('name')] = 'Depreciation!A1:A1000'
elif '!#REF' in name_node.text:
dict[name_node.get('name')] = '#REF!'
else:
dict[name_node.get('name')] = name_node.text.replace('$','').replace(" ","")
return dict
def read_cells(archive, ignore_sheets = [], ignore_hidden = False):
global debug
# print('___### Reading Cells from XLSX ###___')
cells = {}
functions = set()
cts = dict(read_content_types(archive))
strings_path = cts.get(SHARED_STRINGS) # source: https://bitbucket.org/openpyxl/openpyxl/src/93604327bce7aac5e8270674579af76d390e09c0/openpyxl/reader/excel.py?at=default&fileviewer=file-view-default
if strings_path is not None:
if strings_path.startswith("/"):
strings_path = strings_path[1:]
shared_strings = read_string_table(archive.read(strings_path))
else:
shared_strings = []
for sheet in detect_worksheets(archive):
sheet_name = sheet['title']
function_map = {}
if sheet_name in ignore_sheets: continue
root = fromstring(archive.read(sheet['path'])) # it is necessary to use cElementTree from xml module, otherwise root.findall doesn't work as it should
hidden_cols = False
nb_hidden = 0
if ignore_hidden:
hidden_col_min = None
hidden_col_max = None
for col in root.findall('.//{%s}cols/*' % SHEET_MAIN_NS):
if 'hidden' in col.attrib and col.attrib['hidden'] == '1':
hidden_cols = True
hidden_col_min = int(col.attrib['min'])
hidden_col_max = int(col.attrib['max'])
for c in root.findall('.//{%s}c/*/..' % SHEET_MAIN_NS):
cell_data_type = c.get('t', 'n') # if no type assigned, assign 'number'
cell_address = c.attrib['r']
skip = False
if hidden_cols:
found = re.search(CELL_REF_RE, cell_address)
col = col2num(found.group(1))
if col >= hidden_col_min and col <= hidden_col_max:
nb_hidden += 1
skip = True
if not skip:
cell = {'a': '%s!%s' % (sheet_name, cell_address), 'f': None, 'v': None}
if debug:
print('Cell', cell['a'])
for child in c:
child_data_type = child.get('t', 'n') # if no type assigned, assign 'number'
if child.tag == '{%s}f' % SHEET_MAIN_NS :
if 'ref' in child.attrib: # the first cell of a shared formula has a 'ref' attribute
if debug:
print('*** Found definition of shared formula ***', child.text, child.attrib['ref'])
if "si" in child.attrib:
function_map[child.attrib['si']] = (child.attrib['ref'], Translator(str('=' + child.text), cell_address)) # translator of openpyxl needs a unicode argument that starts with '='
# else:
# print "Encountered cell with ref but not si: ", sheet_name, child.attrib['ref']
if child_data_type == 'shared':
if debug:
print('*** Found child %s of shared formula %s ***' % (cell_address, child.attrib['si']))
ref = function_map[child.attrib['si']][0]
formula = function_map[child.attrib['si']][1]
translated = formula.translate_formula(cell_address)
cell['f'] = translated[1:] # we need to get rid of the '='
else:
cell['f'] = child.text
elif child.tag == '{%s}v' % SHEET_MAIN_NS :
if cell_data_type == 's' or cell_data_type == 'str': # value is a string
try: # if it fails, it means that cell content is a string calculated from a formula
cell['v'] = shared_strings[int(child.text)]
except:
cell['v'] = child.text
elif cell_data_type == 'b':
cell['v'] = bool(int(child.text))
elif cell_data_type == 'n':
cell['v'] = _cast_number(child.text)
elif child.text is None:
continue
if cell['f'] is not None:
pattern = re.compile(r"([A-Z][A-Z0-9]*)\(")
found = re.findall(pattern, cell['f'])
map(lambda x: functions.add(x), found)
if cell['f'] is not None or cell['v'] is not None:
should_eval = 'always' if cell['f'] is not None and 'OFFSET' in cell['f'] else 'normal'
# cleaned_formula = cell['f']
cleaned_formula = cell['f'].replace(", ", ",") if cell['f'] is not None else None
if "!" in cell_address:
cells[cell_address] = Cell(cell_address, sheet_name, value = cell['v'], formula = cleaned_formula, should_eval=should_eval)
else:
cells[sheet_name + "!" + cell_address] = Cell(cell_address, sheet_name, value = cell['v'], formula = cleaned_formula, should_eval=should_eval)
# if nb_hidden > 0:
# print('Ignored %i hidden cells in sheet %s' % (nb_hidden, sheet_name))
# print('Nb of different functions %i' % len(functions))
# print(functions)
# for f in functions:
# if f not in existing:
# print('== Missing function: %s' % f)
return cells
def read_rels(archive):
"""Read relationships for a workbook"""
xml_source = archive.read(ARC_WORKBOOK_RELS)
tree = fromstring(xml_source)
for element in safe_iterator(tree, '{%s}Relationship' % PKG_REL_NS):
rId = element.get('Id')
pth = element.get("Target")
typ = element.get('Type')
# normalise path
if pth.startswith("/xl"):
pth = pth.replace("/xl", "xl")
elif not pth.startswith("xl") and not pth.startswith(".."):
pth = "xl/" + pth
yield rId, {'path':pth, 'type':typ}
def read_content_types(archive):
"""Read content types."""
xml_source = archive.read(ARC_CONTENT_TYPES)
root = fromstring(xml_source)
contents_root = root.findall('{%s}Override' % CONTYPES_NS)
for type in contents_root:
yield type.get('ContentType'), type.get('PartName')
def detect_worksheets(archive):
"""Return a list of worksheets"""
# content types has a list of paths but no titles
# workbook has a list of titles and relIds but no paths
# workbook_rels has a list of relIds and paths but no titles
# rels = {'id':{'title':'', 'path':''} }
content_types = read_content_types(archive)
valid_sheets = dict((path, ct) for ct, path in content_types if ct == WORKSHEET_TYPE)
rels = dict(read_rels(archive))
for sheet in read_sheets(archive):
rel = rels[sheet['id']]
rel['title'] = sheet['name']
rel['sheet_id'] = sheet['sheetId']
rel['state'] = sheet.get('state', 'visible')
if ("/" + rel['path'] in valid_sheets
or "worksheets" in rel['path']): # fallback in case content type is missing
yield rel
def read_string_table(xml_source):
"""Read in all shared strings in the table"""
strings = []
src = _get_xml_iter(xml_source)
for _, node in iterparse(src):
if node.tag == '{%s}si' % SHEET_MAIN_NS:
text = Text.from_tree(node).content
text = text.replace('x005F_', '')
strings.append(text)
node.clear()
return IndexedList(strings)
def _get_xml_iter(xml_source):
"""
Possible inputs: strings, bytes, members of zipfile, temporary file
Always return a file like object
"""
if not hasattr(xml_source, 'read'):
try:
xml_source = xml_source.encode("utf-8")
except (AttributeError, UnicodeDecodeError):
pass
return BytesIO(xml_source)
else:
try:
xml_source.seek(0)
except:
# could be a zipfile
pass
return xml_source
|
anthill/koala | koala/reader.py | detect_worksheets | python | def detect_worksheets(archive):
# content types has a list of paths but no titles
# workbook has a list of titles and relIds but no paths
# workbook_rels has a list of relIds and paths but no titles
# rels = {'id':{'title':'', 'path':''} }
content_types = read_content_types(archive)
valid_sheets = dict((path, ct) for ct, path in content_types if ct == WORKSHEET_TYPE)
rels = dict(read_rels(archive))
for sheet in read_sheets(archive):
rel = rels[sheet['id']]
rel['title'] = sheet['name']
rel['sheet_id'] = sheet['sheetId']
rel['state'] = sheet.get('state', 'visible')
if ("/" + rel['path'] in valid_sheets
or "worksheets" in rel['path']): # fallback in case content type is missing
yield rel | Return a list of worksheets | train | https://github.com/anthill/koala/blob/393089fe081380506e73235db18a32b4e078d222/koala/reader.py#L267-L283 | [
"def read_content_types(archive):\n \"\"\"Read content types.\"\"\"\n xml_source = archive.read(ARC_CONTENT_TYPES)\n root = fromstring(xml_source)\n contents_root = root.findall('{%s}Override' % CONTYPES_NS)\n for type in contents_root:\n yield type.get('ContentType'), type.get('PartName')\n",
"def read_rels(archive):\n \"\"\"Read relationships for a workbook\"\"\"\n xml_source = archive.read(ARC_WORKBOOK_RELS)\n tree = fromstring(xml_source)\n for element in safe_iterator(tree, '{%s}Relationship' % PKG_REL_NS):\n rId = element.get('Id')\n pth = element.get(\"Target\")\n typ = element.get('Type')\n # normalise path\n if pth.startswith(\"/xl\"):\n pth = pth.replace(\"/xl\", \"xl\")\n elif not pth.startswith(\"xl\") and not pth.startswith(\"..\"):\n pth = \"xl/\" + pth\n yield rId, {'path':pth, 'type':typ}\n",
"def read_sheets(archive):\n \"\"\"Read worksheet titles and ids for a workbook\"\"\"\n xml_source = archive.read(ARC_WORKBOOK)\n tree = fromstring(xml_source)\n for element in safe_iterator(tree, '{%s}sheet' % SHEET_MAIN_NS):\n attrib = element.attrib\n attrib['id'] = attrib[\"{%s}id\" % REL_NS]\n del attrib[\"{%s}id\" % REL_NS]\n if attrib['id']:\n yield attrib\n"
] | from __future__ import print_function
from io import BytesIO
import re
import os
import json
from openpyxl.formula.translate import Translator
from openpyxl.cell.text import Text
from openpyxl.utils.indexed_list import IndexedList
from openpyxl.xml.functions import iterparse, fromstring, safe_iterator
try:
from xml.etree.cElementTree import ElementTree as ET
except ImportError:
from xml.etree.ElementTree import ElementTree as ET
from openpyxl.xml.constants import (
SHEET_MAIN_NS,
REL_NS,
PKG_REL_NS,
CONTYPES_NS,
ARC_CONTENT_TYPES,
ARC_WORKBOOK,
ARC_WORKBOOK_RELS,
WORKSHEET_TYPE,
SHARED_STRINGS
)
curfile = os.path.abspath(os.path.dirname(__file__))
with open('%s/functions.json' % curfile, 'r') as file:
existing = json.load(file)
from zipfile import ZipFile, ZIP_DEFLATED, BadZipfile
from koala.Cell import Cell
from koala.utils import CELL_REF_RE, col2num
FLOAT_REGEX = re.compile(r"\.|[E-e]")
CENTRAL_DIRECTORY_SIGNATURE = b'\x50\x4b\x05\x06'
def repair_central_directory(zipFile, is_file_instance): # source: https://bitbucket.org/openpyxl/openpyxl/src/93604327bce7aac5e8270674579af76d390e09c0/openpyxl/reader/excel.py?at=default&fileviewer=file-view-default
''' trims trailing data from the central directory
code taken from http://stackoverflow.com/a/7457686/570216, courtesy of Uri Cohen
'''
f = zipFile if is_file_instance else open(zipFile, 'rb+')
data = f.read()
pos = data.find(CENTRAL_DIRECTORY_SIGNATURE) # End of central directory signature
if (pos > 0):
sio = BytesIO(data)
sio.seek(pos + 22) # size of 'ZIP end of central directory record'
sio.truncate()
sio.seek(0)
return sio
f.seek(0)
return f
def read_archive(file_name):
is_file_like = hasattr(file_name, 'read')
if is_file_like:
# fileobject must have been opened with 'rb' flag
# it is required by zipfile
if getattr(file_name, 'encoding', None) is not None:
raise IOError("File-object must be opened in binary mode")
try:
archive = ZipFile(file_name, 'r', ZIP_DEFLATED)
except BadZipfile as e:
f = repair_central_directory(file_name, is_file_like)
archive = ZipFile(f, 'r', ZIP_DEFLATED)
return archive
def _cast_number(value): # source: https://bitbucket.org/openpyxl/openpyxl/src/93604327bce7aac5e8270674579af76d390e09c0/openpyxl/cell/read_only.py?at=default&fileviewer=file-view-default
"Convert numbers as string to an int or float"
m = FLOAT_REGEX.search(value)
if m is not None:
return float(value)
return int(value)
debug = False
def read_named_ranges(archive):
root = fromstring(archive.read(ARC_WORKBOOK))
dict = {}
for name_node in safe_iterator(root, '{%s}definedName' % SHEET_MAIN_NS):
name = name_node.get('name')
# if name in dict:
# raise Exception('Named_range %s is defined in multiple sheets' % name)
if not name_node.get('hidden'):
if name_node.get('name') == 'tR':
dict[name_node.get('name')] = 'Depreciation!A1:A1000'
elif '!#REF' in name_node.text:
dict[name_node.get('name')] = '#REF!'
else:
dict[name_node.get('name')] = name_node.text.replace('$','').replace(" ","")
return dict
def read_cells(archive, ignore_sheets = [], ignore_hidden = False):
global debug
# print('___### Reading Cells from XLSX ###___')
cells = {}
functions = set()
cts = dict(read_content_types(archive))
strings_path = cts.get(SHARED_STRINGS) # source: https://bitbucket.org/openpyxl/openpyxl/src/93604327bce7aac5e8270674579af76d390e09c0/openpyxl/reader/excel.py?at=default&fileviewer=file-view-default
if strings_path is not None:
if strings_path.startswith("/"):
strings_path = strings_path[1:]
shared_strings = read_string_table(archive.read(strings_path))
else:
shared_strings = []
for sheet in detect_worksheets(archive):
sheet_name = sheet['title']
function_map = {}
if sheet_name in ignore_sheets: continue
root = fromstring(archive.read(sheet['path'])) # it is necessary to use cElementTree from xml module, otherwise root.findall doesn't work as it should
hidden_cols = False
nb_hidden = 0
if ignore_hidden:
hidden_col_min = None
hidden_col_max = None
for col in root.findall('.//{%s}cols/*' % SHEET_MAIN_NS):
if 'hidden' in col.attrib and col.attrib['hidden'] == '1':
hidden_cols = True
hidden_col_min = int(col.attrib['min'])
hidden_col_max = int(col.attrib['max'])
for c in root.findall('.//{%s}c/*/..' % SHEET_MAIN_NS):
cell_data_type = c.get('t', 'n') # if no type assigned, assign 'number'
cell_address = c.attrib['r']
skip = False
if hidden_cols:
found = re.search(CELL_REF_RE, cell_address)
col = col2num(found.group(1))
if col >= hidden_col_min and col <= hidden_col_max:
nb_hidden += 1
skip = True
if not skip:
cell = {'a': '%s!%s' % (sheet_name, cell_address), 'f': None, 'v': None}
if debug:
print('Cell', cell['a'])
for child in c:
child_data_type = child.get('t', 'n') # if no type assigned, assign 'number'
if child.tag == '{%s}f' % SHEET_MAIN_NS :
if 'ref' in child.attrib: # the first cell of a shared formula has a 'ref' attribute
if debug:
print('*** Found definition of shared formula ***', child.text, child.attrib['ref'])
if "si" in child.attrib:
function_map[child.attrib['si']] = (child.attrib['ref'], Translator(str('=' + child.text), cell_address)) # translator of openpyxl needs a unicode argument that starts with '='
# else:
# print "Encountered cell with ref but not si: ", sheet_name, child.attrib['ref']
if child_data_type == 'shared':
if debug:
print('*** Found child %s of shared formula %s ***' % (cell_address, child.attrib['si']))
ref = function_map[child.attrib['si']][0]
formula = function_map[child.attrib['si']][1]
translated = formula.translate_formula(cell_address)
cell['f'] = translated[1:] # we need to get rid of the '='
else:
cell['f'] = child.text
elif child.tag == '{%s}v' % SHEET_MAIN_NS :
if cell_data_type == 's' or cell_data_type == 'str': # value is a string
try: # if it fails, it means that cell content is a string calculated from a formula
cell['v'] = shared_strings[int(child.text)]
except:
cell['v'] = child.text
elif cell_data_type == 'b':
cell['v'] = bool(int(child.text))
elif cell_data_type == 'n':
cell['v'] = _cast_number(child.text)
elif child.text is None:
continue
if cell['f'] is not None:
pattern = re.compile(r"([A-Z][A-Z0-9]*)\(")
found = re.findall(pattern, cell['f'])
map(lambda x: functions.add(x), found)
if cell['f'] is not None or cell['v'] is not None:
should_eval = 'always' if cell['f'] is not None and 'OFFSET' in cell['f'] else 'normal'
# cleaned_formula = cell['f']
cleaned_formula = cell['f'].replace(", ", ",") if cell['f'] is not None else None
if "!" in cell_address:
cells[cell_address] = Cell(cell_address, sheet_name, value = cell['v'], formula = cleaned_formula, should_eval=should_eval)
else:
cells[sheet_name + "!" + cell_address] = Cell(cell_address, sheet_name, value = cell['v'], formula = cleaned_formula, should_eval=should_eval)
# if nb_hidden > 0:
# print('Ignored %i hidden cells in sheet %s' % (nb_hidden, sheet_name))
# print('Nb of different functions %i' % len(functions))
# print(functions)
# for f in functions:
# if f not in existing:
# print('== Missing function: %s' % f)
return cells
def read_rels(archive):
"""Read relationships for a workbook"""
xml_source = archive.read(ARC_WORKBOOK_RELS)
tree = fromstring(xml_source)
for element in safe_iterator(tree, '{%s}Relationship' % PKG_REL_NS):
rId = element.get('Id')
pth = element.get("Target")
typ = element.get('Type')
# normalise path
if pth.startswith("/xl"):
pth = pth.replace("/xl", "xl")
elif not pth.startswith("xl") and not pth.startswith(".."):
pth = "xl/" + pth
yield rId, {'path':pth, 'type':typ}
def read_content_types(archive):
"""Read content types."""
xml_source = archive.read(ARC_CONTENT_TYPES)
root = fromstring(xml_source)
contents_root = root.findall('{%s}Override' % CONTYPES_NS)
for type in contents_root:
yield type.get('ContentType'), type.get('PartName')
def read_sheets(archive):
"""Read worksheet titles and ids for a workbook"""
xml_source = archive.read(ARC_WORKBOOK)
tree = fromstring(xml_source)
for element in safe_iterator(tree, '{%s}sheet' % SHEET_MAIN_NS):
attrib = element.attrib
attrib['id'] = attrib["{%s}id" % REL_NS]
del attrib["{%s}id" % REL_NS]
if attrib['id']:
yield attrib
def read_string_table(xml_source):
"""Read in all shared strings in the table"""
strings = []
src = _get_xml_iter(xml_source)
for _, node in iterparse(src):
if node.tag == '{%s}si' % SHEET_MAIN_NS:
text = Text.from_tree(node).content
text = text.replace('x005F_', '')
strings.append(text)
node.clear()
return IndexedList(strings)
def _get_xml_iter(xml_source):
"""
Possible inputs: strings, bytes, members of zipfile, temporary file
Always return a file like object
"""
if not hasattr(xml_source, 'read'):
try:
xml_source = xml_source.encode("utf-8")
except (AttributeError, UnicodeDecodeError):
pass
return BytesIO(xml_source)
else:
try:
xml_source.seek(0)
except:
# could be a zipfile
pass
return xml_source
|
anthill/koala | koala/reader.py | read_string_table | python | def read_string_table(xml_source):
strings = []
src = _get_xml_iter(xml_source)
for _, node in iterparse(src):
if node.tag == '{%s}si' % SHEET_MAIN_NS:
text = Text.from_tree(node).content
text = text.replace('x005F_', '')
strings.append(text)
node.clear()
return IndexedList(strings) | Read in all shared strings in the table | train | https://github.com/anthill/koala/blob/393089fe081380506e73235db18a32b4e078d222/koala/reader.py#L285-L299 | [
"def _get_xml_iter(xml_source):\n \"\"\"\n Possible inputs: strings, bytes, members of zipfile, temporary file\n Always return a file like object\n \"\"\"\n if not hasattr(xml_source, 'read'):\n try:\n xml_source = xml_source.encode(\"utf-8\")\n except (AttributeError, UnicodeDecodeError):\n pass\n return BytesIO(xml_source)\n else:\n try:\n xml_source.seek(0)\n except:\n # could be a zipfile\n pass\n return xml_source\n"
] | from __future__ import print_function
from io import BytesIO
import re
import os
import json
from openpyxl.formula.translate import Translator
from openpyxl.cell.text import Text
from openpyxl.utils.indexed_list import IndexedList
from openpyxl.xml.functions import iterparse, fromstring, safe_iterator
try:
from xml.etree.cElementTree import ElementTree as ET
except ImportError:
from xml.etree.ElementTree import ElementTree as ET
from openpyxl.xml.constants import (
SHEET_MAIN_NS,
REL_NS,
PKG_REL_NS,
CONTYPES_NS,
ARC_CONTENT_TYPES,
ARC_WORKBOOK,
ARC_WORKBOOK_RELS,
WORKSHEET_TYPE,
SHARED_STRINGS
)
curfile = os.path.abspath(os.path.dirname(__file__))
with open('%s/functions.json' % curfile, 'r') as file:
existing = json.load(file)
from zipfile import ZipFile, ZIP_DEFLATED, BadZipfile
from koala.Cell import Cell
from koala.utils import CELL_REF_RE, col2num
FLOAT_REGEX = re.compile(r"\.|[E-e]")
CENTRAL_DIRECTORY_SIGNATURE = b'\x50\x4b\x05\x06'
def repair_central_directory(zipFile, is_file_instance): # source: https://bitbucket.org/openpyxl/openpyxl/src/93604327bce7aac5e8270674579af76d390e09c0/openpyxl/reader/excel.py?at=default&fileviewer=file-view-default
''' trims trailing data from the central directory
code taken from http://stackoverflow.com/a/7457686/570216, courtesy of Uri Cohen
'''
f = zipFile if is_file_instance else open(zipFile, 'rb+')
data = f.read()
pos = data.find(CENTRAL_DIRECTORY_SIGNATURE) # End of central directory signature
if (pos > 0):
sio = BytesIO(data)
sio.seek(pos + 22) # size of 'ZIP end of central directory record'
sio.truncate()
sio.seek(0)
return sio
f.seek(0)
return f
def read_archive(file_name):
is_file_like = hasattr(file_name, 'read')
if is_file_like:
# fileobject must have been opened with 'rb' flag
# it is required by zipfile
if getattr(file_name, 'encoding', None) is not None:
raise IOError("File-object must be opened in binary mode")
try:
archive = ZipFile(file_name, 'r', ZIP_DEFLATED)
except BadZipfile as e:
f = repair_central_directory(file_name, is_file_like)
archive = ZipFile(f, 'r', ZIP_DEFLATED)
return archive
def _cast_number(value): # source: https://bitbucket.org/openpyxl/openpyxl/src/93604327bce7aac5e8270674579af76d390e09c0/openpyxl/cell/read_only.py?at=default&fileviewer=file-view-default
"Convert numbers as string to an int or float"
m = FLOAT_REGEX.search(value)
if m is not None:
return float(value)
return int(value)
debug = False
def read_named_ranges(archive):
root = fromstring(archive.read(ARC_WORKBOOK))
dict = {}
for name_node in safe_iterator(root, '{%s}definedName' % SHEET_MAIN_NS):
name = name_node.get('name')
# if name in dict:
# raise Exception('Named_range %s is defined in multiple sheets' % name)
if not name_node.get('hidden'):
if name_node.get('name') == 'tR':
dict[name_node.get('name')] = 'Depreciation!A1:A1000'
elif '!#REF' in name_node.text:
dict[name_node.get('name')] = '#REF!'
else:
dict[name_node.get('name')] = name_node.text.replace('$','').replace(" ","")
return dict
def read_cells(archive, ignore_sheets=None, ignore_hidden=False):
    """Parse every worksheet of *archive* into a dict of Cell objects.

    :param archive: open ZipFile of the workbook.
    :param ignore_sheets: iterable of sheet titles to skip (default: none).
    :param ignore_hidden: when True, cells in hidden columns are skipped.
    :return: dict mapping 'Sheet!A1'-style addresses to Cell instances.
    """
    global debug
    if ignore_sheets is None:  # avoid a shared mutable default argument
        ignore_sheets = []
    cells = {}
    functions = set()
    # Extracts function names from formulas; compiled once instead of
    # per-cell as in the original.
    func_name_pattern = re.compile(r"([A-Z][A-Z0-9]*)\(")
    cts = dict(read_content_types(archive))
    strings_path = cts.get(SHARED_STRINGS)
    if strings_path is not None:
        if strings_path.startswith("/"):
            strings_path = strings_path[1:]
        shared_strings = read_string_table(archive.read(strings_path))
    else:
        shared_strings = []
    for sheet in detect_worksheets(archive):
        sheet_name = sheet['title']
        function_map = {}  # shared-formula definitions of this sheet, keyed by 'si'
        if sheet_name in ignore_sheets:
            continue
        # cElementTree (via openpyxl) is required for these findall queries.
        root = fromstring(archive.read(sheet['path']))
        hidden_cols = False
        nb_hidden = 0
        if ignore_hidden:
            hidden_col_min = None
            hidden_col_max = None
            # NOTE(review): only the last hidden <col> range is kept; several
            # disjoint hidden ranges are not all honoured -- confirm intent.
            for col in root.findall('.//{%s}cols/*' % SHEET_MAIN_NS):
                if 'hidden' in col.attrib and col.attrib['hidden'] == '1':
                    hidden_cols = True
                    hidden_col_min = int(col.attrib['min'])
                    hidden_col_max = int(col.attrib['max'])
        for c in root.findall('.//{%s}c/*/..' % SHEET_MAIN_NS):
            cell_data_type = c.get('t', 'n')  # default type is 'number'
            cell_address = c.attrib['r']
            skip = False
            if hidden_cols:
                found = re.search(CELL_REF_RE, cell_address)
                col = col2num(found.group(1))
                if col >= hidden_col_min and col <= hidden_col_max:
                    nb_hidden += 1
                    skip = True
            if not skip:
                cell = {'a': '%s!%s' % (sheet_name, cell_address), 'f': None, 'v': None}
                if debug:
                    print('Cell', cell['a'])
                for child in c:
                    child_data_type = child.get('t', 'n')  # default type is 'number'
                    if child.tag == '{%s}f' % SHEET_MAIN_NS:
                        # The first cell of a shared formula carries a 'ref'.
                        if 'ref' in child.attrib:
                            if debug:
                                print('*** Found definition of shared formula ***', child.text, child.attrib['ref'])
                            if "si" in child.attrib:
                                # openpyxl's Translator needs a unicode argument starting with '='.
                                function_map[child.attrib['si']] = (child.attrib['ref'], Translator(str('=' + child.text), cell_address))
                        if child_data_type == 'shared':
                            if debug:
                                print('*** Found child %s of shared formula %s ***' % (cell_address, child.attrib['si']))
                            formula = function_map[child.attrib['si']][1]
                            translated = formula.translate_formula(cell_address)
                            cell['f'] = translated[1:]  # drop the leading '='
                        else:
                            cell['f'] = child.text
                    elif child.tag == '{%s}v' % SHEET_MAIN_NS:
                        if cell_data_type == 's' or cell_data_type == 'str':  # value is a string
                            try:
                                cell['v'] = shared_strings[int(child.text)]
                            except Exception:  # content is a string produced by a formula
                                cell['v'] = child.text
                        elif cell_data_type == 'b':
                            cell['v'] = bool(int(child.text))
                        elif cell_data_type == 'n':
                            cell['v'] = _cast_number(child.text)
                        elif child.text is None:
                            continue
                if cell['f'] is not None:
                    # Fixed: the original `map(lambda x: functions.add(x), ...)`
                    # was lazy under Python 3 and never populated the set.
                    functions.update(func_name_pattern.findall(cell['f']))
                if cell['f'] is not None or cell['v'] is not None:
                    # OFFSET formulas must be re-evaluated on every calculation.
                    should_eval = 'always' if cell['f'] is not None and 'OFFSET' in cell['f'] else 'normal'
                    cleaned_formula = cell['f'].replace(", ", ",") if cell['f'] is not None else None
                    if "!" in cell_address:
                        cells[cell_address] = Cell(cell_address, sheet_name, value=cell['v'], formula=cleaned_formula, should_eval=should_eval)
                    else:
                        cells[sheet_name + "!" + cell_address] = Cell(cell_address, sheet_name, value=cell['v'], formula=cleaned_formula, should_eval=should_eval)
    return cells
def read_rels(archive):
    """Yield (relationship id, {'path', 'type'}) pairs for the workbook.

    Relationship targets are normalised so that every path is expressed
    relative to the archive root (i.e. prefixed with 'xl/').
    """
    tree = fromstring(archive.read(ARC_WORKBOOK_RELS))
    for rel in safe_iterator(tree, '{%s}Relationship' % PKG_REL_NS):
        target = rel.get("Target")
        # normalise the target path relative to the archive root
        if target.startswith("/xl"):
            target = target.replace("/xl", "xl")
        elif not (target.startswith("xl") or target.startswith("..")):
            target = "xl/" + target
        yield rel.get('Id'), {'path': target, 'type': rel.get('Type')}
def read_content_types(archive):
    """Yield (content type, part name) pairs from [Content_Types].xml.

    The loop variable no longer shadows the ``type`` builtin.
    """
    root = fromstring(archive.read(ARC_CONTENT_TYPES))
    for override in root.findall('{%s}Override' % CONTYPES_NS):
        yield override.get('ContentType'), override.get('PartName')
def read_sheets(archive):
    """Yield the attribute dict of each worksheet declared in the workbook.

    The namespaced relationship id is renamed to a plain 'id' key; sheets
    without a relationship id are skipped.
    """
    tree = fromstring(archive.read(ARC_WORKBOOK))
    rel_id_key = "{%s}id" % REL_NS
    for sheet_node in safe_iterator(tree, '{%s}sheet' % SHEET_MAIN_NS):
        attrib = sheet_node.attrib
        attrib['id'] = attrib.pop(rel_id_key)
        if attrib['id']:
            yield attrib
def detect_worksheets(archive):
    """Yield one merged descriptor dict per worksheet in the workbook.

    Three sources are joined: content types (paths, no titles), the
    workbook (titles and relIds, no paths) and the workbook rels
    (relIds and paths, no titles).
    """
    valid_sheets = {
        path: ct
        for ct, path in read_content_types(archive)
        if ct == WORKSHEET_TYPE
    }
    rels = dict(read_rels(archive))
    for sheet in read_sheets(archive):
        rel = rels[sheet['id']]
        rel['title'] = sheet['name']
        rel['sheet_id'] = sheet['sheetId']
        rel['state'] = sheet.get('state', 'visible')
        # fall back on the path itself when the content type entry is missing
        if "/" + rel['path'] in valid_sheets or "worksheets" in rel['path']:
            yield rel
def _get_xml_iter(xml_source):
"""
Possible inputs: strings, bytes, members of zipfile, temporary file
Always return a file like object
"""
if not hasattr(xml_source, 'read'):
try:
xml_source = xml_source.encode("utf-8")
except (AttributeError, UnicodeDecodeError):
pass
return BytesIO(xml_source)
else:
try:
xml_source.seek(0)
except:
# could be a zipfile
pass
return xml_source
|
anthill/koala | koala/reader.py | _get_xml_iter | python | def _get_xml_iter(xml_source):
if not hasattr(xml_source, 'read'):
try:
xml_source = xml_source.encode("utf-8")
except (AttributeError, UnicodeDecodeError):
pass
return BytesIO(xml_source)
else:
try:
xml_source.seek(0)
except:
# could be a zipfile
pass
return xml_source | Possible inputs: strings, bytes, members of zipfile, temporary file
Always return a file like object | train | https://github.com/anthill/koala/blob/393089fe081380506e73235db18a32b4e078d222/koala/reader.py#L302-L319 | null | from __future__ import print_function
from io import BytesIO
import re
import os
import json
from openpyxl.formula.translate import Translator
from openpyxl.cell.text import Text
from openpyxl.utils.indexed_list import IndexedList
from openpyxl.xml.functions import iterparse, fromstring, safe_iterator
try:
from xml.etree.cElementTree import ElementTree as ET
except ImportError:
from xml.etree.ElementTree import ElementTree as ET
from openpyxl.xml.constants import (
SHEET_MAIN_NS,
REL_NS,
PKG_REL_NS,
CONTYPES_NS,
ARC_CONTENT_TYPES,
ARC_WORKBOOK,
ARC_WORKBOOK_RELS,
WORKSHEET_TYPE,
SHARED_STRINGS
)
# Directory of this module; used to locate the bundled functions list.
curfile = os.path.abspath(os.path.dirname(__file__))

# Names of the Excel functions koala knows about.  NOTE(review): `existing`
# appears to be referenced only by commented-out diagnostics in read_cells
# -- verify before relying on it.  (`file` shadows the Python 2 builtin.)
with open('%s/functions.json' % curfile, 'r') as file:
    existing = json.load(file)
# Markers that force a float interpretation: a decimal point or an exponent
# letter.  Fixed: the original class `[E-e]` was an ASCII *range* that also
# matched F-Z, '[', '\\', ']', '^', '_', '`' and a-d.
FLOAT_REGEX = re.compile(r"\.|[Ee]")

# "PK\x05\x06" -- signature of the ZIP end-of-central-directory record.
CENTRAL_DIRECTORY_SIGNATURE = b'\x50\x4b\x05\x06'
def repair_central_directory(zipFile, is_file_instance): # source: https://bitbucket.org/openpyxl/openpyxl/src/93604327bce7aac5e8270674579af76d390e09c0/openpyxl/reader/excel.py?at=default&fileviewer=file-view-default
    ''' trims trailing data from the central directory
    code taken from http://stackoverflow.com/a/7457686/570216, courtesy of Uri Cohen
    '''
    # Accept either an open binary file object or a path.
    f = zipFile if is_file_instance else open(zipFile, 'rb+')
    data = f.read()
    pos = data.find(CENTRAL_DIRECTORY_SIGNATURE) # End of central directory signature
    # NOTE(review): `pos > 0` skips the case of an EOCD record at offset 0
    # (a completely empty archive) -- confirm whether `>= 0` was intended.
    if (pos > 0):
        sio = BytesIO(data)
        sio.seek(pos + 22) # size of 'ZIP end of central directory record'
        sio.truncate()
        sio.seek(0)
        return sio
    # No signature found: return the rewound original stream untouched.
    f.seek(0)
    return f
def read_archive(file_name):
    """Open *file_name* (a path or a binary file object) as a ZipFile,
    repairing a corrupt central directory on the first BadZipfile."""
    is_file_like = hasattr(file_name, 'read')
    if is_file_like:
        # fileobject must have been opened with 'rb' flag
        # it is required by zipfile
        if getattr(file_name, 'encoding', None) is not None:
            raise IOError("File-object must be opened in binary mode")
    try:
        archive = ZipFile(file_name, 'r', ZIP_DEFLATED)
    except BadZipfile as e:
        # Trailing junk after the EOCD record is the usual culprit; strip
        # it and retry once.  (NOTE(review): `e` is unused.)
        f = repair_central_directory(file_name, is_file_like)
        archive = ZipFile(f, 'r', ZIP_DEFLATED)
    return archive
def _cast_number(value): # source: https://bitbucket.org/openpyxl/openpyxl/src/93604327bce7aac5e8270674579af76d390e09c0/openpyxl/cell/read_only.py?at=default&fileviewer=file-view-default
    "Convert numbers as string to an int or float"
    # NOTE(review): FLOAT_REGEX's class `[E-e]` is an ASCII range that also
    # matches F-Z, '[', '\\', ']', '^', '_', '`' and a-d -- probably meant
    # to be `[Ee]`.
    m = FLOAT_REGEX.search(value)
    if m is not None:
        return float(value)
    return int(value)
debug = False  # module-level verbosity switch read by read_cells()
def read_named_ranges(archive):
    """Return {defined name: normalised reference} for the workbook.

    Hidden names are ignored; '$' anchors and spaces are stripped from
    reference text, and broken references are collapsed to '#REF!'.
    """
    root = fromstring(archive.read(ARC_WORKBOOK))
    dict = {}  # NOTE(review): shadows the `dict` builtin
    for name_node in safe_iterator(root, '{%s}definedName' % SHEET_MAIN_NS):
        name = name_node.get('name')
        # if name in dict:
        #     raise Exception('Named_range %s is defined in multiple sheets' % name)
        if not name_node.get('hidden'):
            # Hardcoded override for a name 'tR'; looks workbook-specific
            # -- verify before removing.
            if name_node.get('name') == 'tR':
                dict[name_node.get('name')] = 'Depreciation!A1:A1000'
            elif '!#REF' in name_node.text:
                dict[name_node.get('name')] = '#REF!'
            else:
                dict[name_node.get('name')] = name_node.text.replace('$','').replace(" ","")
    return dict
def read_cells(archive, ignore_sheets = [], ignore_hidden = False):
    """Parse every worksheet in *archive* into a dict of Cell objects.

    :param archive: open ZipFile of the workbook.
    :param ignore_sheets: sheet titles to skip.  NOTE(review): mutable
        default argument -- harmless here since it is only read.
    :param ignore_hidden: when True, cells in hidden columns are skipped.
    :return: dict mapping 'Sheet!A1'-style addresses to Cell instances.
    """
    global debug
    # print('___### Reading Cells from XLSX ###___')
    cells = {}
    functions = set()
    cts = dict(read_content_types(archive))
    strings_path = cts.get(SHARED_STRINGS) # source: https://bitbucket.org/openpyxl/openpyxl/src/93604327bce7aac5e8270674579af76d390e09c0/openpyxl/reader/excel.py?at=default&fileviewer=file-view-default
    if strings_path is not None:
        if strings_path.startswith("/"):
            strings_path = strings_path[1:]
        shared_strings = read_string_table(archive.read(strings_path))
    else:
        shared_strings = []
    for sheet in detect_worksheets(archive):
        sheet_name = sheet['title']
        function_map = {}  # shared-formula definitions of this sheet, keyed by 'si'
        if sheet_name in ignore_sheets: continue
        root = fromstring(archive.read(sheet['path'])) # it is necessary to use cElementTree from xml module, otherwise root.findall doesn't work as it should
        hidden_cols = False
        nb_hidden = 0
        if ignore_hidden:
            hidden_col_min = None
            hidden_col_max = None
            # NOTE(review): only the *last* hidden <col> range is retained;
            # multiple disjoint hidden ranges are not all honoured.
            for col in root.findall('.//{%s}cols/*' % SHEET_MAIN_NS):
                if 'hidden' in col.attrib and col.attrib['hidden'] == '1':
                    hidden_cols = True
                    hidden_col_min = int(col.attrib['min'])
                    hidden_col_max = int(col.attrib['max'])
        for c in root.findall('.//{%s}c/*/..' % SHEET_MAIN_NS):
            cell_data_type = c.get('t', 'n') # if no type assigned, assign 'number'
            cell_address = c.attrib['r']
            skip = False
            if hidden_cols:
                found = re.search(CELL_REF_RE, cell_address)
                col = col2num(found.group(1))
                if col >= hidden_col_min and col <= hidden_col_max:
                    nb_hidden += 1
                    skip = True
            if not skip:
                cell = {'a': '%s!%s' % (sheet_name, cell_address), 'f': None, 'v': None}
                if debug:
                    print('Cell', cell['a'])
                for child in c:
                    child_data_type = child.get('t', 'n') # if no type assigned, assign 'number'
                    if child.tag == '{%s}f' % SHEET_MAIN_NS :
                        if 'ref' in child.attrib: # the first cell of a shared formula has a 'ref' attribute
                            if debug:
                                print('*** Found definition of shared formula ***', child.text, child.attrib['ref'])
                            if "si" in child.attrib:
                                function_map[child.attrib['si']] = (child.attrib['ref'], Translator(str('=' + child.text), cell_address)) # translator of openpyxl needs a unicode argument that starts with '='
                            # else:
                            #     print "Encountered cell with ref but not si: ", sheet_name, child.attrib['ref']
                        if child_data_type == 'shared':
                            if debug:
                                print('*** Found child %s of shared formula %s ***' % (cell_address, child.attrib['si']))
                            ref = function_map[child.attrib['si']][0]
                            formula = function_map[child.attrib['si']][1]
                            translated = formula.translate_formula(cell_address)
                            cell['f'] = translated[1:] # we need to get rid of the '='
                        else:
                            cell['f'] = child.text
                    elif child.tag == '{%s}v' % SHEET_MAIN_NS :
                        if cell_data_type == 's' or cell_data_type == 'str': # value is a string
                            try: # if it fails, it means that cell content is a string calculated from a formula
                                cell['v'] = shared_strings[int(child.text)]
                            except:
                                cell['v'] = child.text
                        elif cell_data_type == 'b':
                            cell['v'] = bool(int(child.text))
                        elif cell_data_type == 'n':
                            cell['v'] = _cast_number(child.text)
                        elif child.text is None:
                            continue
                if cell['f'] is not None:
                    pattern = re.compile(r"([A-Z][A-Z0-9]*)\(")
                    found = re.findall(pattern, cell['f'])
                    # NOTE(review): under Python 3 this `map` is lazy and never
                    # runs, so `functions` stays empty; functions.update(found)
                    # would restore the Python 2 behaviour.
                    map(lambda x: functions.add(x), found)
                if cell['f'] is not None or cell['v'] is not None:
                    # OFFSET formulas must be re-evaluated on every calculation.
                    should_eval = 'always' if cell['f'] is not None and 'OFFSET' in cell['f'] else 'normal'
                    # cleaned_formula = cell['f']
                    cleaned_formula = cell['f'].replace(", ", ",") if cell['f'] is not None else None
                    if "!" in cell_address:
                        cells[cell_address] = Cell(cell_address, sheet_name, value = cell['v'], formula = cleaned_formula, should_eval=should_eval)
                    else:
                        cells[sheet_name + "!" + cell_address] = Cell(cell_address, sheet_name, value = cell['v'], formula = cleaned_formula, should_eval=should_eval)
    # if nb_hidden > 0:
    #     print('Ignored %i hidden cells in sheet %s' % (nb_hidden, sheet_name))
    # print('Nb of different functions %i' % len(functions))
    # print(functions)
    # for f in functions:
    #     if f not in existing:
    #         print('== Missing function: %s' % f)
    return cells
def read_rels(archive):
    """Read relationships for a workbook.

    Yields (relationship id, {'path': ..., 'type': ...}) pairs with the
    target path normalised to be relative to the archive root.
    """
    xml_source = archive.read(ARC_WORKBOOK_RELS)
    tree = fromstring(xml_source)
    for element in safe_iterator(tree, '{%s}Relationship' % PKG_REL_NS):
        rId = element.get('Id')
        pth = element.get("Target")
        typ = element.get('Type')
        # normalise path
        if pth.startswith("/xl"):
            pth = pth.replace("/xl", "xl")
        elif not pth.startswith("xl") and not pth.startswith(".."):
            pth = "xl/" + pth
        yield rId, {'path':pth, 'type':typ}
def read_content_types(archive):
    """Read content types.

    Yields (content type, part name) pairs from [Content_Types].xml.
    """
    xml_source = archive.read(ARC_CONTENT_TYPES)
    root = fromstring(xml_source)
    contents_root = root.findall('{%s}Override' % CONTYPES_NS)
    # NOTE(review): the loop variable shadows the `type` builtin.
    for type in contents_root:
        yield type.get('ContentType'), type.get('PartName')
def read_sheets(archive):
    """Read worksheet titles and ids for a workbook.

    Yields each sheet's attribute dict with the namespaced relationship
    id renamed to a plain 'id' key; sheets without an id are skipped.
    """
    xml_source = archive.read(ARC_WORKBOOK)
    tree = fromstring(xml_source)
    for element in safe_iterator(tree, '{%s}sheet' % SHEET_MAIN_NS):
        attrib = element.attrib
        attrib['id'] = attrib["{%s}id" % REL_NS]
        del attrib["{%s}id" % REL_NS]
        if attrib['id']:
            yield attrib
def detect_worksheets(archive):
    """Yield one merged descriptor dict per worksheet in the workbook."""
    # content types has a list of paths but no titles
    # workbook has a list of titles and relIds but no paths
    # workbook_rels has a list of relIds and paths but no titles
    # rels = {'id':{'title':'', 'path':''} }
    content_types = read_content_types(archive)
    valid_sheets = dict((path, ct) for ct, path in content_types if ct == WORKSHEET_TYPE)
    rels = dict(read_rels(archive))
    for sheet in read_sheets(archive):
        rel = rels[sheet['id']]
        rel['title'] = sheet['name']
        rel['sheet_id'] = sheet['sheetId']
        rel['state'] = sheet.get('state', 'visible')
        if ("/" + rel['path'] in valid_sheets
            or "worksheets" in rel['path']): # fallback in case content type is missing
            yield rel
def read_string_table(xml_source):
    """Read in all shared strings in the table.

    Parses the sharedStrings part incrementally and returns an
    IndexedList of the string contents, in document order.
    """
    strings = []
    src = _get_xml_iter(xml_source)
    for _, node in iterparse(src):
        if node.tag == '{%s}si' % SHEET_MAIN_NS:
            text = Text.from_tree(node).content
            # NOTE(review): this drops the literal substring 'x005F_'
            # anywhere in the value, which approximates OOXML '_x005F_'
            # unescaping -- verify against strings that legitimately
            # contain that sequence.
            text = text.replace('x005F_', '')
            strings.append(text)
            node.clear()  # free the parsed element to bound memory use
    return IndexedList(strings)
|
def create_node(t, ref = None, debug = False):
    """Simple factory function: wrap token *t* in the matching AST node type."""
    if t.ttype == "operand":
        if t.tsubtype in ("range", "named_range", "pointer"):
            return RangeNode(t, ref, debug = debug)
        return OperandNode(t)
    if t.ttype == "function":
        return FunctionNode(t, ref, debug = debug)
    if t.ttype.startswith("operator"):
        return OperatorNode(t, ref, debug = debug)
    return ASTNode(t, debug = debug)
# cython: profile=True
import collections
import six
import networkx
from networkx.classes.digraph import DiGraph
from openpyxl.compat import unicode
from koala.utils import uniqueify, flatten, max_dimension, col2num, resolve_range
from koala.Cell import Cell
from koala.Range import parse_cell_address
from koala.tokenizer import ExcelParser, f_token
from .astnodes import *
class Operator(object):
    """Small wrapper describing one operator during shunting-yard parsing."""

    def __init__(self, value, precedence, associativity):
        # Literal operator text, binding strength, and 'left'/'right'
        # associativity as used by the precedence comparison in
        # shunting_yard().
        self.value = value
        self.precedence = precedence
        self.associativity = associativity
def shunting_yard(expression, named_ranges, ref = None, tokenize_range = False):
    """
    Tokenize an excel formula expression into reverse polish notation

    Core algorithm taken from wikipedia with varargs extensions from
    http://www.kallisti.net.nz/blog/2008/02/extension-to-the-shunting-yard-algorithm-to-allow-variable-numbers-of-arguments-to-functions/

    The ref is the cell address which is passed down to the actual compiled python code.
    Range basic operations signature require this reference, so it has to be written during OperatorNode.emit()
    https://github.com/iOiurson/koala/blob/master/koala/ast/graph.py#L292.

    This is needed because Excel range basic operations (+, -, * ...) are applied on matching cells.

    Example:
    Cell C2 has the following formula 'A1:A3 + B1:B3'.
    The output will actually be A2 + B2, because the formula is relative to cell C2.
    """
    #remove leading =
    if expression.startswith('='):
        expression = expression[1:]

    p = ExcelParser(tokenize_range = tokenize_range);
    p.parse(expression)

    # Phase 1: insert tokens for '(' and ')', to make things clearer below,
    # and tag operands that name a defined range.
    tokens = []
    for t in p.tokens.items:
        if t.ttype == "function" and t.tsubtype == "start":
            t.tsubtype = ""
            tokens.append(t)
            tokens.append(f_token('(','arglist','start'))
        elif t.ttype == "function" and t.tsubtype == "stop":
            tokens.append(f_token(')','arglist','stop'))
        elif t.ttype == "subexpression" and t.tsubtype == "start":
            t.tvalue = '('
            tokens.append(t)
        elif t.ttype == "subexpression" and t.tsubtype == "stop":
            t.tvalue = ')'
            tokens.append(t)
        elif t.ttype == "operand" and t.tsubtype == "range" and t.tvalue in named_ranges:
            t.tsubtype = "named_range"
            tokens.append(t)
        else:
            tokens.append(t)

    #http://office.microsoft.com/en-us/excel-help/calculation-operators-and-precedence-HP010078886.aspx
    operators = {}
    operators[':'] = Operator(':',8,'left')
    operators[''] = Operator(' ',8,'left')
    operators[','] = Operator(',',8,'left')
    operators['u-'] = Operator('u-',7,'left') #unary negation
    operators['%'] = Operator('%',6,'left')
    operators['^'] = Operator('^',5,'left')
    operators['*'] = Operator('*',4,'left')
    operators['/'] = Operator('/',4,'left')
    operators['+'] = Operator('+',3,'left')
    operators['-'] = Operator('-',3,'left')
    operators['&'] = Operator('&',2,'left')
    operators['='] = Operator('=',1,'left')
    operators['<'] = Operator('<',1,'left')
    operators['>'] = Operator('>',1,'left')
    operators['<='] = Operator('<=',1,'left')
    operators['>='] = Operator('>=',1,'left')
    operators['<>'] = Operator('<>',1,'left')

    output = collections.deque()
    stack = []
    were_values = []
    arg_count = []

    new_tokens = []

    # Phase 2: reconstruct expressions with ':' and replace the
    # corresponding tokens by the reconstructed expression (collapses
    # OFFSET/INDEX pointer ranges into single 'pointer' operand tokens).
    if not tokenize_range:
        for index, token in enumerate(tokens):
            new_tokens.append(token)
            if type(token.tvalue) == str or type(token.tvalue) == unicode:
                if token.tvalue.startswith(':'): # example -> :OFFSET( or simply :A10
                    depth = 0
                    expr = ''
                    rev = reversed(tokens[:index])
                    for t in rev: # going backwards, 'stop' starts, 'start' stops
                        if t.tsubtype == 'stop':
                            depth += 1
                        elif depth > 0 and t.tsubtype == 'start':
                            depth -= 1
                        expr = t.tvalue + expr
                        new_tokens.pop()
                        if depth == 0:
                            new_tokens.pop() # these 2 lines are needed to remove INDEX()
                            new_tokens.pop()
                            expr = six.next(rev).tvalue + expr
                            break
                    expr += token.tvalue
                    depth = 0
                    if token.tvalue[1:] in ['OFFSET', 'INDEX']:
                        for t in tokens[(index + 1):]:
                            if t.tsubtype == 'start':
                                depth += 1
                            elif depth > 0 and t.tsubtype == 'stop':
                                depth -= 1
                            expr += t.tvalue
                            tokens.remove(t)
                            if depth == 0:
                                break
                    new_tokens.append(f_token(expr, 'operand', 'pointer'))
                elif ':OFFSET' in token.tvalue or ':INDEX' in token.tvalue: # example -> A1:OFFSET(
                    depth = 0
                    expr = ''
                    expr += token.tvalue
                    for t in tokens[(index + 1):]:
                        if t.tsubtype == 'start':
                            depth += 1
                        elif t.tsubtype == 'stop':
                            depth -= 1
                        expr += t.tvalue
                        tokens.remove(t)
                        if depth == 0:
                            new_tokens.pop()
                            break
                    new_tokens.append(f_token(expr, 'operand', 'pointer'))

    tokens = new_tokens if new_tokens else tokens

    # Phase 3: classic shunting-yard with the varargs extension
    # (were_values/arg_count track argument counts per function).
    for t in tokens:
        if t.ttype == "operand":
            output.append(create_node(t, ref))
            if were_values:
                were_values.pop()
                were_values.append(True)
        elif t.ttype == "function":
            stack.append(t)
            arg_count.append(0)
            if were_values:
                were_values.pop()
                were_values.append(True)
            were_values.append(False)
        elif t.ttype == "argument":
            while stack and (stack[-1].tsubtype != "start"):
                output.append(create_node(stack.pop(), ref))
            if were_values.pop(): arg_count[-1] += 1
            were_values.append(False)
            if not len(stack):
                raise Exception("Mismatched or misplaced parentheses")
        elif t.ttype.startswith('operator'):
            if t.ttype.endswith('-prefix') and t.tvalue =="-":
                o1 = operators['u-']
            else:
                o1 = operators[t.tvalue]
            while stack and stack[-1].ttype.startswith('operator'):
                if stack[-1].ttype.endswith('-prefix') and stack[-1].tvalue =="-":
                    o2 = operators['u-']
                else:
                    o2 = operators[stack[-1].tvalue]
                if ( (o1.associativity == "left" and o1.precedence <= o2.precedence)
                        or
                        (o1.associativity == "right" and o1.precedence < o2.precedence) ):
                    output.append(create_node(stack.pop(), ref))
                else:
                    break
            stack.append(t)
        elif t.tsubtype == "start":
            stack.append(t)
        elif t.tsubtype == "stop":
            while stack and stack[-1].tsubtype != "start":
                output.append(create_node(stack.pop(), ref))
            if not stack:
                raise Exception("Mismatched or misplaced parentheses")
            stack.pop()
            if stack and stack[-1].ttype == "function":
                f = create_node(stack.pop(), ref)
                a = arg_count.pop()
                w = were_values.pop()
                if w: a += 1
                f.num_args = a
                #print f, "has ",a," args"
                output.append(f)

    # Drain the remaining operators.
    while stack:
        if (stack[-1].tsubtype == "start" or stack[-1].tsubtype == "stop"):
            raise Exception("Mismatched or misplaced parentheses")
        output.append(create_node(stack.pop(), ref))

    # convert to list
    return [x for x in output]
def build_ast(expression, debug = False):
    """Build an AST from an Excel formula expression in reverse polish notation.

    Returns (G, root) where G is a networkx DiGraph whose edges point from
    argument nodes to their operator/function node, and root is the top
    node of the expression.
    """
    #use a directed graph to store the tree
    G = DiGraph()
    stack = []
    for n in expression:
        # Since the graph does not maintain the order of adding nodes/edges
        # add an extra attribute 'pos' so we can always sort to the correct order
        if isinstance(n,OperatorNode):
            if n.ttype == "operator-infix":
                arg2 = stack.pop()
                arg1 = stack.pop()
                # Hack to write the name of sheet in 2argument address
                if(n.tvalue == ':'):
                    if '!' in arg1.tvalue and arg2.ttype == 'operand' and '!' not in arg2.tvalue:
                        arg2.tvalue = arg1.tvalue.split('!')[0] + '!' + arg2.tvalue
                G.add_node(arg1,pos = 1)
                G.add_node(arg2,pos = 2)
                G.add_edge(arg1, n)
                G.add_edge(arg2, n)
            else:
                # prefix/postfix operators take a single argument
                arg1 = stack.pop()
                G.add_node(arg1,pos = 1)
                G.add_edge(arg1, n)
        elif isinstance(n,FunctionNode):
            args = []
            for _ in range(n.num_args):
                try:
                    args.append(stack.pop())
                except:
                    # NOTE(review): a stack underflow (malformed RPN) is
                    # re-raised as a bare Exception with no message.
                    raise Exception()
            #try:
            #    args = [stack.pop() for _ in range(n.num_args)]
            #except:
            #    print 'STACK', stack, type(n)
            #    raise Exception('prut')
            args.reverse()
            for i,a in enumerate(args):
                G.add_node(a,pos = i)
                G.add_edge(a,n)
        else:
            G.add_node(n,pos=0)
        stack.append(n)
    # The last node left on the stack is the expression root.
    return G,stack.pop()
def subgraph(G, seed):
    """Return the sub-DAG of *G* reachable by walking predecessor edges
    from *seed* (i.e. everything *seed* depends on).

    Unlike make_subgraph() this only walks in the ascending direction.
    """
    result = networkx.DiGraph()  # renamed: the original shadowed the function name
    todo = [(seed, n) for n in G.predecessors(seed)]
    # Fixed: the original looped `while len(todo) > 1`, which left the last
    # queued dependency unprocessed; make_subgraph() drains the whole list.
    while todo:
        previous, current = todo.pop()
        result.add_node(current)
        result.add_edge(previous, current)
        for n in G.predecessors(current):
            if n not in result.nodes():
                todo.append((current, n))
    return result
def make_subgraph(G, seed, direction = "ascending"):
    """Collect the subgraph reachable from *seed* along predecessor
    ("ascending") or successor (any other value) edges of *G*."""
    result = networkx.DiGraph()
    # Bind the traversal function once instead of branching per node.
    neighbors = G.predecessors if direction == "ascending" else G.successors
    todo = [(seed, n) for n in neighbors(seed)]
    while todo:
        parent, current = todo.pop()
        result.add_node(current)
        result.add_edge(parent, current)
        for nxt in neighbors(current):
            if nxt not in result.nodes():
                todo.append((current, nxt))
    return result
def cell2code(cell, named_ranges):
    """Generate python code for the given cell.

    Returns (code, ast): for a formula cell, the emitted python expression
    and its AST; for a constant cell, a python literal and ast=None.
    """
    if cell.formula:
        debug = False
        # if 'OFFSET' in cell.formula or 'INDEX' in cell.formula:
        #     debug = True
        # if debug:
        #     print 'FORMULA', cell.formula
        # Named ranges have no concrete address to anchor relative refs to.
        ref = parse_cell_address(cell.address()) if not cell.is_named_range else None
        sheet = cell.sheet
        e = shunting_yard(cell.formula, named_ranges, ref=ref, tokenize_range = False)
        ast,root = build_ast(e, debug = debug)
        code = root.emit(ast, context=sheet)
        # print 'CODE', code, ref
    else:
        ast = None
        if isinstance(cell.value, unicode):
            # Emit a quoted (and escaped) python string literal.
            code = u'u"' + cell.value.replace(u'"', u'\\"') + u'"'
        elif isinstance(cell.value, str):
            raise RuntimeError("Got unexpected non-unicode str")
        else:
            code = str(cell.value)
    return code,ast
def prepare_pointer(code, names, ref_cell = None):
    """Split a pointer range expression into compilable start/end halves.

    A "pointer" is a range whose bounds are computed at evaluation time
    (OFFSET/INDEX).  *code* looks like 'OFFSET(...):B2', 'A1:OFFSET(...)'
    or an INDEX variant; each half is compiled separately so the Range
    can be resolved lazily.  Returns {'start': ..., 'end': ...}.
    """
    # if ref_cell is None, it means that the pointer is a named_range
    # Try the three possible shapes of the ':' split in turn.
    try:
        start, end = code.split('):')
        start += ')'
    except:
        try:
            start, end = code.split(':INDEX')
            end = 'INDEX' + end
        except:
            start, end = code.split(':OFFSET')
            end = 'OFFSET' + end

    def build_code(formula):
        # Compile one half of the pointer in the same context as ref_cell.
        ref = None
        sheet = None
        if ref_cell is not None:
            sheet = ref_cell.sheet
            if not ref_cell.is_named_range:
                ref = parse_cell_address(ref_cell.address())
        e = shunting_yard(formula, names, ref = ref, tokenize_range = False)
        debug = False
        ast,root = build_ast(e, debug = debug)
        code = root.emit(ast, context = sheet, pointer = True)
        return code

    [start_code, end_code] = list(map(build_code, [start, end]))

    # string replacements so that cellmap keys and pointer Range names are coherent
    if ref_cell:
        start_code = start_code.replace("'", '"')
        end_code = end_code.replace("'", '"')
        ref_cell.python_expression = ref_cell.python_expression.replace(code, "%s:%s" % (start_code, end_code))

    return {
        "start": start_code,
        "end": end_code
    }
def graph_from_seeds(seeds, cell_source):
    """
    This creates/updates a networkx graph from a list of cells.

    The graph is created when the cell_source is an instance of ExcelCompiler
    The graph is updated when the cell_source is an instance of Spreadsheet

    Returns the pair (cellmap, G): the address->Cell map and the dependency
    DiGraph whose edges point from a dependency to its dependent cell.

    NOTE(review): `is_range` and `split_address`, used below, are not among
    the imports visible in this module -- confirm they are in scope.
    """
    # when called from Spreadsheet instance, use the Spreadsheet cellmap and graph
    if hasattr(cell_source, 'G'): # ~ cell_source is a Spreadsheet
        cellmap = cell_source.cellmap
        cells = cellmap
        G = cell_source.G
        for c in seeds:
            G.add_node(c)
            cellmap[c.address()] = c
    # when called from ExcelCompiler instance, construct cellmap and graph from seeds
    else: # ~ cell_source is a ExcelCompiler
        cellmap = dict([(x.address(),x) for x in seeds])
        cells = cell_source.cells
        # directed graph
        G = networkx.DiGraph()
        # match the info in cellmap
        for c in cellmap.values(): G.add_node(c)

    # cells to analyze: only formulas
    todo = [s for s in seeds if s.formula]
    steps = [i for i,s in enumerate(todo)]
    names = cell_source.named_ranges

    while todo:
        c1 = todo.pop()
        step = steps.pop()
        cursheet = c1.sheet

        ###### 1) looking for cell c1 dependencies ####################
        # print 'C1', c1.address()
        # in case a formula, get all cells that are arguments
        pystr, ast = cell2code(c1, names)
        # set the code & compile it (will flag problems sooner rather than later)
        c1.python_expression = pystr.replace('"', "'") # compilation is done later

        if 'OFFSET' in c1.formula or 'INDEX' in c1.formula:
            if c1.address() not in cell_source.named_ranges: # pointers names already treated in ExcelCompiler
                cell_source.pointers.add(c1.address())

        # get all the cells/ranges this formula refers to
        deps = [x for x in ast.nodes() if isinstance(x,RangeNode)]
        # remove dupes
        deps = uniqueify(deps)

        ###### 2) connect dependencies in cells in graph ####################

        # ### LOG
        # tmp = []
        # for dep in deps:
        #     if dep not in names:
        #         if "!" not in dep and cursheet != None:
        #             dep = cursheet + "!" + dep
        #     if dep not in cellmap:
        #         tmp.append(dep)
        # #deps = tmp
        # logStep = "%s %s = %s " % ('|'*step, c1.address(), '',)
        # print logStep
        # if len(deps) > 1 and 'L' in deps[0] and deps[0] == deps[-1].replace('DG','L'):
        #     print logStep, "[%s...%s]" % (deps[0], deps[-1])
        # elif len(deps) > 0:
        #     print logStep, "->", deps
        # else:
        #     print logStep, "done"

        for dep in deps:
            dep_name = dep.tvalue.replace('$','')

            # this is to avoid :A1 or A1: dep due to clean_pointers() returning an ExcelError
            if dep_name.startswith(':') or dep_name.endswith(':'):
                dep_name = dep_name.replace(':', '')

            # if not pointer, we need an absolute address
            if dep.tsubtype != 'pointer' and dep_name not in names and "!" not in dep_name and cursheet != None:
                dep_name = cursheet + "!" + dep_name

            # Named_ranges + ranges already parsed (previous iterations)
            if dep_name in cellmap:
                origins = [cellmap[dep_name]]
                target = cellmap[c1.address()]
            # if the dep_name is a multi-cell range, create a range object
            elif is_range(dep_name) or (dep_name in names and is_range(names[dep_name])):
                if dep_name in names:
                    reference = names[dep_name]
                else:
                    reference = dep_name

                if 'OFFSET' in reference or 'INDEX' in reference:
                    # Pointer range: bounds are computed lazily at evaluation.
                    start_end = prepare_pointer(reference, names, ref_cell = c1)
                    rng = cell_source.Range(start_end)

                    if dep_name in names: # dep is a pointer range
                        address = dep_name
                    else:
                        if c1.address() in names: # c1 holds is a pointer range
                            address = c1.address()
                        else: # a pointer range with no name, its address will be its name
                            address = '%s:%s' % (start_end["start"], start_end["end"])
                            cell_source.pointers.add(address)
                else:
                    address = dep_name

                    # get a list of the addresses in this range that are not yet in the graph
                    range_addresses = list(resolve_range(reference, should_flatten=True)[0])
                    cellmap_add_addresses = [addr for addr in range_addresses if addr not in cellmap.keys()]

                    if len(cellmap_add_addresses) > 0:
                        # this means there are cells to be added

                        # get row and col dimensions for the sheet, assuming the whole range is in one sheet
                        sheet_initial = split_address(cellmap_add_addresses[0])[0]
                        max_rows, max_cols = max_dimension(cellmap, sheet_initial)

                        # create empty cells that aren't in the cellmap
                        for addr in cellmap_add_addresses:
                            sheet_new, col_new, row_new = split_address(addr)

                            # if somehow a new sheet comes up in the range, get the new dimensions
                            if sheet_new != sheet_initial:
                                sheet_initial = sheet_new
                                max_rows, max_cols = max_dimension(cellmap, sheet_new)

                            # add the empty cells
                            if int(row_new) <= max_rows and int(col2num(col_new)) <= max_cols:
                                # only add cells within the maximum bounds of the sheet to avoid too many evaluations
                                # for A:A or 1:1 ranges
                                cell_new = Cell(addr, sheet_new, value="", should_eval='False') # create new cell object
                                cellmap[addr] = cell_new # add it to the cellmap
                                G.add_node(cell_new) # add it to the graph
                                cell_source.cells[addr] = cell_new # add it to the cell_source, used in this function

                    rng = cell_source.Range(reference)

                if address in cellmap:
                    virtual_cell = cellmap[address]
                else:
                    virtual_cell = Cell(address, None, value = rng, formula = reference, is_range = True, is_named_range = True )
                    # save the range
                    cellmap[address] = virtual_cell

                # add an edge from the range to the parent
                G.add_node(virtual_cell)
                # Cell(A1:A10) -> c1 or Cell(ExampleName) -> c1
                G.add_edge(virtual_cell, c1)
                # cells in the range should point to the range as their parent
                target = virtual_cell
                origins = []

                if len(list(rng.keys())) != 0: # could be better, but can't check on Exception types here...
                    for child in rng.addresses:
                        if child not in cellmap:
                            origins.append(cells[child])
                        else:
                            origins.append(cellmap[child])
            else:
                # not a range
                if dep_name in names:
                    reference = names[dep_name]
                else:
                    reference = dep_name

                if reference in cells:
                    if dep_name in names:
                        # named single cell: insert a virtual cell between the
                        # real cell and its dependents
                        virtual_cell = Cell(dep_name, None, value = cells[reference].value, formula = reference, is_range = False, is_named_range = True )

                        G.add_node(virtual_cell)
                        G.add_edge(cells[reference], virtual_cell)

                        origins = [virtual_cell]
                    else:
                        cell = cells[reference]
                        origins = [cell]

                    cell = origins[0]

                    if cell.formula is not None and ('OFFSET' in cell.formula or 'INDEX' in cell.formula):
                        cell_source.pointers.add(cell.address())
                else:
                    # reference to a cell that does not exist yet: placeholder
                    virtual_cell = Cell(dep_name, None, value = None, formula = None, is_range = False, is_named_range = True )
                    origins = [virtual_cell]

                target = c1

            # process each cell
            for c2 in flatten(origins):
                # if we havent treated this cell allready
                if c2.address() not in cellmap:
                    if c2.formula:
                        # cell with a formula, needs to be added to the todo list
                        todo.append(c2)
                        steps.append(step+1)
                    else:
                        # constant cell, no need for further processing, just remember to set the code
                        pystr,ast = cell2code(c2, names)
                        c2.python_expression = pystr
                        c2.compile()

                    # save in the cellmap
                    cellmap[c2.address()] = c2
                    # add to the graph
                    G.add_node(c2)

                # add an edge from the cell to the parent (range or cell)
                if(target != []):
                    # print "Adding edge %s --> %s" % (c2.address(), target.address())
                    G.add_edge(c2,target)

        c1.compile() # cell compilation is done here because pointer ranges might update python_expressions

    return (cellmap, G)
|
anthill/koala | koala/ast/__init__.py | shunting_yard | python | def shunting_yard(expression, named_ranges, ref = None, tokenize_range = False):
#remove leading =
if expression.startswith('='):
expression = expression[1:]
p = ExcelParser(tokenize_range = tokenize_range);
p.parse(expression)
# insert tokens for '(' and ')', to make things clearer below
tokens = []
for t in p.tokens.items:
if t.ttype == "function" and t.tsubtype == "start":
t.tsubtype = ""
tokens.append(t)
tokens.append(f_token('(','arglist','start'))
elif t.ttype == "function" and t.tsubtype == "stop":
tokens.append(f_token(')','arglist','stop'))
elif t.ttype == "subexpression" and t.tsubtype == "start":
t.tvalue = '('
tokens.append(t)
elif t.ttype == "subexpression" and t.tsubtype == "stop":
t.tvalue = ')'
tokens.append(t)
elif t.ttype == "operand" and t.tsubtype == "range" and t.tvalue in named_ranges:
t.tsubtype = "named_range"
tokens.append(t)
else:
tokens.append(t)
#http://office.microsoft.com/en-us/excel-help/calculation-operators-and-precedence-HP010078886.aspx
operators = {}
operators[':'] = Operator(':',8,'left')
operators[''] = Operator(' ',8,'left')
operators[','] = Operator(',',8,'left')
operators['u-'] = Operator('u-',7,'left') #unary negation
operators['%'] = Operator('%',6,'left')
operators['^'] = Operator('^',5,'left')
operators['*'] = Operator('*',4,'left')
operators['/'] = Operator('/',4,'left')
operators['+'] = Operator('+',3,'left')
operators['-'] = Operator('-',3,'left')
operators['&'] = Operator('&',2,'left')
operators['='] = Operator('=',1,'left')
operators['<'] = Operator('<',1,'left')
operators['>'] = Operator('>',1,'left')
operators['<='] = Operator('<=',1,'left')
operators['>='] = Operator('>=',1,'left')
operators['<>'] = Operator('<>',1,'left')
output = collections.deque()
stack = []
were_values = []
arg_count = []
new_tokens = []
# reconstruct expressions with ':' and replace the corresponding tokens by the reconstructed expression
if not tokenize_range:
for index, token in enumerate(tokens):
new_tokens.append(token)
if type(token.tvalue) == str or type(token.tvalue) == unicode:
if token.tvalue.startswith(':'): # example -> :OFFSET( or simply :A10
depth = 0
expr = ''
rev = reversed(tokens[:index])
for t in rev: # going backwards, 'stop' starts, 'start' stops
if t.tsubtype == 'stop':
depth += 1
elif depth > 0 and t.tsubtype == 'start':
depth -= 1
expr = t.tvalue + expr
new_tokens.pop()
if depth == 0:
new_tokens.pop() # these 2 lines are needed to remove INDEX()
new_tokens.pop()
expr = six.next(rev).tvalue + expr
break
expr += token.tvalue
depth = 0
if token.tvalue[1:] in ['OFFSET', 'INDEX']:
for t in tokens[(index + 1):]:
if t.tsubtype == 'start':
depth += 1
elif depth > 0 and t.tsubtype == 'stop':
depth -= 1
expr += t.tvalue
tokens.remove(t)
if depth == 0:
break
new_tokens.append(f_token(expr, 'operand', 'pointer'))
elif ':OFFSET' in token.tvalue or ':INDEX' in token.tvalue: # example -> A1:OFFSET(
depth = 0
expr = ''
expr += token.tvalue
for t in tokens[(index + 1):]:
if t.tsubtype == 'start':
depth += 1
elif t.tsubtype == 'stop':
depth -= 1
expr += t.tvalue
tokens.remove(t)
if depth == 0:
new_tokens.pop()
break
new_tokens.append(f_token(expr, 'operand', 'pointer'))
tokens = new_tokens if new_tokens else tokens
for t in tokens:
if t.ttype == "operand":
output.append(create_node(t, ref))
if were_values:
were_values.pop()
were_values.append(True)
elif t.ttype == "function":
stack.append(t)
arg_count.append(0)
if were_values:
were_values.pop()
were_values.append(True)
were_values.append(False)
elif t.ttype == "argument":
while stack and (stack[-1].tsubtype != "start"):
output.append(create_node(stack.pop(), ref))
if were_values.pop(): arg_count[-1] += 1
were_values.append(False)
if not len(stack):
raise Exception("Mismatched or misplaced parentheses")
elif t.ttype.startswith('operator'):
if t.ttype.endswith('-prefix') and t.tvalue =="-":
o1 = operators['u-']
else:
o1 = operators[t.tvalue]
while stack and stack[-1].ttype.startswith('operator'):
if stack[-1].ttype.endswith('-prefix') and stack[-1].tvalue =="-":
o2 = operators['u-']
else:
o2 = operators[stack[-1].tvalue]
if ( (o1.associativity == "left" and o1.precedence <= o2.precedence)
or
(o1.associativity == "right" and o1.precedence < o2.precedence) ):
output.append(create_node(stack.pop(), ref))
else:
break
stack.append(t)
elif t.tsubtype == "start":
stack.append(t)
elif t.tsubtype == "stop":
while stack and stack[-1].tsubtype != "start":
output.append(create_node(stack.pop(), ref))
if not stack:
raise Exception("Mismatched or misplaced parentheses")
stack.pop()
if stack and stack[-1].ttype == "function":
f = create_node(stack.pop(), ref)
a = arg_count.pop()
w = were_values.pop()
if w: a += 1
f.num_args = a
#print f, "has ",a," args"
output.append(f)
while stack:
if (stack[-1].tsubtype == "start" or stack[-1].tsubtype == "stop"):
raise Exception("Mismatched or misplaced parentheses")
output.append(create_node(stack.pop(), ref))
# convert to list
return [x for x in output] | Tokenize an excel formula expression into reverse polish notation
Core algorithm taken from wikipedia with varargs extensions from
http://www.kallisti.net.nz/blog/2008/02/extension-to-the-shunting-yard-algorithm-to-allow-variable-numbers-of-arguments-to-functions/
The ref is the cell address which is passed down to the actual compiled python code.
Range basic operations signature require this reference, so it has to be written during OperatorNode.emit()
https://github.com/iOiurson/koala/blob/master/koala/ast/graph.py#L292.
This is needed because Excel range basic operations (+, -, * ...) are applied on matching cells.
Example:
Cell C2 has the following formula 'A1:A3 + B1:B3'.
The output will actually be A2 + B2, because the formula is relative to cell C2. | train | https://github.com/anthill/koala/blob/393089fe081380506e73235db18a32b4e078d222/koala/ast/__init__.py#L42-L269 | [
"def create_node(t, ref = None, debug = False):\n \"\"\"Simple factory function\"\"\"\n if t.ttype == \"operand\":\n if t.tsubtype in [\"range\", \"named_range\", \"pointer\"] :\n # print 'Creating Node', t.tvalue, t.tsubtype\n return RangeNode(t, ref, debug = debug)\n else:\n return OperandNode(t)\n elif t.ttype == \"function\":\n return FunctionNode(t, ref, debug = debug)\n elif t.ttype.startswith(\"operator\"):\n return OperatorNode(t, ref, debug = debug)\n else:\n return ASTNode(t, debug = debug)\n",
"def parse(self, formula):\n self.tokens = self.getTokens(formula)\n"
] | from __future__ import absolute_import
# cython: profile=True
import collections
import six
import networkx
from networkx.classes.digraph import DiGraph
from openpyxl.compat import unicode
from koala.utils import uniqueify, flatten, max_dimension, col2num, resolve_range
from koala.Cell import Cell
from koala.Range import parse_cell_address
from koala.tokenizer import ExcelParser, f_token
from .astnodes import *
def create_node(t, ref = None, debug = False):
"""Simple factory function"""
if t.ttype == "operand":
if t.tsubtype in ["range", "named_range", "pointer"] :
# print 'Creating Node', t.tvalue, t.tsubtype
return RangeNode(t, ref, debug = debug)
else:
return OperandNode(t)
elif t.ttype == "function":
return FunctionNode(t, ref, debug = debug)
elif t.ttype.startswith("operator"):
return OperatorNode(t, ref, debug = debug)
else:
return ASTNode(t, debug = debug)
class Operator(object):
"""Small wrapper class to manage operators during shunting yard"""
def __init__(self,value,precedence,associativity):
self.value = value
self.precedence = precedence
self.associativity = associativity
def build_ast(expression, debug = False):
"""build an AST from an Excel formula expression in reverse polish notation"""
#use a directed graph to store the tree
G = DiGraph()
stack = []
for n in expression:
# Since the graph does not maintain the order of adding nodes/edges
# add an extra attribute 'pos' so we can always sort to the correct order
if isinstance(n,OperatorNode):
if n.ttype == "operator-infix":
arg2 = stack.pop()
arg1 = stack.pop()
# Hack to write the name of sheet in 2argument address
if(n.tvalue == ':'):
if '!' in arg1.tvalue and arg2.ttype == 'operand' and '!' not in arg2.tvalue:
arg2.tvalue = arg1.tvalue.split('!')[0] + '!' + arg2.tvalue
G.add_node(arg1,pos = 1)
G.add_node(arg2,pos = 2)
G.add_edge(arg1, n)
G.add_edge(arg2, n)
else:
arg1 = stack.pop()
G.add_node(arg1,pos = 1)
G.add_edge(arg1, n)
elif isinstance(n,FunctionNode):
args = []
for _ in range(n.num_args):
try:
args.append(stack.pop())
except:
raise Exception()
#try:
# args = [stack.pop() for _ in range(n.num_args)]
#except:
# print 'STACK', stack, type(n)
# raise Exception('prut')
args.reverse()
for i,a in enumerate(args):
G.add_node(a,pos = i)
G.add_edge(a,n)
else:
G.add_node(n,pos=0)
stack.append(n)
return G,stack.pop()
# Whats the difference between subgraph() and make_subgraph() ?
def subgraph(G, seed):
subgraph = networkx.DiGraph()
todo = [(seed,n) for n in G.predecessors(seed)]
while len(todo) > 1:
previous, current = todo.pop()
addr = current.address()
subgraph.add_node(current)
subgraph.add_edge(previous, current)
for n in G.predecessors(current):
if n not in subgraph.nodes():
todo += [(current,n)]
return subgraph
def make_subgraph(G, seed, direction = "ascending"):
subgraph = networkx.DiGraph()
if direction == "ascending":
todo = [(seed,n) for n in G.predecessors(seed)]
else:
todo = [(seed,n) for n in G.successors(seed)]
while len(todo) > 0:
neighbor, current = todo.pop()
subgraph.add_node(current)
subgraph.add_edge(neighbor, current)
if direction == "ascending":
nexts = G.predecessors(current)
else:
nexts = G.successors(current)
for n in nexts:
if n not in subgraph.nodes():
todo += [(current,n)]
return subgraph
def cell2code(cell, named_ranges):
"""Generate python code for the given cell"""
if cell.formula:
debug = False
# if 'OFFSET' in cell.formula or 'INDEX' in cell.formula:
# debug = True
# if debug:
# print 'FORMULA', cell.formula
ref = parse_cell_address(cell.address()) if not cell.is_named_range else None
sheet = cell.sheet
e = shunting_yard(cell.formula, named_ranges, ref=ref, tokenize_range = False)
ast,root = build_ast(e, debug = debug)
code = root.emit(ast, context=sheet)
# print 'CODE', code, ref
else:
ast = None
if isinstance(cell.value, unicode):
code = u'u"' + cell.value.replace(u'"', u'\\"') + u'"'
elif isinstance(cell.value, str):
raise RuntimeError("Got unexpected non-unicode str")
else:
code = str(cell.value)
return code,ast
def prepare_pointer(code, names, ref_cell = None):
# if ref_cell is None, it means that the pointer is a named_range
try:
start, end = code.split('):')
start += ')'
except:
try:
start, end = code.split(':INDEX')
end = 'INDEX' + end
except:
start, end = code.split(':OFFSET')
end = 'OFFSET' + end
def build_code(formula):
ref = None
sheet = None
if ref_cell is not None:
sheet = ref_cell.sheet
if not ref_cell.is_named_range:
ref = parse_cell_address(ref_cell.address())
e = shunting_yard(formula, names, ref = ref, tokenize_range = False)
debug = False
ast,root = build_ast(e, debug = debug)
code = root.emit(ast, context = sheet, pointer = True)
return code
[start_code, end_code] = list(map(build_code, [start, end]))
# string replacements so that cellmap keys and pointer Range names are coherent
if ref_cell:
start_code = start_code.replace("'", '"')
end_code = end_code.replace("'", '"')
ref_cell.python_expression = ref_cell.python_expression.replace(code, "%s:%s" % (start_code, end_code))
return {
"start": start_code,
"end": end_code
}
def graph_from_seeds(seeds, cell_source):
"""
This creates/updates a networkx graph from a list of cells.
The graph is created when the cell_source is an instance of ExcelCompiler
The graph is updated when the cell_source is an instance of Spreadsheet
"""
# when called from Spreadsheet instance, use the Spreadsheet cellmap and graph
if hasattr(cell_source, 'G'): # ~ cell_source is a Spreadsheet
cellmap = cell_source.cellmap
cells = cellmap
G = cell_source.G
for c in seeds:
G.add_node(c)
cellmap[c.address()] = c
# when called from ExcelCompiler instance, construct cellmap and graph from seeds
else: # ~ cell_source is a ExcelCompiler
cellmap = dict([(x.address(),x) for x in seeds])
cells = cell_source.cells
# directed graph
G = networkx.DiGraph()
# match the info in cellmap
for c in cellmap.values(): G.add_node(c)
# cells to analyze: only formulas
todo = [s for s in seeds if s.formula]
steps = [i for i,s in enumerate(todo)]
names = cell_source.named_ranges
while todo:
c1 = todo.pop()
step = steps.pop()
cursheet = c1.sheet
###### 1) looking for cell c1 dependencies ####################
# print 'C1', c1.address()
# in case a formula, get all cells that are arguments
pystr, ast = cell2code(c1, names)
# set the code & compile it (will flag problems sooner rather than later)
c1.python_expression = pystr.replace('"', "'") # compilation is done later
if 'OFFSET' in c1.formula or 'INDEX' in c1.formula:
if c1.address() not in cell_source.named_ranges: # pointers names already treated in ExcelCompiler
cell_source.pointers.add(c1.address())
# get all the cells/ranges this formula refers to
deps = [x for x in ast.nodes() if isinstance(x,RangeNode)]
# remove dupes
deps = uniqueify(deps)
###### 2) connect dependencies in cells in graph ####################
# ### LOG
# tmp = []
# for dep in deps:
# if dep not in names:
# if "!" not in dep and cursheet != None:
# dep = cursheet + "!" + dep
# if dep not in cellmap:
# tmp.append(dep)
# #deps = tmp
# logStep = "%s %s = %s " % ('|'*step, c1.address(), '',)
# print logStep
# if len(deps) > 1 and 'L' in deps[0] and deps[0] == deps[-1].replace('DG','L'):
# print logStep, "[%s...%s]" % (deps[0], deps[-1])
# elif len(deps) > 0:
# print logStep, "->", deps
# else:
# print logStep, "done"
for dep in deps:
dep_name = dep.tvalue.replace('$','')
# this is to avoid :A1 or A1: dep due to clean_pointers() returning an ExcelError
if dep_name.startswith(':') or dep_name.endswith(':'):
dep_name = dep_name.replace(':', '')
# if not pointer, we need an absolute address
if dep.tsubtype != 'pointer' and dep_name not in names and "!" not in dep_name and cursheet != None:
dep_name = cursheet + "!" + dep_name
# Named_ranges + ranges already parsed (previous iterations)
if dep_name in cellmap:
origins = [cellmap[dep_name]]
target = cellmap[c1.address()]
# if the dep_name is a multi-cell range, create a range object
elif is_range(dep_name) or (dep_name in names and is_range(names[dep_name])):
if dep_name in names:
reference = names[dep_name]
else:
reference = dep_name
if 'OFFSET' in reference or 'INDEX' in reference:
start_end = prepare_pointer(reference, names, ref_cell = c1)
rng = cell_source.Range(start_end)
if dep_name in names: # dep is a pointer range
address = dep_name
else:
if c1.address() in names: # c1 holds is a pointer range
address = c1.address()
else: # a pointer range with no name, its address will be its name
address = '%s:%s' % (start_end["start"], start_end["end"])
cell_source.pointers.add(address)
else:
address = dep_name
# get a list of the addresses in this range that are not yet in the graph
range_addresses = list(resolve_range(reference, should_flatten=True)[0])
cellmap_add_addresses = [addr for addr in range_addresses if addr not in cellmap.keys()]
if len(cellmap_add_addresses) > 0:
# this means there are cells to be added
# get row and col dimensions for the sheet, assuming the whole range is in one sheet
sheet_initial = split_address(cellmap_add_addresses[0])[0]
max_rows, max_cols = max_dimension(cellmap, sheet_initial)
# create empty cells that aren't in the cellmap
for addr in cellmap_add_addresses:
sheet_new, col_new, row_new = split_address(addr)
# if somehow a new sheet comes up in the range, get the new dimensions
if sheet_new != sheet_initial:
sheet_initial = sheet_new
max_rows, max_cols = max_dimension(cellmap, sheet_new)
# add the empty cells
if int(row_new) <= max_rows and int(col2num(col_new)) <= max_cols:
# only add cells within the maximum bounds of the sheet to avoid too many evaluations
# for A:A or 1:1 ranges
cell_new = Cell(addr, sheet_new, value="", should_eval='False') # create new cell object
cellmap[addr] = cell_new # add it to the cellmap
G.add_node(cell_new) # add it to the graph
cell_source.cells[addr] = cell_new # add it to the cell_source, used in this function
rng = cell_source.Range(reference)
if address in cellmap:
virtual_cell = cellmap[address]
else:
virtual_cell = Cell(address, None, value = rng, formula = reference, is_range = True, is_named_range = True )
# save the range
cellmap[address] = virtual_cell
# add an edge from the range to the parent
G.add_node(virtual_cell)
# Cell(A1:A10) -> c1 or Cell(ExampleName) -> c1
G.add_edge(virtual_cell, c1)
# cells in the range should point to the range as their parent
target = virtual_cell
origins = []
if len(list(rng.keys())) != 0: # could be better, but can't check on Exception types here...
for child in rng.addresses:
if child not in cellmap:
origins.append(cells[child])
else:
origins.append(cellmap[child])
else:
# not a range
if dep_name in names:
reference = names[dep_name]
else:
reference = dep_name
if reference in cells:
if dep_name in names:
virtual_cell = Cell(dep_name, None, value = cells[reference].value, formula = reference, is_range = False, is_named_range = True )
G.add_node(virtual_cell)
G.add_edge(cells[reference], virtual_cell)
origins = [virtual_cell]
else:
cell = cells[reference]
origins = [cell]
cell = origins[0]
if cell.formula is not None and ('OFFSET' in cell.formula or 'INDEX' in cell.formula):
cell_source.pointers.add(cell.address())
else:
virtual_cell = Cell(dep_name, None, value = None, formula = None, is_range = False, is_named_range = True )
origins = [virtual_cell]
target = c1
# process each cell
for c2 in flatten(origins):
# if we havent treated this cell allready
if c2.address() not in cellmap:
if c2.formula:
# cell with a formula, needs to be added to the todo list
todo.append(c2)
steps.append(step+1)
else:
# constant cell, no need for further processing, just remember to set the code
pystr,ast = cell2code(c2, names)
c2.python_expression = pystr
c2.compile()
# save in the cellmap
cellmap[c2.address()] = c2
# add to the graph
G.add_node(c2)
# add an edge from the cell to the parent (range or cell)
if(target != []):
# print "Adding edge %s --> %s" % (c2.address(), target.address())
G.add_edge(c2,target)
c1.compile() # cell compilation is done here because pointer ranges might update python_expressions
return (cellmap, G)
|
anthill/koala | koala/ast/__init__.py | build_ast | python | def build_ast(expression, debug = False):
#use a directed graph to store the tree
G = DiGraph()
stack = []
for n in expression:
# Since the graph does not maintain the order of adding nodes/edges
# add an extra attribute 'pos' so we can always sort to the correct order
if isinstance(n,OperatorNode):
if n.ttype == "operator-infix":
arg2 = stack.pop()
arg1 = stack.pop()
# Hack to write the name of sheet in 2argument address
if(n.tvalue == ':'):
if '!' in arg1.tvalue and arg2.ttype == 'operand' and '!' not in arg2.tvalue:
arg2.tvalue = arg1.tvalue.split('!')[0] + '!' + arg2.tvalue
G.add_node(arg1,pos = 1)
G.add_node(arg2,pos = 2)
G.add_edge(arg1, n)
G.add_edge(arg2, n)
else:
arg1 = stack.pop()
G.add_node(arg1,pos = 1)
G.add_edge(arg1, n)
elif isinstance(n,FunctionNode):
args = []
for _ in range(n.num_args):
try:
args.append(stack.pop())
except:
raise Exception()
#try:
# args = [stack.pop() for _ in range(n.num_args)]
#except:
# print 'STACK', stack, type(n)
# raise Exception('prut')
args.reverse()
for i,a in enumerate(args):
G.add_node(a,pos = i)
G.add_edge(a,n)
else:
G.add_node(n,pos=0)
stack.append(n)
return G,stack.pop() | build an AST from an Excel formula expression in reverse polish notation | train | https://github.com/anthill/koala/blob/393089fe081380506e73235db18a32b4e078d222/koala/ast/__init__.py#L271-L320 | null | from __future__ import absolute_import
# cython: profile=True
import collections
import six
import networkx
from networkx.classes.digraph import DiGraph
from openpyxl.compat import unicode
from koala.utils import uniqueify, flatten, max_dimension, col2num, resolve_range
from koala.Cell import Cell
from koala.Range import parse_cell_address
from koala.tokenizer import ExcelParser, f_token
from .astnodes import *
def create_node(t, ref = None, debug = False):
"""Simple factory function"""
if t.ttype == "operand":
if t.tsubtype in ["range", "named_range", "pointer"] :
# print 'Creating Node', t.tvalue, t.tsubtype
return RangeNode(t, ref, debug = debug)
else:
return OperandNode(t)
elif t.ttype == "function":
return FunctionNode(t, ref, debug = debug)
elif t.ttype.startswith("operator"):
return OperatorNode(t, ref, debug = debug)
else:
return ASTNode(t, debug = debug)
class Operator(object):
"""Small wrapper class to manage operators during shunting yard"""
def __init__(self,value,precedence,associativity):
self.value = value
self.precedence = precedence
self.associativity = associativity
def shunting_yard(expression, named_ranges, ref = None, tokenize_range = False):
"""
Tokenize an excel formula expression into reverse polish notation
Core algorithm taken from wikipedia with varargs extensions from
http://www.kallisti.net.nz/blog/2008/02/extension-to-the-shunting-yard-algorithm-to-allow-variable-numbers-of-arguments-to-functions/
The ref is the cell address which is passed down to the actual compiled python code.
Range basic operations signature require this reference, so it has to be written during OperatorNode.emit()
https://github.com/iOiurson/koala/blob/master/koala/ast/graph.py#L292.
This is needed because Excel range basic operations (+, -, * ...) are applied on matching cells.
Example:
Cell C2 has the following formula 'A1:A3 + B1:B3'.
The output will actually be A2 + B2, because the formula is relative to cell C2.
"""
#remove leading =
if expression.startswith('='):
expression = expression[1:]
p = ExcelParser(tokenize_range = tokenize_range);
p.parse(expression)
# insert tokens for '(' and ')', to make things clearer below
tokens = []
for t in p.tokens.items:
if t.ttype == "function" and t.tsubtype == "start":
t.tsubtype = ""
tokens.append(t)
tokens.append(f_token('(','arglist','start'))
elif t.ttype == "function" and t.tsubtype == "stop":
tokens.append(f_token(')','arglist','stop'))
elif t.ttype == "subexpression" and t.tsubtype == "start":
t.tvalue = '('
tokens.append(t)
elif t.ttype == "subexpression" and t.tsubtype == "stop":
t.tvalue = ')'
tokens.append(t)
elif t.ttype == "operand" and t.tsubtype == "range" and t.tvalue in named_ranges:
t.tsubtype = "named_range"
tokens.append(t)
else:
tokens.append(t)
#http://office.microsoft.com/en-us/excel-help/calculation-operators-and-precedence-HP010078886.aspx
operators = {}
operators[':'] = Operator(':',8,'left')
operators[''] = Operator(' ',8,'left')
operators[','] = Operator(',',8,'left')
operators['u-'] = Operator('u-',7,'left') #unary negation
operators['%'] = Operator('%',6,'left')
operators['^'] = Operator('^',5,'left')
operators['*'] = Operator('*',4,'left')
operators['/'] = Operator('/',4,'left')
operators['+'] = Operator('+',3,'left')
operators['-'] = Operator('-',3,'left')
operators['&'] = Operator('&',2,'left')
operators['='] = Operator('=',1,'left')
operators['<'] = Operator('<',1,'left')
operators['>'] = Operator('>',1,'left')
operators['<='] = Operator('<=',1,'left')
operators['>='] = Operator('>=',1,'left')
operators['<>'] = Operator('<>',1,'left')
output = collections.deque()
stack = []
were_values = []
arg_count = []
new_tokens = []
# reconstruct expressions with ':' and replace the corresponding tokens by the reconstructed expression
if not tokenize_range:
for index, token in enumerate(tokens):
new_tokens.append(token)
if type(token.tvalue) == str or type(token.tvalue) == unicode:
if token.tvalue.startswith(':'): # example -> :OFFSET( or simply :A10
depth = 0
expr = ''
rev = reversed(tokens[:index])
for t in rev: # going backwards, 'stop' starts, 'start' stops
if t.tsubtype == 'stop':
depth += 1
elif depth > 0 and t.tsubtype == 'start':
depth -= 1
expr = t.tvalue + expr
new_tokens.pop()
if depth == 0:
new_tokens.pop() # these 2 lines are needed to remove INDEX()
new_tokens.pop()
expr = six.next(rev).tvalue + expr
break
expr += token.tvalue
depth = 0
if token.tvalue[1:] in ['OFFSET', 'INDEX']:
for t in tokens[(index + 1):]:
if t.tsubtype == 'start':
depth += 1
elif depth > 0 and t.tsubtype == 'stop':
depth -= 1
expr += t.tvalue
tokens.remove(t)
if depth == 0:
break
new_tokens.append(f_token(expr, 'operand', 'pointer'))
elif ':OFFSET' in token.tvalue or ':INDEX' in token.tvalue: # example -> A1:OFFSET(
depth = 0
expr = ''
expr += token.tvalue
for t in tokens[(index + 1):]:
if t.tsubtype == 'start':
depth += 1
elif t.tsubtype == 'stop':
depth -= 1
expr += t.tvalue
tokens.remove(t)
if depth == 0:
new_tokens.pop()
break
new_tokens.append(f_token(expr, 'operand', 'pointer'))
tokens = new_tokens if new_tokens else tokens
for t in tokens:
if t.ttype == "operand":
output.append(create_node(t, ref))
if were_values:
were_values.pop()
were_values.append(True)
elif t.ttype == "function":
stack.append(t)
arg_count.append(0)
if were_values:
were_values.pop()
were_values.append(True)
were_values.append(False)
elif t.ttype == "argument":
while stack and (stack[-1].tsubtype != "start"):
output.append(create_node(stack.pop(), ref))
if were_values.pop(): arg_count[-1] += 1
were_values.append(False)
if not len(stack):
raise Exception("Mismatched or misplaced parentheses")
elif t.ttype.startswith('operator'):
if t.ttype.endswith('-prefix') and t.tvalue =="-":
o1 = operators['u-']
else:
o1 = operators[t.tvalue]
while stack and stack[-1].ttype.startswith('operator'):
if stack[-1].ttype.endswith('-prefix') and stack[-1].tvalue =="-":
o2 = operators['u-']
else:
o2 = operators[stack[-1].tvalue]
if ( (o1.associativity == "left" and o1.precedence <= o2.precedence)
or
(o1.associativity == "right" and o1.precedence < o2.precedence) ):
output.append(create_node(stack.pop(), ref))
else:
break
stack.append(t)
elif t.tsubtype == "start":
stack.append(t)
elif t.tsubtype == "stop":
while stack and stack[-1].tsubtype != "start":
output.append(create_node(stack.pop(), ref))
if not stack:
raise Exception("Mismatched or misplaced parentheses")
stack.pop()
if stack and stack[-1].ttype == "function":
f = create_node(stack.pop(), ref)
a = arg_count.pop()
w = were_values.pop()
if w: a += 1
f.num_args = a
#print f, "has ",a," args"
output.append(f)
while stack:
if (stack[-1].tsubtype == "start" or stack[-1].tsubtype == "stop"):
raise Exception("Mismatched or misplaced parentheses")
output.append(create_node(stack.pop(), ref))
# convert to list
return [x for x in output]
# Whats the difference between subgraph() and make_subgraph() ?
def subgraph(G, seed):
subgraph = networkx.DiGraph()
todo = [(seed,n) for n in G.predecessors(seed)]
while len(todo) > 1:
previous, current = todo.pop()
addr = current.address()
subgraph.add_node(current)
subgraph.add_edge(previous, current)
for n in G.predecessors(current):
if n not in subgraph.nodes():
todo += [(current,n)]
return subgraph
def make_subgraph(G, seed, direction = "ascending"):
subgraph = networkx.DiGraph()
if direction == "ascending":
todo = [(seed,n) for n in G.predecessors(seed)]
else:
todo = [(seed,n) for n in G.successors(seed)]
while len(todo) > 0:
neighbor, current = todo.pop()
subgraph.add_node(current)
subgraph.add_edge(neighbor, current)
if direction == "ascending":
nexts = G.predecessors(current)
else:
nexts = G.successors(current)
for n in nexts:
if n not in subgraph.nodes():
todo += [(current,n)]
return subgraph
def cell2code(cell, named_ranges):
"""Generate python code for the given cell"""
if cell.formula:
debug = False
# if 'OFFSET' in cell.formula or 'INDEX' in cell.formula:
# debug = True
# if debug:
# print 'FORMULA', cell.formula
ref = parse_cell_address(cell.address()) if not cell.is_named_range else None
sheet = cell.sheet
e = shunting_yard(cell.formula, named_ranges, ref=ref, tokenize_range = False)
ast,root = build_ast(e, debug = debug)
code = root.emit(ast, context=sheet)
# print 'CODE', code, ref
else:
ast = None
if isinstance(cell.value, unicode):
code = u'u"' + cell.value.replace(u'"', u'\\"') + u'"'
elif isinstance(cell.value, str):
raise RuntimeError("Got unexpected non-unicode str")
else:
code = str(cell.value)
return code,ast
def prepare_pointer(code, names, ref_cell = None):
# if ref_cell is None, it means that the pointer is a named_range
try:
start, end = code.split('):')
start += ')'
except:
try:
start, end = code.split(':INDEX')
end = 'INDEX' + end
except:
start, end = code.split(':OFFSET')
end = 'OFFSET' + end
def build_code(formula):
ref = None
sheet = None
if ref_cell is not None:
sheet = ref_cell.sheet
if not ref_cell.is_named_range:
ref = parse_cell_address(ref_cell.address())
e = shunting_yard(formula, names, ref = ref, tokenize_range = False)
debug = False
ast,root = build_ast(e, debug = debug)
code = root.emit(ast, context = sheet, pointer = True)
return code
[start_code, end_code] = list(map(build_code, [start, end]))
# string replacements so that cellmap keys and pointer Range names are coherent
if ref_cell:
start_code = start_code.replace("'", '"')
end_code = end_code.replace("'", '"')
ref_cell.python_expression = ref_cell.python_expression.replace(code, "%s:%s" % (start_code, end_code))
return {
"start": start_code,
"end": end_code
}
def graph_from_seeds(seeds, cell_source):
    """
    This creates/updates a networkx graph from a list of cells.

    The graph is created when the cell_source is an instance of ExcelCompiler
    The graph is updated when the cell_source is an instance of Spreadsheet

    Returns a (cellmap, G) tuple: cellmap maps addresses to Cell objects and
    G is the dependency DiGraph, with edges running from precedent
    cells/ranges to the cells whose formulas use them.
    """
    # when called from Spreadsheet instance, use the Spreadsheet cellmap and graph
    if hasattr(cell_source, 'G'): # ~ cell_source is a Spreadsheet
        cellmap = cell_source.cellmap
        cells = cellmap
        G = cell_source.G
        for c in seeds:
            G.add_node(c)
            cellmap[c.address()] = c
    # when called from ExcelCompiler instance, construct cellmap and graph from seeds
    else: # ~ cell_source is a ExcelCompiler
        cellmap = dict([(x.address(),x) for x in seeds])
        cells = cell_source.cells
        # directed graph
        G = networkx.DiGraph()
        # match the info in cellmap
        for c in cellmap.values(): G.add_node(c)

    # cells to analyze: only formulas
    todo = [s for s in seeds if s.formula]
    # `steps` mirrors `todo` and records each cell's depth in the walk
    steps = [i for i,s in enumerate(todo)]
    names = cell_source.named_ranges

    while todo:
        c1 = todo.pop()
        step = steps.pop()
        cursheet = c1.sheet

        ###### 1) looking for cell c1 dependencies ####################
        # print 'C1', c1.address()
        # in case a formula, get all cells that are arguments
        pystr, ast = cell2code(c1, names)
        # set the code & compile it (will flag problems sooner rather than later)
        c1.python_expression = pystr.replace('"', "'") # compilation is done later

        if 'OFFSET' in c1.formula or 'INDEX' in c1.formula:
            if c1.address() not in cell_source.named_ranges: # pointers names already treated in ExcelCompiler
                cell_source.pointers.add(c1.address())

        # get all the cells/ranges this formula refers to
        deps = [x for x in ast.nodes() if isinstance(x,RangeNode)]
        # remove dupes
        deps = uniqueify(deps)

        ###### 2) connect dependencies in cells in graph ####################

        # ### LOG
        # tmp = []
        # for dep in deps:
        #     if dep not in names:
        #         if "!" not in dep and cursheet != None:
        #             dep = cursheet + "!" + dep
        #     if dep not in cellmap:
        #         tmp.append(dep)
        # #deps = tmp
        # logStep = "%s %s = %s " % ('|'*step, c1.address(), '',)
        # print logStep
        # if len(deps) > 1 and 'L' in deps[0] and deps[0] == deps[-1].replace('DG','L'):
        #     print logStep, "[%s...%s]" % (deps[0], deps[-1])
        # elif len(deps) > 0:
        #     print logStep, "->", deps
        # else:
        #     print logStep, "done"

        for dep in deps:
            dep_name = dep.tvalue.replace('$','')

            # this is to avoid :A1 or A1: dep due to clean_pointers() returning an ExcelError
            if dep_name.startswith(':') or dep_name.endswith(':'):
                dep_name = dep_name.replace(':', '')

            # if not pointer, we need an absolute address
            if dep.tsubtype != 'pointer' and dep_name not in names and "!" not in dep_name and cursheet != None:
                dep_name = cursheet + "!" + dep_name

            # Named_ranges + ranges already parsed (previous iterations)
            if dep_name in cellmap:
                origins = [cellmap[dep_name]]
                target = cellmap[c1.address()]

            # if the dep_name is a multi-cell range, create a range object
            elif is_range(dep_name) or (dep_name in names and is_range(names[dep_name])):
                if dep_name in names:
                    reference = names[dep_name]
                else:
                    reference = dep_name

                if 'OFFSET' in reference or 'INDEX' in reference:
                    start_end = prepare_pointer(reference, names, ref_cell = c1)
                    rng = cell_source.Range(start_end)

                    if dep_name in names: # dep is a pointer range
                        address = dep_name
                    else:
                        if c1.address() in names: # c1 holds is a pointer range
                            address = c1.address()
                        else: # a pointer range with no name, its address will be its name
                            address = '%s:%s' % (start_end["start"], start_end["end"])
                            # NOTE(review): only this unnamed case is registered
                            # here — named pointers were collected earlier by
                            # ExcelCompiler (see comment above); confirm.
                            cell_source.pointers.add(address)
                else:
                    address = dep_name

                    # get a list of the addresses in this range that are not yet in the graph
                    range_addresses = list(resolve_range(reference, should_flatten=True)[0])
                    cellmap_add_addresses = [addr for addr in range_addresses if addr not in cellmap.keys()]

                    if len(cellmap_add_addresses) > 0:
                        # this means there are cells to be added
                        # get row and col dimensions for the sheet, assuming the whole range is in one sheet
                        sheet_initial = split_address(cellmap_add_addresses[0])[0]
                        max_rows, max_cols = max_dimension(cellmap, sheet_initial)

                        # create empty cells that aren't in the cellmap
                        for addr in cellmap_add_addresses:
                            sheet_new, col_new, row_new = split_address(addr)

                            # if somehow a new sheet comes up in the range, get the new dimensions
                            if sheet_new != sheet_initial:
                                sheet_initial = sheet_new
                                max_rows, max_cols = max_dimension(cellmap, sheet_new)

                            # add the empty cells
                            if int(row_new) <= max_rows and int(col2num(col_new)) <= max_cols:
                                # only add cells within the maximum bounds of the sheet to avoid too many evaluations
                                # for A:A or 1:1 ranges
                                cell_new = Cell(addr, sheet_new, value="", should_eval='False') # create new cell object
                                cellmap[addr] = cell_new # add it to the cellmap
                                G.add_node(cell_new) # add it to the graph
                                cell_source.cells[addr] = cell_new # add it to the cell_source, used in this function

                    rng = cell_source.Range(reference)

                if address in cellmap:
                    virtual_cell = cellmap[address]
                else:
                    virtual_cell = Cell(address, None, value = rng, formula = reference, is_range = True, is_named_range = True )
                    # save the range
                    cellmap[address] = virtual_cell

                # add an edge from the range to the parent
                G.add_node(virtual_cell)
                # Cell(A1:A10) -> c1 or Cell(ExampleName) -> c1
                G.add_edge(virtual_cell, c1)
                # cells in the range should point to the range as their parent
                target = virtual_cell
                origins = []

                if len(list(rng.keys())) != 0: # could be better, but can't check on Exception types here...
                    for child in rng.addresses:
                        if child not in cellmap:
                            origins.append(cells[child])
                        else:
                            origins.append(cellmap[child])
            else:
                # not a range
                if dep_name in names:
                    reference = names[dep_name]
                else:
                    reference = dep_name

                if reference in cells:
                    if dep_name in names:
                        # named single cell: insert a virtual cell between the
                        # real cell and the dependent formula
                        virtual_cell = Cell(dep_name, None, value = cells[reference].value, formula = reference, is_range = False, is_named_range = True )
                        G.add_node(virtual_cell)
                        G.add_edge(cells[reference], virtual_cell)
                        origins = [virtual_cell]
                    else:
                        cell = cells[reference]
                        origins = [cell]

                    # `origins[0]` is the virtual cell in the named case, the
                    # real cell otherwise
                    cell = origins[0]

                    if cell.formula is not None and ('OFFSET' in cell.formula or 'INDEX' in cell.formula):
                        cell_source.pointers.add(cell.address())
                else:
                    # reference not (yet) known: create an empty placeholder
                    virtual_cell = Cell(dep_name, None, value = None, formula = None, is_range = False, is_named_range = True )
                    origins = [virtual_cell]

                target = c1

            # process each cell
            for c2 in flatten(origins):
                # if we havent treated this cell allready
                if c2.address() not in cellmap:
                    if c2.formula:
                        # cell with a formula, needs to be added to the todo list
                        todo.append(c2)
                        steps.append(step+1)
                    else:
                        # constant cell, no need for further processing, just remember to set the code
                        pystr,ast = cell2code(c2, names)
                        c2.python_expression = pystr
                        c2.compile()

                    # save in the cellmap
                    cellmap[c2.address()] = c2
                    # add to the graph
                    G.add_node(c2)

                # add an edge from the cell to the parent (range or cell)
                if(target != []):
                    # print "Adding edge %s --> %s" % (c2.address(), target.address())
                    G.add_edge(c2,target)

        c1.compile() # cell compilation is done here because pointer ranges might update python_expressions

    return (cellmap, G)
|
anthill/koala | koala/ast/__init__.py | cell2code | python | def cell2code(cell, named_ranges):
if cell.formula:
debug = False
# if 'OFFSET' in cell.formula or 'INDEX' in cell.formula:
# debug = True
# if debug:
# print 'FORMULA', cell.formula
ref = parse_cell_address(cell.address()) if not cell.is_named_range else None
sheet = cell.sheet
e = shunting_yard(cell.formula, named_ranges, ref=ref, tokenize_range = False)
ast,root = build_ast(e, debug = debug)
code = root.emit(ast, context=sheet)
# print 'CODE', code, ref
else:
ast = None
if isinstance(cell.value, unicode):
code = u'u"' + cell.value.replace(u'"', u'\\"') + u'"'
elif isinstance(cell.value, str):
raise RuntimeError("Got unexpected non-unicode str")
else:
code = str(cell.value)
return code,ast | Generate python code for the given cell | train | https://github.com/anthill/koala/blob/393089fe081380506e73235db18a32b4e078d222/koala/ast/__init__.py#L358-L386 | [
"def shunting_yard(expression, named_ranges, ref = None, tokenize_range = False):\n \"\"\"\n Tokenize an excel formula expression into reverse polish notation\n\n Core algorithm taken from wikipedia with varargs extensions from\n http://www.kallisti.net.nz/blog/2008/02/extension-to-the-shunting-yard-algorithm-to-allow-variable-numbers-of-arguments-to-functions/\n\n\n The ref is the cell address which is passed down to the actual compiled python code.\n Range basic operations signature require this reference, so it has to be written during OperatorNode.emit()\n https://github.com/iOiurson/koala/blob/master/koala/ast/graph.py#L292.\n\n This is needed because Excel range basic operations (+, -, * ...) are applied on matching cells.\n\n Example:\n Cell C2 has the following formula 'A1:A3 + B1:B3'.\n The output will actually be A2 + B2, because the formula is relative to cell C2.\n \"\"\"\n\n #remove leading =\n if expression.startswith('='):\n expression = expression[1:]\n\n p = ExcelParser(tokenize_range = tokenize_range);\n p.parse(expression)\n\n # insert tokens for '(' and ')', to make things clearer below\n tokens = []\n for t in p.tokens.items:\n if t.ttype == \"function\" and t.tsubtype == \"start\":\n t.tsubtype = \"\"\n tokens.append(t)\n tokens.append(f_token('(','arglist','start'))\n elif t.ttype == \"function\" and t.tsubtype == \"stop\":\n tokens.append(f_token(')','arglist','stop'))\n elif t.ttype == \"subexpression\" and t.tsubtype == \"start\":\n t.tvalue = '('\n tokens.append(t)\n elif t.ttype == \"subexpression\" and t.tsubtype == \"stop\":\n t.tvalue = ')'\n tokens.append(t)\n elif t.ttype == \"operand\" and t.tsubtype == \"range\" and t.tvalue in named_ranges:\n t.tsubtype = \"named_range\"\n tokens.append(t)\n else:\n tokens.append(t)\n\n #http://office.microsoft.com/en-us/excel-help/calculation-operators-and-precedence-HP010078886.aspx\n operators = {}\n operators[':'] = Operator(':',8,'left')\n operators[''] = Operator(' ',8,'left')\n 
operators[','] = Operator(',',8,'left')\n operators['u-'] = Operator('u-',7,'left') #unary negation\n operators['%'] = Operator('%',6,'left')\n operators['^'] = Operator('^',5,'left')\n operators['*'] = Operator('*',4,'left')\n operators['/'] = Operator('/',4,'left')\n operators['+'] = Operator('+',3,'left')\n operators['-'] = Operator('-',3,'left')\n operators['&'] = Operator('&',2,'left')\n operators['='] = Operator('=',1,'left')\n operators['<'] = Operator('<',1,'left')\n operators['>'] = Operator('>',1,'left')\n operators['<='] = Operator('<=',1,'left')\n operators['>='] = Operator('>=',1,'left')\n operators['<>'] = Operator('<>',1,'left')\n\n output = collections.deque()\n stack = []\n were_values = []\n arg_count = []\n\n new_tokens = []\n\n # reconstruct expressions with ':' and replace the corresponding tokens by the reconstructed expression\n if not tokenize_range:\n for index, token in enumerate(tokens):\n new_tokens.append(token)\n\n if type(token.tvalue) == str or type(token.tvalue) == unicode:\n\n if token.tvalue.startswith(':'): # example -> :OFFSET( or simply :A10\n depth = 0\n expr = ''\n\n rev = reversed(tokens[:index])\n\n for t in rev: # going backwards, 'stop' starts, 'start' stops\n if t.tsubtype == 'stop':\n depth += 1\n elif depth > 0 and t.tsubtype == 'start':\n depth -= 1\n\n expr = t.tvalue + expr\n\n new_tokens.pop()\n\n if depth == 0:\n new_tokens.pop() # these 2 lines are needed to remove INDEX()\n new_tokens.pop()\n expr = six.next(rev).tvalue + expr\n break\n\n expr += token.tvalue\n\n depth = 0\n\n if token.tvalue[1:] in ['OFFSET', 'INDEX']:\n for t in tokens[(index + 1):]:\n if t.tsubtype == 'start':\n depth += 1\n elif depth > 0 and t.tsubtype == 'stop':\n depth -= 1\n\n expr += t.tvalue\n\n tokens.remove(t)\n\n if depth == 0:\n break\n\n new_tokens.append(f_token(expr, 'operand', 'pointer'))\n\n elif ':OFFSET' in token.tvalue or ':INDEX' in token.tvalue: # example -> A1:OFFSET(\n depth = 0\n expr = ''\n\n expr += token.tvalue\n\n 
for t in tokens[(index + 1):]:\n if t.tsubtype == 'start':\n depth += 1\n elif t.tsubtype == 'stop':\n depth -= 1\n\n expr += t.tvalue\n\n tokens.remove(t)\n\n if depth == 0:\n new_tokens.pop()\n break\n\n new_tokens.append(f_token(expr, 'operand', 'pointer'))\n\n\n tokens = new_tokens if new_tokens else tokens\n\n for t in tokens:\n\n if t.ttype == \"operand\":\n output.append(create_node(t, ref))\n if were_values:\n were_values.pop()\n were_values.append(True)\n\n elif t.ttype == \"function\":\n stack.append(t)\n arg_count.append(0)\n if were_values:\n were_values.pop()\n were_values.append(True)\n were_values.append(False)\n\n elif t.ttype == \"argument\":\n\n while stack and (stack[-1].tsubtype != \"start\"):\n output.append(create_node(stack.pop(), ref))\n\n if were_values.pop(): arg_count[-1] += 1\n were_values.append(False)\n\n if not len(stack):\n raise Exception(\"Mismatched or misplaced parentheses\")\n\n elif t.ttype.startswith('operator'):\n\n if t.ttype.endswith('-prefix') and t.tvalue ==\"-\":\n o1 = operators['u-']\n else:\n o1 = operators[t.tvalue]\n\n while stack and stack[-1].ttype.startswith('operator'):\n\n if stack[-1].ttype.endswith('-prefix') and stack[-1].tvalue ==\"-\":\n o2 = operators['u-']\n else:\n o2 = operators[stack[-1].tvalue]\n\n if ( (o1.associativity == \"left\" and o1.precedence <= o2.precedence)\n or\n (o1.associativity == \"right\" and o1.precedence < o2.precedence) ):\n output.append(create_node(stack.pop(), ref))\n else:\n break\n stack.append(t)\n\n elif t.tsubtype == \"start\":\n stack.append(t)\n\n elif t.tsubtype == \"stop\":\n\n while stack and stack[-1].tsubtype != \"start\":\n output.append(create_node(stack.pop(), ref))\n\n if not stack:\n raise Exception(\"Mismatched or misplaced parentheses\")\n stack.pop()\n\n if stack and stack[-1].ttype == \"function\":\n f = create_node(stack.pop(), ref)\n a = arg_count.pop()\n w = were_values.pop()\n if w: a += 1\n f.num_args = a\n #print f, \"has \",a,\" args\"\n 
output.append(f)\n\n\n\n while stack:\n if (stack[-1].tsubtype == \"start\" or stack[-1].tsubtype == \"stop\"):\n raise Exception(\"Mismatched or misplaced parentheses\")\n\n output.append(create_node(stack.pop(), ref))\n\n # convert to list\n return [x for x in output]\n",
"def build_ast(expression, debug = False):\n \"\"\"build an AST from an Excel formula expression in reverse polish notation\"\"\"\n #use a directed graph to store the tree\n G = DiGraph()\n stack = []\n\n\n for n in expression:\n # Since the graph does not maintain the order of adding nodes/edges\n # add an extra attribute 'pos' so we can always sort to the correct order\n if isinstance(n,OperatorNode):\n if n.ttype == \"operator-infix\":\n arg2 = stack.pop()\n arg1 = stack.pop()\n # Hack to write the name of sheet in 2argument address\n if(n.tvalue == ':'):\n if '!' in arg1.tvalue and arg2.ttype == 'operand' and '!' not in arg2.tvalue:\n arg2.tvalue = arg1.tvalue.split('!')[0] + '!' + arg2.tvalue\n\n G.add_node(arg1,pos = 1)\n G.add_node(arg2,pos = 2)\n G.add_edge(arg1, n)\n G.add_edge(arg2, n)\n else:\n arg1 = stack.pop()\n G.add_node(arg1,pos = 1)\n G.add_edge(arg1, n)\n\n elif isinstance(n,FunctionNode):\n args = []\n for _ in range(n.num_args):\n try:\n args.append(stack.pop())\n except:\n raise Exception()\n #try:\n # args = [stack.pop() for _ in range(n.num_args)]\n #except:\n # print 'STACK', stack, type(n)\n # raise Exception('prut')\n args.reverse()\n for i,a in enumerate(args):\n G.add_node(a,pos = i)\n G.add_edge(a,n)\n else:\n G.add_node(n,pos=0)\n\n stack.append(n)\n\n return G,stack.pop()\n",
"def parse_cell_address(ref):\n # A1 => (1, 'A')\n try:\n if ref not in parse_cell_addr_cache:\n found = re.search(CELL_REF_RE, ref)\n col = found.group(1)\n row = found.group(2)\n result = (int(row), col)\n parse_cell_addr_cache[ref] = result\n return result\n else:\n return parse_cell_addr_cache[ref]\n except:\n raise Exception('Couldn\\'t find match in cell ref')\n",
"def address(self, absolute=True):\n if self.is_named_range:\n return self.__named_range\n elif absolute:\n return self.__absolute_address\n else:\n return self.__address\n"
] | from __future__ import absolute_import
# cython: profile=True
import collections
import six
import networkx
from networkx.classes.digraph import DiGraph
from openpyxl.compat import unicode
from koala.utils import uniqueify, flatten, max_dimension, col2num, resolve_range
from koala.Cell import Cell
from koala.Range import parse_cell_address
from koala.tokenizer import ExcelParser, f_token
from .astnodes import *
def create_node(t, ref = None, debug = False):
    """Factory: wrap a parser token in the matching AST node class.

    Operand tokens become RangeNode (for ranges, named ranges and pointers)
    or OperandNode; function tokens become FunctionNode; operator-* tokens
    become OperatorNode; anything else falls back to a plain ASTNode.
    """
    kind = t.ttype
    if kind == "operand":
        if t.tsubtype not in ("range", "named_range", "pointer"):
            return OperandNode(t)
        return RangeNode(t, ref, debug = debug)
    if kind == "function":
        return FunctionNode(t, ref, debug = debug)
    if kind.startswith("operator"):
        return OperatorNode(t, ref, debug = debug)
    return ASTNode(t, debug = debug)
class Operator(object):
    """Lightweight record describing one operator for the shunting-yard pass.

    Holds the operator symbol (`value`), its precedence (higher binds
    tighter) and its associativity ("left" or "right").
    """

    def __init__(self, value, precedence, associativity):
        # Store the three attributes verbatim; no validation is performed.
        self.value = value
        self.precedence = precedence
        self.associativity = associativity
def shunting_yard(expression, named_ranges, ref = None, tokenize_range = False):
    """
    Tokenize an excel formula expression into reverse polish notation

    Core algorithm taken from wikipedia with varargs extensions from
    http://www.kallisti.net.nz/blog/2008/02/extension-to-the-shunting-yard-algorithm-to-allow-variable-numbers-of-arguments-to-functions/

    The ref is the cell address which is passed down to the actual compiled python code.
    Range basic operations signature require this reference, so it has to be written during OperatorNode.emit()
    https://github.com/iOiurson/koala/blob/master/koala/ast/graph.py#L292.

    This is needed because Excel range basic operations (+, -, * ...) are applied on matching cells.

    Example:
    Cell C2 has the following formula 'A1:A3 + B1:B3'.
    The output will actually be A2 + B2, because the formula is relative to cell C2.

    Returns a list of AST nodes in RPN order (operands before operators).
    """
    #remove leading =
    if expression.startswith('='):
        expression = expression[1:]

    p = ExcelParser(tokenize_range = tokenize_range);
    p.parse(expression)

    # insert tokens for '(' and ')', to make things clearer below
    tokens = []
    for t in p.tokens.items:
        if t.ttype == "function" and t.tsubtype == "start":
            t.tsubtype = ""
            tokens.append(t)
            tokens.append(f_token('(','arglist','start'))
        elif t.ttype == "function" and t.tsubtype == "stop":
            tokens.append(f_token(')','arglist','stop'))
        elif t.ttype == "subexpression" and t.tsubtype == "start":
            t.tvalue = '('
            tokens.append(t)
        elif t.ttype == "subexpression" and t.tsubtype == "stop":
            t.tvalue = ')'
            tokens.append(t)
        elif t.ttype == "operand" and t.tsubtype == "range" and t.tvalue in named_ranges:
            t.tsubtype = "named_range"
            tokens.append(t)
        else:
            tokens.append(t)

    #http://office.microsoft.com/en-us/excel-help/calculation-operators-and-precedence-HP010078886.aspx
    operators = {}
    operators[':'] = Operator(':',8,'left')
    operators[''] = Operator(' ',8,'left')
    operators[','] = Operator(',',8,'left')
    operators['u-'] = Operator('u-',7,'left') #unary negation
    operators['%'] = Operator('%',6,'left')
    operators['^'] = Operator('^',5,'left')
    operators['*'] = Operator('*',4,'left')
    operators['/'] = Operator('/',4,'left')
    operators['+'] = Operator('+',3,'left')
    operators['-'] = Operator('-',3,'left')
    operators['&'] = Operator('&',2,'left')
    operators['='] = Operator('=',1,'left')
    operators['<'] = Operator('<',1,'left')
    operators['>'] = Operator('>',1,'left')
    operators['<='] = Operator('<=',1,'left')
    operators['>='] = Operator('>=',1,'left')
    operators['<>'] = Operator('<>',1,'left')

    output = collections.deque()
    stack = []
    were_values = []
    arg_count = []

    new_tokens = []

    # reconstruct expressions with ':' and replace the corresponding tokens by the reconstructed expression
    # (pointer expressions like "A1:OFFSET(...)" or ":INDEX(...)" are glued
    # back together into a single 'pointer' operand token)
    if not tokenize_range:
        for index, token in enumerate(tokens):
            new_tokens.append(token)

            if type(token.tvalue) == str or type(token.tvalue) == unicode:

                if token.tvalue.startswith(':'): # example -> :OFFSET( or simply :A10
                    depth = 0
                    expr = ''

                    rev = reversed(tokens[:index])

                    # walk backwards to recover the left-hand side of ':'
                    for t in rev: # going backwards, 'stop' starts, 'start' stops
                        if t.tsubtype == 'stop':
                            depth += 1
                        elif depth > 0 and t.tsubtype == 'start':
                            depth -= 1

                        expr = t.tvalue + expr

                        new_tokens.pop()

                        if depth == 0:
                            new_tokens.pop() # these 2 lines are needed to remove INDEX()
                            new_tokens.pop()
                            expr = six.next(rev).tvalue + expr
                            break

                    expr += token.tvalue

                    depth = 0

                    if token.tvalue[1:] in ['OFFSET', 'INDEX']:
                        # consume the right-hand side call's tokens as well
                        for t in tokens[(index + 1):]:
                            if t.tsubtype == 'start':
                                depth += 1
                            elif depth > 0 and t.tsubtype == 'stop':
                                depth -= 1

                            expr += t.tvalue

                            tokens.remove(t)

                            if depth == 0:
                                break

                    new_tokens.append(f_token(expr, 'operand', 'pointer'))

                elif ':OFFSET' in token.tvalue or ':INDEX' in token.tvalue: # example -> A1:OFFSET(
                    depth = 0
                    expr = ''

                    expr += token.tvalue

                    for t in tokens[(index + 1):]:
                        if t.tsubtype == 'start':
                            depth += 1
                        elif t.tsubtype == 'stop':
                            depth -= 1

                        expr += t.tvalue

                        tokens.remove(t)

                        if depth == 0:
                            new_tokens.pop()
                            break

                    new_tokens.append(f_token(expr, 'operand', 'pointer'))

    tokens = new_tokens if new_tokens else tokens

    # classic shunting-yard: shift tokens to `output` (RPN) via the operator
    # `stack`; `were_values`/`arg_count` implement the varargs extension
    for t in tokens:

        if t.ttype == "operand":
            output.append(create_node(t, ref))
            if were_values:
                were_values.pop()
                were_values.append(True)

        elif t.ttype == "function":
            stack.append(t)
            arg_count.append(0)
            if were_values:
                were_values.pop()
                were_values.append(True)
            were_values.append(False)

        elif t.ttype == "argument":

            while stack and (stack[-1].tsubtype != "start"):
                output.append(create_node(stack.pop(), ref))

            if were_values.pop(): arg_count[-1] += 1
            were_values.append(False)

            if not len(stack):
                raise Exception("Mismatched or misplaced parentheses")

        elif t.ttype.startswith('operator'):

            if t.ttype.endswith('-prefix') and t.tvalue =="-":
                o1 = operators['u-']
            else:
                o1 = operators[t.tvalue]

            while stack and stack[-1].ttype.startswith('operator'):

                if stack[-1].ttype.endswith('-prefix') and stack[-1].tvalue =="-":
                    o2 = operators['u-']
                else:
                    o2 = operators[stack[-1].tvalue]

                if ( (o1.associativity == "left" and o1.precedence <= o2.precedence)
                        or
                      (o1.associativity == "right" and o1.precedence < o2.precedence) ):
                    output.append(create_node(stack.pop(), ref))
                else:
                    break
            stack.append(t)

        elif t.tsubtype == "start":
            stack.append(t)

        elif t.tsubtype == "stop":

            while stack and stack[-1].tsubtype != "start":
                output.append(create_node(stack.pop(), ref))

            if not stack:
                raise Exception("Mismatched or misplaced parentheses")
            stack.pop()

            if stack and stack[-1].ttype == "function":
                f = create_node(stack.pop(), ref)
                a = arg_count.pop()
                w = were_values.pop()
                if w: a += 1
                f.num_args = a
                #print f, "has ",a," args"
                output.append(f)

    # drain any remaining operators
    while stack:
        if (stack[-1].tsubtype == "start" or stack[-1].tsubtype == "stop"):
            raise Exception("Mismatched or misplaced parentheses")

        output.append(create_node(stack.pop(), ref))

    # convert to list
    return [x for x in output]
def build_ast(expression, debug = False):
    """build an AST from an Excel formula expression in reverse polish notation

    `expression` is the RPN node list produced by shunting_yard(). Returns a
    (graph, root) tuple where graph is a DiGraph whose edges run from each
    argument to its operator/function node (with a 'pos' attribute giving
    the argument position) and root is the last node, i.e. the tree root.
    """
    # use a directed graph to store the tree
    G = DiGraph()
    stack = []

    for n in expression:
        # Since the graph does not maintain the order of adding nodes/edges
        # add an extra attribute 'pos' so we can always sort to the correct order
        if isinstance(n, OperatorNode):
            if n.ttype == "operator-infix":
                arg2 = stack.pop()
                arg1 = stack.pop()
                # Hack to write the name of sheet in 2argument address
                if n.tvalue == ':':
                    if '!' in arg1.tvalue and arg2.ttype == 'operand' and '!' not in arg2.tvalue:
                        arg2.tvalue = arg1.tvalue.split('!')[0] + '!' + arg2.tvalue

                G.add_node(arg1, pos = 1)
                G.add_node(arg2, pos = 2)
                G.add_edge(arg1, n)
                G.add_edge(arg2, n)
            else:
                # prefix/postfix operators take a single argument
                arg1 = stack.pop()
                G.add_node(arg1, pos = 1)
                G.add_edge(arg1, n)

        elif isinstance(n, FunctionNode):
            args = []
            for _ in range(n.num_args):
                try:
                    args.append(stack.pop())
                except IndexError:
                    # previously a bare `except: raise Exception()` which
                    # discarded all context; keep raising Exception for
                    # callers, but say what actually went wrong
                    raise Exception("Not enough arguments on the stack for function %r" % n.tvalue)
            args.reverse()
            for i, a in enumerate(args):
                G.add_node(a, pos = i)
                G.add_edge(a, n)
        else:
            G.add_node(n, pos = 0)

        stack.append(n)

    # the last node standing is the root of the expression tree
    return G, stack.pop()
# What's the difference between subgraph() and make_subgraph()? make_subgraph() generalizes subgraph() with a direction argument.
def subgraph(G, seed):
    """Return the subgraph of all ancestors (transitive predecessors) of `seed`.

    Equivalent to make_subgraph(G, seed, direction="ascending"): edges in the
    result point from each node to the predecessor explored from it.
    """
    subgraph = networkx.DiGraph()
    todo = [(seed, n) for n in G.predecessors(seed)]
    # BUG FIX: this loop previously ran `while len(todo) > 1`, which left the
    # last queued edge unprocessed and silently dropped part of the subgraph
    # (compare make_subgraph(), which correctly drains the whole worklist).
    while todo:
        previous, current = todo.pop()
        subgraph.add_node(current)
        subgraph.add_edge(previous, current)
        for n in G.predecessors(current):
            if n not in subgraph.nodes():
                todo += [(current, n)]
    return subgraph
def make_subgraph(G, seed, direction = "ascending"):
    """Collect the part of G reachable from `seed` into a new DiGraph.

    Walks predecessors when `direction` is "ascending", successors otherwise;
    the returned graph's edges record the traversal (explored-from -> node).
    """
    result = networkx.DiGraph()
    # The walk direction never changes, so resolve the neighbor accessor once.
    if direction == "ascending":
        neighbors_of = G.predecessors
    else:
        neighbors_of = G.successors

    pending = [(seed, n) for n in neighbors_of(seed)]
    while pending:
        neighbor, current = pending.pop()
        result.add_node(current)
        result.add_edge(neighbor, current)
        pending.extend(
            (current, n) for n in neighbors_of(current) if n not in result.nodes()
        )
    return result
def prepare_pointer(code, names, ref_cell = None):
    """Split a pointer formula (an OFFSET/INDEX range) into compiled halves.

    `code` has the shape ``<start>:<end>`` where either side may be an
    OFFSET(...) or INDEX(...) call. Each side is compiled separately and
    returned as ``{"start": ..., "end": ...}``. When `ref_cell` is given its
    python_expression is patched in place; when it is None the pointer comes
    from a named range.
    """
    # Unpacking the result of str.split into two names raises ValueError when
    # the separator is absent (or occurs more than once); catch exactly that.
    # The previous bare `except:` also hid real bugs (e.g. AttributeError on
    # a non-string `code`), which now propagate.
    try:
        # e.g. "OFFSET(...):B2" -> cut right after the closing parenthesis
        start, end = code.split('):')
        start += ')'
    except ValueError:
        try:
            # e.g. "A1:INDEX(...)"
            start, end = code.split(':INDEX')
            end = 'INDEX' + end
        except ValueError:
            # e.g. "A1:OFFSET(...)"; a ValueError here means `code` is not a
            # recognized pointer expression and is allowed to propagate
            start, end = code.split(':OFFSET')
            end = 'OFFSET' + end

    def build_code(formula):
        # Compile one half of the pointer, anchored to ref_cell if provided.
        ref = None
        sheet = None
        if ref_cell is not None:
            sheet = ref_cell.sheet
            if not ref_cell.is_named_range:
                ref = parse_cell_address(ref_cell.address())
        e = shunting_yard(formula, names, ref = ref, tokenize_range = False)
        ast, root = build_ast(e, debug = False)
        return root.emit(ast, context = sheet, pointer = True)

    [start_code, end_code] = list(map(build_code, [start, end]))

    # string replacements so that cellmap keys and pointer Range names are coherent
    if ref_cell:
        start_code = start_code.replace("'", '"')
        end_code = end_code.replace("'", '"')
        ref_cell.python_expression = ref_cell.python_expression.replace(code, "%s:%s" % (start_code, end_code))

    return {
        "start": start_code,
        "end": end_code
    }
def graph_from_seeds(seeds, cell_source):
    """
    This creates/updates a networkx graph from a list of cells.
    The graph is created when the cell_source is an instance of ExcelCompiler
    The graph is updated when the cell_source is an instance of Spreadsheet

    :param seeds: iterable of Cell objects used as graph roots.
    :param cell_source: ExcelCompiler or Spreadsheet; must expose
        ``named_ranges``, ``pointers``, ``cells`` and a ``Range`` factory.
    :return: tuple ``(cellmap, G)`` -- an address -> Cell dict and the
        dependency DiGraph (edges point dependency -> dependent).
    """
    # when called from Spreadsheet instance, use the Spreadsheet cellmap and graph
    if hasattr(cell_source, 'G'): # ~ cell_source is a Spreadsheet
        cellmap = cell_source.cellmap
        cells = cellmap  # NOTE: in this branch 'cells' aliases the live cellmap
        G = cell_source.G
        for c in seeds:
            G.add_node(c)
            cellmap[c.address()] = c
    # when called from ExcelCompiler instance, construct cellmap and graph from seeds
    else: # ~ cell_source is a ExcelCompiler
        cellmap = dict([(x.address(),x) for x in seeds])
        cells = cell_source.cells
        # directed graph
        G = networkx.DiGraph()
        # match the info in cellmap
        for c in cellmap.values(): G.add_node(c)
    # cells to analyze: only formulas
    todo = [s for s in seeds if s.formula]
    # 'steps' is a stack parallel to 'todo' tracking traversal depth (used by
    # the commented-out logging below)
    steps = [i for i,s in enumerate(todo)]
    names = cell_source.named_ranges
    while todo:
        c1 = todo.pop()
        step = steps.pop()
        cursheet = c1.sheet
        ###### 1) looking for cell c1 dependencies ####################
        # print 'C1', c1.address()
        # in case a formula, get all cells that are arguments
        pystr, ast = cell2code(c1, names)
        # set the code & compile it (will flag problems sooner rather than later)
        c1.python_expression = pystr.replace('"', "'") # compilation is done later
        if 'OFFSET' in c1.formula or 'INDEX' in c1.formula:
            if c1.address() not in cell_source.named_ranges: # pointers names already treated in ExcelCompiler
                cell_source.pointers.add(c1.address())
        # get all the cells/ranges this formula refers to
        deps = [x for x in ast.nodes() if isinstance(x,RangeNode)]
        # remove dupes
        deps = uniqueify(deps)
        ###### 2) connect dependencies in cells in graph ####################
        # ### LOG
        # tmp = []
        # for dep in deps:
        #     if dep not in names:
        #         if "!" not in dep and cursheet != None:
        #             dep = cursheet + "!" + dep
        #     if dep not in cellmap:
        #         tmp.append(dep)
        # #deps = tmp
        # logStep = "%s %s = %s " % ('|'*step, c1.address(), '',)
        # print logStep
        # if len(deps) > 1 and 'L' in deps[0] and deps[0] == deps[-1].replace('DG','L'):
        #     print logStep, "[%s...%s]" % (deps[0], deps[-1])
        # elif len(deps) > 0:
        #     print logStep, "->", deps
        # else:
        #     print logStep, "done"
        for dep in deps:
            dep_name = dep.tvalue.replace('$','')
            # this is to avoid :A1 or A1: dep due to clean_pointers() returning an ExcelError
            if dep_name.startswith(':') or dep_name.endswith(':'):
                dep_name = dep_name.replace(':', '')
            # if not pointer, we need an absolute address
            if dep.tsubtype != 'pointer' and dep_name not in names and "!" not in dep_name and cursheet != None:
                dep_name = cursheet + "!" + dep_name
            # Named_ranges + ranges already parsed (previous iterations)
            if dep_name in cellmap:
                origins = [cellmap[dep_name]]
                target = cellmap[c1.address()]
            # if the dep_name is a multi-cell range, create a range object
            elif is_range(dep_name) or (dep_name in names and is_range(names[dep_name])):
                if dep_name in names:
                    reference = names[dep_name]
                else:
                    reference = dep_name
                if 'OFFSET' in reference or 'INDEX' in reference:
                    # pointer range: compile start/end and register it
                    start_end = prepare_pointer(reference, names, ref_cell = c1)
                    rng = cell_source.Range(start_end)
                    if dep_name in names: # dep is a pointer range
                        address = dep_name
                    else:
                        if c1.address() in names: # c1 holds is a pointer range
                            address = c1.address()
                        else: # a pointer range with no name, its address will be its name
                            address = '%s:%s' % (start_end["start"], start_end["end"])
                            cell_source.pointers.add(address)
                else:
                    address = dep_name
                    # get a list of the addresses in this range that are not yet in the graph
                    range_addresses = list(resolve_range(reference, should_flatten=True)[0])
                    cellmap_add_addresses = [addr for addr in range_addresses if addr not in cellmap.keys()]
                    if len(cellmap_add_addresses) > 0:
                        # this means there are cells to be added
                        # get row and col dimensions for the sheet, assuming the whole range is in one sheet
                        sheet_initial = split_address(cellmap_add_addresses[0])[0]
                        max_rows, max_cols = max_dimension(cellmap, sheet_initial)
                        # create empty cells that aren't in the cellmap
                        for addr in cellmap_add_addresses:
                            sheet_new, col_new, row_new = split_address(addr)
                            # if somehow a new sheet comes up in the range, get the new dimensions
                            if sheet_new != sheet_initial:
                                sheet_initial = sheet_new
                                max_rows, max_cols = max_dimension(cellmap, sheet_new)
                            # add the empty cells
                            if int(row_new) <= max_rows and int(col2num(col_new)) <= max_cols:
                                # only add cells within the maximum bounds of the sheet to avoid too many evaluations
                                # for A:A or 1:1 ranges
                                cell_new = Cell(addr, sheet_new, value="", should_eval='False') # create new cell object
                                cellmap[addr] = cell_new # add it to the cellmap
                                G.add_node(cell_new) # add it to the graph
                                cell_source.cells[addr] = cell_new # add it to the cell_source, used in this function
                    rng = cell_source.Range(reference)
                # a "virtual" Cell stands in for the whole range in the graph
                if address in cellmap:
                    virtual_cell = cellmap[address]
                else:
                    virtual_cell = Cell(address, None, value = rng, formula = reference, is_range = True, is_named_range = True )
                    # save the range
                    cellmap[address] = virtual_cell
                # add an edge from the range to the parent
                G.add_node(virtual_cell)
                # Cell(A1:A10) -> c1 or Cell(ExampleName) -> c1
                G.add_edge(virtual_cell, c1)
                # cells in the range should point to the range as their parent
                target = virtual_cell
                origins = []
                if len(list(rng.keys())) != 0: # could be better, but can't check on Exception types here...
                    for child in rng.addresses:
                        if child not in cellmap:
                            origins.append(cells[child])
                        else:
                            origins.append(cellmap[child])
            else:
                # not a range
                if dep_name in names:
                    reference = names[dep_name]
                else:
                    reference = dep_name
                if reference in cells:
                    if dep_name in names:
                        # named single cell: wrap in a virtual cell linked to the real one
                        virtual_cell = Cell(dep_name, None, value = cells[reference].value, formula = reference, is_range = False, is_named_range = True )
                        G.add_node(virtual_cell)
                        G.add_edge(cells[reference], virtual_cell)
                        origins = [virtual_cell]
                    else:
                        cell = cells[reference]
                        origins = [cell]
                    cell = origins[0]
                    if cell.formula is not None and ('OFFSET' in cell.formula or 'INDEX' in cell.formula):
                        cell_source.pointers.add(cell.address())
                else:
                    # dangling reference: create an empty named virtual cell
                    virtual_cell = Cell(dep_name, None, value = None, formula = None, is_range = False, is_named_range = True )
                    origins = [virtual_cell]
                target = c1
            # process each cell
            for c2 in flatten(origins):
                # if we havent treated this cell allready
                if c2.address() not in cellmap:
                    if c2.formula:
                        # cell with a formula, needs to be added to the todo list
                        todo.append(c2)
                        steps.append(step+1)
                    else:
                        # constant cell, no need for further processing, just remember to set the code
                        pystr,ast = cell2code(c2, names)
                        c2.python_expression = pystr
                        c2.compile()
                    # save in the cellmap
                    cellmap[c2.address()] = c2
                    # add to the graph
                    G.add_node(c2)
                # add an edge from the cell to the parent (range or cell)
                if(target != []):
                    # print "Adding edge %s --> %s" % (c2.address(), target.address())
                    G.add_edge(c2,target)
        c1.compile() # cell compilation is done here because pointer ranges might update python_expressions
    return (cellmap, G)
|
anthill/koala | koala/ast/__init__.py | graph_from_seeds | python | def graph_from_seeds(seeds, cell_source):
# when called from Spreadsheet instance, use the Spreadsheet cellmap and graph
if hasattr(cell_source, 'G'): # ~ cell_source is a Spreadsheet
cellmap = cell_source.cellmap
cells = cellmap
G = cell_source.G
for c in seeds:
G.add_node(c)
cellmap[c.address()] = c
# when called from ExcelCompiler instance, construct cellmap and graph from seeds
else: # ~ cell_source is a ExcelCompiler
cellmap = dict([(x.address(),x) for x in seeds])
cells = cell_source.cells
# directed graph
G = networkx.DiGraph()
# match the info in cellmap
for c in cellmap.values(): G.add_node(c)
# cells to analyze: only formulas
todo = [s for s in seeds if s.formula]
steps = [i for i,s in enumerate(todo)]
names = cell_source.named_ranges
while todo:
c1 = todo.pop()
step = steps.pop()
cursheet = c1.sheet
###### 1) looking for cell c1 dependencies ####################
# print 'C1', c1.address()
# in case a formula, get all cells that are arguments
pystr, ast = cell2code(c1, names)
# set the code & compile it (will flag problems sooner rather than later)
c1.python_expression = pystr.replace('"', "'") # compilation is done later
if 'OFFSET' in c1.formula or 'INDEX' in c1.formula:
if c1.address() not in cell_source.named_ranges: # pointers names already treated in ExcelCompiler
cell_source.pointers.add(c1.address())
# get all the cells/ranges this formula refers to
deps = [x for x in ast.nodes() if isinstance(x,RangeNode)]
# remove dupes
deps = uniqueify(deps)
###### 2) connect dependencies in cells in graph ####################
# ### LOG
# tmp = []
# for dep in deps:
# if dep not in names:
# if "!" not in dep and cursheet != None:
# dep = cursheet + "!" + dep
# if dep not in cellmap:
# tmp.append(dep)
# #deps = tmp
# logStep = "%s %s = %s " % ('|'*step, c1.address(), '',)
# print logStep
# if len(deps) > 1 and 'L' in deps[0] and deps[0] == deps[-1].replace('DG','L'):
# print logStep, "[%s...%s]" % (deps[0], deps[-1])
# elif len(deps) > 0:
# print logStep, "->", deps
# else:
# print logStep, "done"
for dep in deps:
dep_name = dep.tvalue.replace('$','')
# this is to avoid :A1 or A1: dep due to clean_pointers() returning an ExcelError
if dep_name.startswith(':') or dep_name.endswith(':'):
dep_name = dep_name.replace(':', '')
# if not pointer, we need an absolute address
if dep.tsubtype != 'pointer' and dep_name not in names and "!" not in dep_name and cursheet != None:
dep_name = cursheet + "!" + dep_name
# Named_ranges + ranges already parsed (previous iterations)
if dep_name in cellmap:
origins = [cellmap[dep_name]]
target = cellmap[c1.address()]
# if the dep_name is a multi-cell range, create a range object
elif is_range(dep_name) or (dep_name in names and is_range(names[dep_name])):
if dep_name in names:
reference = names[dep_name]
else:
reference = dep_name
if 'OFFSET' in reference or 'INDEX' in reference:
start_end = prepare_pointer(reference, names, ref_cell = c1)
rng = cell_source.Range(start_end)
if dep_name in names: # dep is a pointer range
address = dep_name
else:
if c1.address() in names: # c1 holds is a pointer range
address = c1.address()
else: # a pointer range with no name, its address will be its name
address = '%s:%s' % (start_end["start"], start_end["end"])
cell_source.pointers.add(address)
else:
address = dep_name
# get a list of the addresses in this range that are not yet in the graph
range_addresses = list(resolve_range(reference, should_flatten=True)[0])
cellmap_add_addresses = [addr for addr in range_addresses if addr not in cellmap.keys()]
if len(cellmap_add_addresses) > 0:
# this means there are cells to be added
# get row and col dimensions for the sheet, assuming the whole range is in one sheet
sheet_initial = split_address(cellmap_add_addresses[0])[0]
max_rows, max_cols = max_dimension(cellmap, sheet_initial)
# create empty cells that aren't in the cellmap
for addr in cellmap_add_addresses:
sheet_new, col_new, row_new = split_address(addr)
# if somehow a new sheet comes up in the range, get the new dimensions
if sheet_new != sheet_initial:
sheet_initial = sheet_new
max_rows, max_cols = max_dimension(cellmap, sheet_new)
# add the empty cells
if int(row_new) <= max_rows and int(col2num(col_new)) <= max_cols:
# only add cells within the maximum bounds of the sheet to avoid too many evaluations
# for A:A or 1:1 ranges
cell_new = Cell(addr, sheet_new, value="", should_eval='False') # create new cell object
cellmap[addr] = cell_new # add it to the cellmap
G.add_node(cell_new) # add it to the graph
cell_source.cells[addr] = cell_new # add it to the cell_source, used in this function
rng = cell_source.Range(reference)
if address in cellmap:
virtual_cell = cellmap[address]
else:
virtual_cell = Cell(address, None, value = rng, formula = reference, is_range = True, is_named_range = True )
# save the range
cellmap[address] = virtual_cell
# add an edge from the range to the parent
G.add_node(virtual_cell)
# Cell(A1:A10) -> c1 or Cell(ExampleName) -> c1
G.add_edge(virtual_cell, c1)
# cells in the range should point to the range as their parent
target = virtual_cell
origins = []
if len(list(rng.keys())) != 0: # could be better, but can't check on Exception types here...
for child in rng.addresses:
if child not in cellmap:
origins.append(cells[child])
else:
origins.append(cellmap[child])
else:
# not a range
if dep_name in names:
reference = names[dep_name]
else:
reference = dep_name
if reference in cells:
if dep_name in names:
virtual_cell = Cell(dep_name, None, value = cells[reference].value, formula = reference, is_range = False, is_named_range = True )
G.add_node(virtual_cell)
G.add_edge(cells[reference], virtual_cell)
origins = [virtual_cell]
else:
cell = cells[reference]
origins = [cell]
cell = origins[0]
if cell.formula is not None and ('OFFSET' in cell.formula or 'INDEX' in cell.formula):
cell_source.pointers.add(cell.address())
else:
virtual_cell = Cell(dep_name, None, value = None, formula = None, is_range = False, is_named_range = True )
origins = [virtual_cell]
target = c1
# process each cell
for c2 in flatten(origins):
# if we havent treated this cell allready
if c2.address() not in cellmap:
if c2.formula:
# cell with a formula, needs to be added to the todo list
todo.append(c2)
steps.append(step+1)
else:
# constant cell, no need for further processing, just remember to set the code
pystr,ast = cell2code(c2, names)
c2.python_expression = pystr
c2.compile()
# save in the cellmap
cellmap[c2.address()] = c2
# add to the graph
G.add_node(c2)
# add an edge from the cell to the parent (range or cell)
if(target != []):
# print "Adding edge %s --> %s" % (c2.address(), target.address())
G.add_edge(c2,target)
c1.compile() # cell compilation is done here because pointer ranges might update python_expressions
return (cellmap, G) | This creates/updates a networkx graph from a list of cells.
The graph is created when the cell_source is an instance of ExcelCompiler
The graph is updated when the cell_source is an instance of Spreadsheet | train | https://github.com/anthill/koala/blob/393089fe081380506e73235db18a32b4e078d222/koala/ast/__init__.py#L435-L655 | [
"def is_range(address):\n if isinstance(address, Exception):\n return address\n return address.find(':') > 0\n",
"def split_address(address):\n\n if address in split_address_cache:\n return split_address_cache[address]\n\n else:\n sheet = None\n if address.find('!') > 0:\n sheet,addr = address.split('!')\n else:\n addr = address\n\n #ignore case\n addr = addr.upper()\n\n # regular <col><row> format\n if re.match('^[A-Z\\$]+[\\d\\$]+$', addr):\n col,row = [_f for _f in re.split('([A-Z\\$]+)',addr) if _f]\n # R<row>C<col> format\n elif re.match('^R\\d+C\\d+$', addr):\n row,col = addr.split('C')\n row = row[1:]\n # R[<row>]C[<col>] format\n elif re.match('^R\\[\\d+\\]C\\[\\d+\\]$', addr):\n row,col = addr.split('C')\n row = row[2:-1]\n col = col[2:-1]\n # [<row>] format\n elif re.match('^[\\d\\$]+$', addr):\n row = addr\n col = None\n # [<col>] format\n elif re.match('^[A-Z\\$]$', addr):\n row = None\n col = addr\n else:\n raise Exception('Invalid address format ' + addr)\n\n split_address_cache[address] = (sheet, col, row)\n return (sheet,col,row)\n",
"def col2num(col):\n\n if col in col2num_cache:\n return col2num_cache[col]\n else:\n if not col:\n raise Exception(\"Column may not be empty\")\n\n tot = 0\n for i,c in enumerate([c for c in col[::-1] if c != \"$\"]):\n if c == '$': continue\n tot += (ord(c)-64) * 26 ** i\n\n col2num_cache[col] = tot\n return tot\n",
"def resolve_range(rng, should_flatten = False, sheet=''):\n # print 'RESOLVE RANGE splitting', rng\n if ':' not in rng:\n if '!' in rng:\n rng = rng.split('!')\n return ExcelError('#REF!', info = '%s is not a regular range, nor a named_range' % rng)\n sh, start, end = split_range(rng)\n\n if sh and sheet:\n if sh != sheet:\n raise Exception(\"Mismatched sheets %s and %s\" % (sh,sheet))\n else:\n sheet += '!'\n elif sh and not sheet:\n sheet = sh + \"!\"\n elif sheet and not sh:\n sheet += \"!\"\n else:\n pass\n\n # `unicode` != `str` in Python2. See `from openpyxl.compat import unicode`\n if type(sheet) == str and str != unicode:\n sheet = unicode(sheet, 'utf-8')\n if type(rng) == str and str != unicode:\n rng = unicode(rng, 'utf-8')\n\n key = rng+str(should_flatten)+sheet\n\n if key in resolve_range_cache:\n return resolve_range_cache[key]\n else:\n if not is_range(rng): return ([sheet + rng],1,1)\n # single cell, no range\n if start.isdigit() and end.isdigit():\n # This copes with 1:1 style ranges\n start_col = \"A\"\n start_row = start\n end_col = \"XFD\"\n end_row = end\n elif start.isalpha() and end.isalpha():\n # This copes with A:A style ranges\n start_col = start\n start_row = 1\n end_col = end\n end_row = 2**20\n else:\n sh, start_col, start_row = split_address(start)\n sh, end_col, end_row = split_address(end)\n\n start_col_idx = col2num(start_col)\n end_col_idx = col2num(end_col);\n\n start_row = int(start_row)\n end_row = int(end_row)\n\n # Attempt to use Numpy, not relevant for now\n\n # num2col_vec = np.vectorize(num2col)\n # r = np.array([range(start_row, end_row + 1),]*nb_col, dtype='a5').T\n # c = num2col_vec(np.array([range(start_col_idx, end_col_idx + 1),]*nb_row))\n # if len(sheet)>0:\n # s = np.chararray((nb_row, nb_col), itemsize=len(sheet))\n # s[:] = sheet\n # c = np.core.defchararray.add(s, c)\n # B = np.core.defchararray.add(c, r)\n\n\n # if start_col == end_col:\n # data = B.T.tolist()[0]\n # return data, len(data), 1\n # elif start_row 
== end_row:\n # data = B.tolist()[0]\n # return data, 1, len(data)\n # else:\n # if should_flatten:\n # return B.flatten().tolist(), 1, nb_col*nb_row\n # else:\n # return B.tolist(), nb_row, nb_col\n\n # single column\n if start_col == end_col:\n nrows = end_row - start_row + 1\n data = [ \"%s%s%s\" % (s,c,r) for (s,c,r) in zip([sheet]*nrows,[start_col]*nrows,list(range(start_row,end_row+1)))]\n\n output = data,len(data),1\n\n # single row\n elif start_row == end_row:\n ncols = end_col_idx - start_col_idx + 1\n data = [ \"%s%s%s\" % (s,num2col(c),r) for (s,c,r) in zip([sheet]*ncols,list(range(start_col_idx,end_col_idx+1)),[start_row]*ncols)]\n output = data,1,len(data)\n\n # rectangular range\n else:\n cells = []\n for r in range(start_row,end_row+1):\n row = []\n for c in range(start_col_idx,end_col_idx+1):\n row.append(sheet + num2col(c) + str(r))\n\n cells.append(row)\n\n if should_flatten:\n # flatten into one list\n l = list(flatten(cells, only_lists = True))\n output = l,len(cells), len(cells[0])\n else:\n output = cells, len(cells), len(cells[0])\n\n resolve_range_cache[key] = output\n return output\n",
"def prepare_pointer(code, names, ref_cell = None):\n # if ref_cell is None, it means that the pointer is a named_range\n\n try:\n start, end = code.split('):')\n start += ')'\n except:\n try:\n start, end = code.split(':INDEX')\n end = 'INDEX' + end\n except:\n start, end = code.split(':OFFSET')\n end = 'OFFSET' + end\n\n def build_code(formula):\n ref = None\n sheet = None\n\n if ref_cell is not None:\n sheet = ref_cell.sheet\n\n if not ref_cell.is_named_range:\n ref = parse_cell_address(ref_cell.address())\n\n e = shunting_yard(formula, names, ref = ref, tokenize_range = False)\n debug = False\n ast,root = build_ast(e, debug = debug)\n code = root.emit(ast, context = sheet, pointer = True)\n\n return code\n\n [start_code, end_code] = list(map(build_code, [start, end]))\n\n # string replacements so that cellmap keys and pointer Range names are coherent\n if ref_cell:\n start_code = start_code.replace(\"'\", '\"')\n end_code = end_code.replace(\"'\", '\"')\n\n ref_cell.python_expression = ref_cell.python_expression.replace(code, \"%s:%s\" % (start_code, end_code))\n\n return {\n \"start\": start_code,\n \"end\": end_code\n }\n",
"def flatten(l, only_lists = False):\n instance = list if only_lists else collections.Iterable\n\n for el in l:\n if isinstance(el, instance) and not isinstance(el, string_types):\n for sub in flatten(el, only_lists = only_lists):\n yield sub\n else:\n yield el\n",
"def uniqueify(seq):\n seen = set()\n seen_add = seen.add\n return [ x for x in seq if x not in seen and not seen_add(x)]\n",
"def max_dimension(cellmap, sheet = None):\n \"\"\"\n This function calculates the maximum dimension of the workbook or optionally the worksheet. It returns a tupple\n of two integers, the first being the rows and the second being the columns.\n\n :param cellmap: all the cells that should be used to calculate the maximum.\n :param sheet: (optionally) a string with the sheet name.\n :return: a tupple of two integers, the first being the rows and the second being the columns.\n \"\"\"\n\n cells = list(cellmap.values())\n rows = 0\n cols = 0\n for cell in cells:\n if sheet is None or cell.sheet == sheet:\n rows = max(rows, int(cell.row))\n cols = max(cols, int(col2num(cell.col)))\n\n return (rows, cols)\n",
"def cell2code(cell, named_ranges):\n \"\"\"Generate python code for the given cell\"\"\"\n if cell.formula:\n\n debug = False\n # if 'OFFSET' in cell.formula or 'INDEX' in cell.formula:\n # debug = True\n # if debug:\n # print 'FORMULA', cell.formula\n\n ref = parse_cell_address(cell.address()) if not cell.is_named_range else None\n sheet = cell.sheet\n\n e = shunting_yard(cell.formula, named_ranges, ref=ref, tokenize_range = False)\n\n ast,root = build_ast(e, debug = debug)\n code = root.emit(ast, context=sheet)\n\n # print 'CODE', code, ref\n\n else:\n ast = None\n if isinstance(cell.value, unicode):\n code = u'u\"' + cell.value.replace(u'\"', u'\\\\\"') + u'\"'\n elif isinstance(cell.value, str):\n raise RuntimeError(\"Got unexpected non-unicode str\")\n else:\n code = str(cell.value)\n return code,ast\n"
] | from __future__ import absolute_import
# cython: profile=True
import collections
import six
import networkx
from networkx.classes.digraph import DiGraph
from openpyxl.compat import unicode
from koala.utils import uniqueify, flatten, max_dimension, col2num, resolve_range
from koala.Cell import Cell
from koala.Range import parse_cell_address
from koala.tokenizer import ExcelParser, f_token
from .astnodes import *
def create_node(t, ref = None, debug = False):
"""Simple factory function"""
if t.ttype == "operand":
if t.tsubtype in ["range", "named_range", "pointer"] :
# print 'Creating Node', t.tvalue, t.tsubtype
return RangeNode(t, ref, debug = debug)
else:
return OperandNode(t)
elif t.ttype == "function":
return FunctionNode(t, ref, debug = debug)
elif t.ttype.startswith("operator"):
return OperatorNode(t, ref, debug = debug)
else:
return ASTNode(t, debug = debug)
class Operator(object):
"""Small wrapper class to manage operators during shunting yard"""
def __init__(self,value,precedence,associativity):
self.value = value
self.precedence = precedence
self.associativity = associativity
def shunting_yard(expression, named_ranges, ref = None, tokenize_range = False):
"""
Tokenize an excel formula expression into reverse polish notation
Core algorithm taken from wikipedia with varargs extensions from
http://www.kallisti.net.nz/blog/2008/02/extension-to-the-shunting-yard-algorithm-to-allow-variable-numbers-of-arguments-to-functions/
The ref is the cell address which is passed down to the actual compiled python code.
Range basic operations signature require this reference, so it has to be written during OperatorNode.emit()
https://github.com/iOiurson/koala/blob/master/koala/ast/graph.py#L292.
This is needed because Excel range basic operations (+, -, * ...) are applied on matching cells.
Example:
Cell C2 has the following formula 'A1:A3 + B1:B3'.
The output will actually be A2 + B2, because the formula is relative to cell C2.
"""
#remove leading =
if expression.startswith('='):
expression = expression[1:]
p = ExcelParser(tokenize_range = tokenize_range);
p.parse(expression)
# insert tokens for '(' and ')', to make things clearer below
tokens = []
for t in p.tokens.items:
if t.ttype == "function" and t.tsubtype == "start":
t.tsubtype = ""
tokens.append(t)
tokens.append(f_token('(','arglist','start'))
elif t.ttype == "function" and t.tsubtype == "stop":
tokens.append(f_token(')','arglist','stop'))
elif t.ttype == "subexpression" and t.tsubtype == "start":
t.tvalue = '('
tokens.append(t)
elif t.ttype == "subexpression" and t.tsubtype == "stop":
t.tvalue = ')'
tokens.append(t)
elif t.ttype == "operand" and t.tsubtype == "range" and t.tvalue in named_ranges:
t.tsubtype = "named_range"
tokens.append(t)
else:
tokens.append(t)
#http://office.microsoft.com/en-us/excel-help/calculation-operators-and-precedence-HP010078886.aspx
operators = {}
operators[':'] = Operator(':',8,'left')
operators[''] = Operator(' ',8,'left')
operators[','] = Operator(',',8,'left')
operators['u-'] = Operator('u-',7,'left') #unary negation
operators['%'] = Operator('%',6,'left')
operators['^'] = Operator('^',5,'left')
operators['*'] = Operator('*',4,'left')
operators['/'] = Operator('/',4,'left')
operators['+'] = Operator('+',3,'left')
operators['-'] = Operator('-',3,'left')
operators['&'] = Operator('&',2,'left')
operators['='] = Operator('=',1,'left')
operators['<'] = Operator('<',1,'left')
operators['>'] = Operator('>',1,'left')
operators['<='] = Operator('<=',1,'left')
operators['>='] = Operator('>=',1,'left')
operators['<>'] = Operator('<>',1,'left')
output = collections.deque()
stack = []
were_values = []
arg_count = []
new_tokens = []
# reconstruct expressions with ':' and replace the corresponding tokens by the reconstructed expression
if not tokenize_range:
for index, token in enumerate(tokens):
new_tokens.append(token)
if type(token.tvalue) == str or type(token.tvalue) == unicode:
if token.tvalue.startswith(':'): # example -> :OFFSET( or simply :A10
depth = 0
expr = ''
rev = reversed(tokens[:index])
for t in rev: # going backwards, 'stop' starts, 'start' stops
if t.tsubtype == 'stop':
depth += 1
elif depth > 0 and t.tsubtype == 'start':
depth -= 1
expr = t.tvalue + expr
new_tokens.pop()
if depth == 0:
new_tokens.pop() # these 2 lines are needed to remove INDEX()
new_tokens.pop()
expr = six.next(rev).tvalue + expr
break
expr += token.tvalue
depth = 0
if token.tvalue[1:] in ['OFFSET', 'INDEX']:
for t in tokens[(index + 1):]:
if t.tsubtype == 'start':
depth += 1
elif depth > 0 and t.tsubtype == 'stop':
depth -= 1
expr += t.tvalue
tokens.remove(t)
if depth == 0:
break
new_tokens.append(f_token(expr, 'operand', 'pointer'))
elif ':OFFSET' in token.tvalue or ':INDEX' in token.tvalue: # example -> A1:OFFSET(
depth = 0
expr = ''
expr += token.tvalue
for t in tokens[(index + 1):]:
if t.tsubtype == 'start':
depth += 1
elif t.tsubtype == 'stop':
depth -= 1
expr += t.tvalue
tokens.remove(t)
if depth == 0:
new_tokens.pop()
break
new_tokens.append(f_token(expr, 'operand', 'pointer'))
tokens = new_tokens if new_tokens else tokens
for t in tokens:
if t.ttype == "operand":
output.append(create_node(t, ref))
if were_values:
were_values.pop()
were_values.append(True)
elif t.ttype == "function":
stack.append(t)
arg_count.append(0)
if were_values:
were_values.pop()
were_values.append(True)
were_values.append(False)
elif t.ttype == "argument":
while stack and (stack[-1].tsubtype != "start"):
output.append(create_node(stack.pop(), ref))
if were_values.pop(): arg_count[-1] += 1
were_values.append(False)
if not len(stack):
raise Exception("Mismatched or misplaced parentheses")
elif t.ttype.startswith('operator'):
if t.ttype.endswith('-prefix') and t.tvalue =="-":
o1 = operators['u-']
else:
o1 = operators[t.tvalue]
while stack and stack[-1].ttype.startswith('operator'):
if stack[-1].ttype.endswith('-prefix') and stack[-1].tvalue =="-":
o2 = operators['u-']
else:
o2 = operators[stack[-1].tvalue]
if ( (o1.associativity == "left" and o1.precedence <= o2.precedence)
or
(o1.associativity == "right" and o1.precedence < o2.precedence) ):
output.append(create_node(stack.pop(), ref))
else:
break
stack.append(t)
elif t.tsubtype == "start":
stack.append(t)
elif t.tsubtype == "stop":
while stack and stack[-1].tsubtype != "start":
output.append(create_node(stack.pop(), ref))
if not stack:
raise Exception("Mismatched or misplaced parentheses")
stack.pop()
if stack and stack[-1].ttype == "function":
f = create_node(stack.pop(), ref)
a = arg_count.pop()
w = were_values.pop()
if w: a += 1
f.num_args = a
#print f, "has ",a," args"
output.append(f)
while stack:
if (stack[-1].tsubtype == "start" or stack[-1].tsubtype == "stop"):
raise Exception("Mismatched or misplaced parentheses")
output.append(create_node(stack.pop(), ref))
# convert to list
return [x for x in output]
def build_ast(expression, debug = False):
"""build an AST from an Excel formula expression in reverse polish notation"""
#use a directed graph to store the tree
G = DiGraph()
stack = []
for n in expression:
# Since the graph does not maintain the order of adding nodes/edges
# add an extra attribute 'pos' so we can always sort to the correct order
if isinstance(n,OperatorNode):
if n.ttype == "operator-infix":
arg2 = stack.pop()
arg1 = stack.pop()
# Hack to write the name of sheet in 2argument address
if(n.tvalue == ':'):
if '!' in arg1.tvalue and arg2.ttype == 'operand' and '!' not in arg2.tvalue:
arg2.tvalue = arg1.tvalue.split('!')[0] + '!' + arg2.tvalue
G.add_node(arg1,pos = 1)
G.add_node(arg2,pos = 2)
G.add_edge(arg1, n)
G.add_edge(arg2, n)
else:
arg1 = stack.pop()
G.add_node(arg1,pos = 1)
G.add_edge(arg1, n)
elif isinstance(n,FunctionNode):
args = []
for _ in range(n.num_args):
try:
args.append(stack.pop())
except:
raise Exception()
#try:
# args = [stack.pop() for _ in range(n.num_args)]
#except:
# print 'STACK', stack, type(n)
# raise Exception('prut')
args.reverse()
for i,a in enumerate(args):
G.add_node(a,pos = i)
G.add_edge(a,n)
else:
G.add_node(n,pos=0)
stack.append(n)
return G,stack.pop()
# Whats the difference between subgraph() and make_subgraph() ?
def subgraph(G, seed):
subgraph = networkx.DiGraph()
todo = [(seed,n) for n in G.predecessors(seed)]
while len(todo) > 1:
previous, current = todo.pop()
addr = current.address()
subgraph.add_node(current)
subgraph.add_edge(previous, current)
for n in G.predecessors(current):
if n not in subgraph.nodes():
todo += [(current,n)]
return subgraph
def make_subgraph(G, seed, direction = "ascending"):
subgraph = networkx.DiGraph()
if direction == "ascending":
todo = [(seed,n) for n in G.predecessors(seed)]
else:
todo = [(seed,n) for n in G.successors(seed)]
while len(todo) > 0:
neighbor, current = todo.pop()
subgraph.add_node(current)
subgraph.add_edge(neighbor, current)
if direction == "ascending":
nexts = G.predecessors(current)
else:
nexts = G.successors(current)
for n in nexts:
if n not in subgraph.nodes():
todo += [(current,n)]
return subgraph
def cell2code(cell, named_ranges):
"""Generate python code for the given cell"""
if cell.formula:
debug = False
# if 'OFFSET' in cell.formula or 'INDEX' in cell.formula:
# debug = True
# if debug:
# print 'FORMULA', cell.formula
ref = parse_cell_address(cell.address()) if not cell.is_named_range else None
sheet = cell.sheet
e = shunting_yard(cell.formula, named_ranges, ref=ref, tokenize_range = False)
ast,root = build_ast(e, debug = debug)
code = root.emit(ast, context=sheet)
# print 'CODE', code, ref
else:
ast = None
if isinstance(cell.value, unicode):
code = u'u"' + cell.value.replace(u'"', u'\\"') + u'"'
elif isinstance(cell.value, str):
raise RuntimeError("Got unexpected non-unicode str")
else:
code = str(cell.value)
return code,ast
def prepare_pointer(code, names, ref_cell=None):
    """Split a pointer formula 'start:end' and emit python code for each half.

    code:     the pointer formula, e.g. "OFFSET(..):OFFSET(..)".
    names:    named ranges, passed through to the tokenizer.
    ref_cell: the cell owning the pointer; None means the pointer is a
              named_range. When given, its python_expression is rewritten
              in place so cellmap keys and pointer Range names stay coherent.

    Returns {"start": start_code, "end": end_code}.
    """
    # Tuple unpacking of str.split raises ValueError when the separator is
    # absent (or appears more than once). The bare `except:` clauses used
    # previously also swallowed unrelated errors such as KeyboardInterrupt.
    try:
        start, end = code.split('):')
        start += ')'
    except ValueError:
        try:
            start, end = code.split(':INDEX')
            end = 'INDEX' + end
        except ValueError:
            start, end = code.split(':OFFSET')
            end = 'OFFSET' + end

    def build_code(formula):
        # Emit pointer-mode python code for one half of the range.
        ref = None
        sheet = None
        if ref_cell is not None:
            sheet = ref_cell.sheet
            if not ref_cell.is_named_range:
                ref = parse_cell_address(ref_cell.address())
        e = shunting_yard(formula, names, ref=ref, tokenize_range=False)
        ast, root = build_ast(e, debug=False)
        return root.emit(ast, context=sheet, pointer=True)

    start_code, end_code = map(build_code, [start, end])
    # String replacements so that cellmap keys and pointer Range names
    # are coherent.
    if ref_cell:
        start_code = start_code.replace("'", '"')
        end_code = end_code.replace("'", '"')
        ref_cell.python_expression = ref_cell.python_expression.replace(
            code, "%s:%s" % (start_code, end_code))
    return {
        "start": start_code,
        "end": end_code
    }
|
bheinzerling/pyrouge | pyrouge/utils/sentence_splitter.py | PunktSentenceSplitter.split | python | def split(self, text):
text = cleanup(text)
return self.sent_detector.tokenize(text.strip()) | Splits text and returns a list of the resulting sentences. | train | https://github.com/bheinzerling/pyrouge/blob/afeb37dd2608f1399e2fb24a4ee2fe10a2a18603/pyrouge/utils/sentence_splitter.py#L37-L40 | [
"def cleanup(s):\n return remove_newlines(s)\n"
] | class PunktSentenceSplitter:
"""
Splits sentences using the NLTK Punkt sentence tokenizer. If installed,
PunktSentenceSplitter can use the default NLTK data for English, otherwise
custom trained data has to be provided.
"""
def __init__(self, language="en", punkt_data_path=None):
self.lang2datapath = {"en": "tokenizers/punkt/english.pickle"}
self.log = log.get_global_console_logger()
try:
import nltk.data
except ImportError:
self.log.error(
"Cannot import NLTK data for the sentence splitter. Please "
"check if the 'punkt' NLTK-package is installed correctly.")
try:
if not punkt_data_path:
punkt_data_path = self.lang2datapath[language]
self.sent_detector = nltk.data.load(punkt_data_path)
except KeyError:
self.log.error(
"No sentence splitter data for language {}.".format(language))
except:
self.log.error(
"Could not load sentence splitter data: {}".format(
self.lang2datapath[language]))
@staticmethod
def split_files(input_dir, output_dir, lang="en", punkt_data_path=None):
ss = PunktSentenceSplitter(lang, punkt_data_path)
DirectoryProcessor.process(input_dir, output_dir, ss.split)
|
bheinzerling/pyrouge | pyrouge/utils/file_utils.py | str_from_file | python | def str_from_file(path):
with open(path) as f:
s = f.read().strip()
return s | Return file contents as string. | train | https://github.com/bheinzerling/pyrouge/blob/afeb37dd2608f1399e2fb24a4ee2fe10a2a18603/pyrouge/utils/file_utils.py#L37-L44 | null | from __future__ import print_function, unicode_literals, division
import os
import re
import codecs
import xml.etree.ElementTree as et
from pyrouge.utils import log
class DirectoryProcessor:

    @staticmethod
    def process(input_dir, output_dir, function):
        """
        Run `function` over the text of every file in input_dir, writing
        each result to a same-named file in output_dir (created on demand).
        """
        if not os.path.exists(output_dir):
            os.makedirs(output_dir)
        logger = log.get_global_console_logger()
        logger.info("Processing files in {}.".format(input_dir))
        for name in os.listdir(input_dir):
            logger.info("Processing {}.".format(name))
            source_path = os.path.join(input_dir, name)
            with codecs.open(source_path, "r", encoding="UTF-8") as source:
                content = source.read()
            transformed = function(content)
            target_path = os.path.join(output_dir, name)
            with codecs.open(target_path, "w", encoding="UTF-8") as target:
                target.write(transformed)
        logger.info("Saved processed files to {}.".format(output_dir))
def xml_equal(xml_file1, xml_file2):
    """
    Parse xml and convert to a canonical string representation so we don't
    have to worry about semantically meaningless differences.
    """
    def canonical(xml_file):
        # Poor man's canonicalization, since we don't want to install
        # external packages just for unittesting.
        s = et.tostring(et.parse(xml_file).getroot()).decode("UTF-8")
        # FIX: use raw strings for the regexes (plain "\s" is an
        # invalid-escape warning on modern Pythons) and drop the stray
        # literal '|' that the old class "[\n|\t]" also stripped.
        s = re.sub(r"[\n\t]*", "", s)
        s = re.sub(r"\s+", " ", s)
        # Sorting the characters makes the comparison insensitive to
        # attribute/element ordering (and to any same-multiset difference).
        s = "".join(sorted(s)).strip()
        return s

    return canonical(xml_file1) == canonical(xml_file2)
def list_files(dir_path, recursive=True):
    """
    Return a list of the files under dir_path (full paths).

    recursive: when True (default), include files in all subdirectories;
               when False, only files directly inside dir_path.

    Fixes: the previous version implicitly returned None (instead of [])
    when os.walk yielded nothing (e.g. a nonexistent path), re-walked each
    subtree once per recursion level, and shadowed the builtin `dir`.
    """
    file_list = []
    for root, _dirs, files in os.walk(dir_path):
        file_list.extend(os.path.join(root, f) for f in files)
        if not recursive:
            # os.walk yields the top directory first; stop after it.
            break
    return file_list
def verify_dir(path, name=None):
    """Raise an Exception unless `path` exists.

    name, when given, identifies which directory setting failed and is
    woven into the error message.
    """
    prefix = "Cannot set {} directory because t".format(name) if name else "T"
    if not os.path.exists(path):
        raise Exception("{}he path {} does not exist.".format(prefix, path))
|
bheinzerling/pyrouge | pyrouge/utils/file_utils.py | xml_equal | python | def xml_equal(xml_file1, xml_file2):
def canonical(xml_file):
# poor man's canonicalization, since we don't want to install
# external packages just for unittesting
s = et.tostring(et.parse(xml_file).getroot()).decode("UTF-8")
s = re.sub("[\n|\t]*", "", s)
s = re.sub("\s+", " ", s)
s = "".join(sorted(s)).strip()
return s
return canonical(xml_file1) == canonical(xml_file2) | Parse xml and convert to a canonical string representation so we don't
have to worry about semantically meaningless differences | train | https://github.com/bheinzerling/pyrouge/blob/afeb37dd2608f1399e2fb24a4ee2fe10a2a18603/pyrouge/utils/file_utils.py#L47-L62 | [
"def canonical(xml_file):\n # poor man's canonicalization, since we don't want to install\n # external packages just for unittesting\n s = et.tostring(et.parse(xml_file).getroot()).decode(\"UTF-8\")\n s = re.sub(\"[\\n|\\t]*\", \"\", s)\n s = re.sub(\"\\s+\", \" \", s)\n s = \"\".join(sorted(s)).strip()\n return s\n"
] | from __future__ import print_function, unicode_literals, division
import os
import re
import codecs
import xml.etree.ElementTree as et
from pyrouge.utils import log
class DirectoryProcessor:
@staticmethod
def process(input_dir, output_dir, function):
"""
Apply function to all files in input_dir and save the resulting ouput
files in output_dir.
"""
if not os.path.exists(output_dir):
os.makedirs(output_dir)
logger = log.get_global_console_logger()
logger.info("Processing files in {}.".format(input_dir))
input_file_names = os.listdir(input_dir)
for input_file_name in input_file_names:
logger.info("Processing {}.".format(input_file_name))
input_file = os.path.join(input_dir, input_file_name)
with codecs.open(input_file, "r", encoding="UTF-8") as f:
input_string = f.read()
output_string = function(input_string)
output_file = os.path.join(output_dir, input_file_name)
with codecs.open(output_file, "w", encoding="UTF-8") as f:
f.write(output_string)
logger.info("Saved processed files to {}.".format(output_dir))
def str_from_file(path):
    """
    Read the file at `path` and return its contents stripped of
    surrounding whitespace.
    """
    with open(path) as handle:
        return handle.read().strip()
def list_files(dir_path, recursive=True):
"""
Return a list of files in dir_path.
"""
for root, dirs, files in os.walk(dir_path):
file_list = [os.path.join(root, f) for f in files]
if recursive:
for dir in dirs:
dir = os.path.join(root, dir)
file_list.extend(list_files(dir, recursive=True))
return file_list
def verify_dir(path, name=None):
if name:
name_str = "Cannot set {} directory because t".format(name)
else:
name_str = "T"
msg = "{}he path {} does not exist.".format(name_str, path)
if not os.path.exists(path):
raise Exception(msg)
|
bheinzerling/pyrouge | pyrouge/utils/file_utils.py | list_files | python | def list_files(dir_path, recursive=True):
for root, dirs, files in os.walk(dir_path):
file_list = [os.path.join(root, f) for f in files]
if recursive:
for dir in dirs:
dir = os.path.join(root, dir)
file_list.extend(list_files(dir, recursive=True))
return file_list | Return a list of files in dir_path. | train | https://github.com/bheinzerling/pyrouge/blob/afeb37dd2608f1399e2fb24a4ee2fe10a2a18603/pyrouge/utils/file_utils.py#L65-L77 | [
"def list_files(dir_path, recursive=True):\n \"\"\"\n Return a list of files in dir_path.\n\n \"\"\"\n\n for root, dirs, files in os.walk(dir_path):\n file_list = [os.path.join(root, f) for f in files]\n if recursive:\n for dir in dirs:\n dir = os.path.join(root, dir)\n file_list.extend(list_files(dir, recursive=True))\n return file_list\n"
] | from __future__ import print_function, unicode_literals, division
import os
import re
import codecs
import xml.etree.ElementTree as et
from pyrouge.utils import log
class DirectoryProcessor:
@staticmethod
def process(input_dir, output_dir, function):
"""
Apply function to all files in input_dir and save the resulting ouput
files in output_dir.
"""
if not os.path.exists(output_dir):
os.makedirs(output_dir)
logger = log.get_global_console_logger()
logger.info("Processing files in {}.".format(input_dir))
input_file_names = os.listdir(input_dir)
for input_file_name in input_file_names:
logger.info("Processing {}.".format(input_file_name))
input_file = os.path.join(input_dir, input_file_name)
with codecs.open(input_file, "r", encoding="UTF-8") as f:
input_string = f.read()
output_string = function(input_string)
output_file = os.path.join(output_dir, input_file_name)
with codecs.open(output_file, "w", encoding="UTF-8") as f:
f.write(output_string)
logger.info("Saved processed files to {}.".format(output_dir))
def str_from_file(path):
"""
Return file contents as string.
"""
with open(path) as f:
s = f.read().strip()
return s
def xml_equal(xml_file1, xml_file2):
"""
Parse xml and convert to a canonical string representation so we don't
have to worry about semantically meaningless differences
"""
def canonical(xml_file):
# poor man's canonicalization, since we don't want to install
# external packages just for unittesting
s = et.tostring(et.parse(xml_file).getroot()).decode("UTF-8")
s = re.sub("[\n|\t]*", "", s)
s = re.sub("\s+", " ", s)
s = "".join(sorted(s)).strip()
return s
return canonical(xml_file1) == canonical(xml_file2)
def verify_dir(path, name=None):
if name:
name_str = "Cannot set {} directory because t".format(name)
else:
name_str = "T"
msg = "{}he path {} does not exist.".format(name_str, path)
if not os.path.exists(path):
raise Exception(msg)
|
bheinzerling/pyrouge | pyrouge/utils/file_utils.py | DirectoryProcessor.process | python | def process(input_dir, output_dir, function):
if not os.path.exists(output_dir):
os.makedirs(output_dir)
logger = log.get_global_console_logger()
logger.info("Processing files in {}.".format(input_dir))
input_file_names = os.listdir(input_dir)
for input_file_name in input_file_names:
logger.info("Processing {}.".format(input_file_name))
input_file = os.path.join(input_dir, input_file_name)
with codecs.open(input_file, "r", encoding="UTF-8") as f:
input_string = f.read()
output_string = function(input_string)
output_file = os.path.join(output_dir, input_file_name)
with codecs.open(output_file, "w", encoding="UTF-8") as f:
f.write(output_string)
logger.info("Saved processed files to {}.".format(output_dir)) | Apply function to all files in input_dir and save the resulting ouput
files in output_dir. | train | https://github.com/bheinzerling/pyrouge/blob/afeb37dd2608f1399e2fb24a4ee2fe10a2a18603/pyrouge/utils/file_utils.py#L14-L34 | [
"def get_global_console_logger(level=logging.INFO):\n return get_console_logger('global', level)\n",
"def from_html(html):\n\tsoup = BeautifulSoup(html)\n\tsentences = [elem.text for elem in soup.find_all(\"a\") if 'id' in elem.attrs]\n\ttext = \"\\n\".join(sentences)\n\treturn text\n",
" def convert_text_to_rouge_format(text, title=\"dummy title\"):\n \"\"\"\n Convert a text to a format ROUGE understands. The text is\n assumed to contain one sentence per line.\n\n text: The text to convert, containg one sentence per line.\n title: Optional title for the text. The title will appear\n in the converted file, but doesn't seem to have\n any other relevance.\n\n Returns: The converted text as string.\n\n \"\"\"\n sentences = text.split(\"\\n\")\n sent_elems = [\n \"<a name=\\\"{i}\\\">[{i}]</a> <a href=\\\"#{i}\\\" id={i}>\"\n \"{text}</a>\".format(i=i, text=sent)\n for i, sent in enumerate(sentences, start=1)]\n html = \"\"\"<html>\n<head>\n<title>{title}</title>\n</head>\n<body bgcolor=\"white\">\n{elems}\n</body>\n</html>\"\"\".format(title=title, elems=\"\\n\".join(sent_elems))\n\n return html\n",
"def split(self, text):\n \"\"\"Splits text and returns a list of the resulting sentences.\"\"\"\n text = cleanup(text)\n return self.sent_detector.tokenize(text.strip())\n"
] | class DirectoryProcessor:
@staticmethod
|
bheinzerling/pyrouge | pyrouge/Rouge155.py | Rouge155.split_sentences | python | def split_sentences(self):
from pyrouge.utils.sentence_splitter import PunktSentenceSplitter
self.log.info("Splitting sentences.")
ss = PunktSentenceSplitter()
sent_split_to_string = lambda s: "\n".join(ss.split(s))
process_func = partial(
DirectoryProcessor.process, function=sent_split_to_string)
self.__process_summaries(process_func) | ROUGE requires texts split into sentences. In case the texts
are not already split, this method can be used. | train | https://github.com/bheinzerling/pyrouge/blob/afeb37dd2608f1399e2fb24a4ee2fe10a2a18603/pyrouge/Rouge155.py#L178-L190 | [
"def __process_summaries(self, process_func):\n \"\"\"\n Helper method that applies process_func to the files in the\n system and model folders and saves the resulting files to new\n system and model folders.\n\n \"\"\"\n temp_dir = mkdtemp()\n new_system_dir = os.path.join(temp_dir, \"system\")\n os.mkdir(new_system_dir)\n new_model_dir = os.path.join(temp_dir, \"model\")\n os.mkdir(new_model_dir)\n self.log.info(\n \"Processing summaries. Saving system files to {} and \"\n \"model files to {}.\".format(new_system_dir, new_model_dir))\n process_func(self._system_dir, new_system_dir)\n process_func(self._model_dir, new_model_dir)\n self._system_dir = new_system_dir\n self._model_dir = new_model_dir\n"
] | class Rouge155(object):
"""
This is a wrapper for the ROUGE 1.5.5 summary evaluation package.
This class is designed to simplify the evaluation process by:
1) Converting summaries into a format ROUGE understands.
2) Generating the ROUGE configuration file automatically based
on filename patterns.
This class can be used within Python like this:
rouge = Rouge155()
rouge.system_dir = 'test/systems'
rouge.model_dir = 'test/models'
# The system filename pattern should contain one group that
# matches the document ID.
rouge.system_filename_pattern = 'SL.P.10.R.11.SL062003-(\d+).html'
# The model filename pattern has '#ID#' as a placeholder for the
# document ID. If there are multiple model summaries, pyrouge
# will use the provided regex to automatically match them with
# the corresponding system summary. Here, [A-Z] matches
# multiple model summaries for a given #ID#.
rouge.model_filename_pattern = 'SL.P.10.R.[A-Z].SL062003-#ID#.html'
rouge_output = rouge.evaluate()
print(rouge_output)
output_dict = rouge.output_to_dict(rouge_ouput)
print(output_dict)
-> {'rouge_1_f_score': 0.95652,
'rouge_1_f_score_cb': 0.95652,
'rouge_1_f_score_ce': 0.95652,
'rouge_1_precision': 0.95652,
[...]
To evaluate multiple systems:
rouge = Rouge155()
rouge.system_dir = '/PATH/TO/systems'
rouge.model_dir = 'PATH/TO/models'
for system_id in ['id1', 'id2', 'id3']:
rouge.system_filename_pattern = \
'SL.P/.10.R.{}.SL062003-(\d+).html'.format(system_id)
rouge.model_filename_pattern = \
'SL.P.10.R.[A-Z].SL062003-#ID#.html'
rouge_output = rouge.evaluate(system_id)
print(rouge_output)
"""
def __init__(self, rouge_dir=None, rouge_args=None, log_level=None):
"""
Create a Rouge155 object.
rouge_dir: Directory containing Rouge-1.5.5.pl
rouge_args: Arguments to pass through to ROUGE if you
don't want to use the default pyrouge
arguments.
"""
if log_level is None:
self.log = log.get_global_console_logger()
else:
self.log = log.get_global_console_logger(log_level)
self.__set_dir_properties()
self._config_file = None
self._settings_file = self.__get_config_path()
self.__set_rouge_dir(rouge_dir)
self.args = self.__clean_rouge_args(rouge_args)
self._system_filename_pattern = None
self._model_filename_pattern = None
def save_home_dir(self):
config = ConfigParser()
section = 'pyrouge settings'
config.add_section(section)
config.set(section, 'home_dir', self._home_dir)
with open(self._settings_file, 'w') as f:
config.write(f)
self.log.info("Set ROUGE home directory to {}.".format(self._home_dir))
@property
def settings_file(self):
"""
Path of the setttings file, which stores the ROUGE home dir.
"""
return self._settings_file
@property
def bin_path(self):
"""
The full path of the ROUGE binary (although it's technically
a script), i.e. rouge_home_dir/ROUGE-1.5.5.pl
"""
if self._bin_path is None:
raise Exception(
"ROUGE path not set. Please set the ROUGE home directory "
"and ensure that ROUGE-1.5.5.pl exists in it.")
return self._bin_path
@property
def system_filename_pattern(self):
"""
The regular expression pattern for matching system summary
filenames. The regex string.
E.g. "SL.P.10.R.11.SL062003-(\d+).html" will match the system
filenames in the SPL2003/system folder of the ROUGE SPL example
in the "sample-test" folder.
Currently, there is no support for multiple systems.
"""
return self._system_filename_pattern
@system_filename_pattern.setter
def system_filename_pattern(self, pattern):
self._system_filename_pattern = pattern
@property
def model_filename_pattern(self):
"""
The regular expression pattern for matching model summary
filenames. The pattern needs to contain the string "#ID#",
which is a placeholder for the document ID.
E.g. "SL.P.10.R.[A-Z].SL062003-#ID#.html" will match the model
filenames in the SPL2003/system folder of the ROUGE SPL
example in the "sample-test" folder.
"#ID#" is a placeholder for the document ID which has been
matched by the "(\d+)" part of the system filename pattern.
The different model summaries for a given document ID are
matched by the "[A-Z]" part.
"""
return self._model_filename_pattern
@model_filename_pattern.setter
def model_filename_pattern(self, pattern):
self._model_filename_pattern = pattern
@property
def config_file(self):
return self._config_file
@config_file.setter
def config_file(self, path):
config_dir, _ = os.path.split(path)
verify_dir(config_dir, "configuration file")
self._config_file = path
@staticmethod
def convert_summaries_to_rouge_format(input_dir, output_dir):
"""
Convert all files in input_dir into a format ROUGE understands
and saves the files to output_dir. The input files are assumed
to be plain text with one sentence per line.
input_dir: Path of directory containing the input files.
output_dir: Path of directory in which the converted files
will be saved.
"""
DirectoryProcessor.process(
input_dir, output_dir, Rouge155.convert_text_to_rouge_format)
@staticmethod
def convert_text_to_rouge_format(text, title="dummy title"):
"""
Convert a text to a format ROUGE understands. The text is
assumed to contain one sentence per line.
text: The text to convert, containg one sentence per line.
title: Optional title for the text. The title will appear
in the converted file, but doesn't seem to have
any other relevance.
Returns: The converted text as string.
"""
sentences = text.split("\n")
sent_elems = [
"<a name=\"{i}\">[{i}]</a> <a href=\"#{i}\" id={i}>"
"{text}</a>".format(i=i, text=sent)
for i, sent in enumerate(sentences, start=1)]
html = """<html>
<head>
<title>{title}</title>
</head>
<body bgcolor="white">
{elems}
</body>
</html>""".format(title=title, elems="\n".join(sent_elems))
return html
@staticmethod
def write_config_static(system_dir, system_filename_pattern,
model_dir, model_filename_pattern,
config_file_path, system_id=None):
"""
Write the ROUGE configuration file, which is basically a list
of system summary files and their corresponding model summary
files.
pyrouge uses regular expressions to automatically find the
matching model summary files for a given system summary file
(cf. docstrings for system_filename_pattern and
model_filename_pattern).
system_dir: Path of directory containing
system summaries.
system_filename_pattern: Regex string for matching
system summary filenames.
model_dir: Path of directory containing
model summaries.
model_filename_pattern: Regex string for matching model
summary filenames.
config_file_path: Path of the configuration file.
system_id: Optional system ID string which
will appear in the ROUGE output.
"""
system_filenames = [f for f in os.listdir(system_dir)]
system_models_tuples = []
system_filename_pattern = re.compile(system_filename_pattern)
for system_filename in sorted(system_filenames):
match = system_filename_pattern.match(system_filename)
if match:
id = match.groups(0)[0]
model_filenames = Rouge155.__get_model_filenames_for_id(
id, model_dir, model_filename_pattern)
system_models_tuples.append(
(system_filename, sorted(model_filenames)))
if not system_models_tuples:
raise Exception(
"Did not find any files matching the pattern {} "
"in the system summaries directory {}.".format(
system_filename_pattern.pattern, system_dir))
with codecs.open(config_file_path, 'w', encoding='utf-8') as f:
f.write('<ROUGE-EVAL version="1.55">')
for task_id, (system_filename, model_filenames) in enumerate(
system_models_tuples, start=1):
eval_string = Rouge155.__get_eval_string(
task_id, system_id,
system_dir, system_filename,
model_dir, model_filenames)
f.write(eval_string)
f.write("</ROUGE-EVAL>")
def write_config(self, config_file_path=None, system_id=None):
"""
Write the ROUGE configuration file, which is basically a list
of system summary files and their matching model summary files.
This is a non-static version of write_config_file_static().
config_file_path: Path of the configuration file.
system_id: Optional system ID string which will
appear in the ROUGE output.
"""
if not system_id:
system_id = 1
if (not config_file_path) or (not self._config_dir):
self._config_dir = mkdtemp()
config_filename = "rouge_conf.xml"
else:
config_dir, config_filename = os.path.split(config_file_path)
verify_dir(config_dir, "configuration file")
self._config_file = os.path.join(self._config_dir, config_filename)
Rouge155.write_config_static(
self._system_dir, self._system_filename_pattern,
self._model_dir, self._model_filename_pattern,
self._config_file, system_id)
self.log.info(
"Written ROUGE configuration to {}".format(self._config_file))
def evaluate(self, system_id=1, rouge_args=None):
"""
Run ROUGE to evaluate the system summaries in system_dir against
the model summaries in model_dir. The summaries are assumed to
be in the one-sentence-per-line HTML format ROUGE understands.
system_id: Optional system ID which will be printed in
ROUGE's output.
Returns: Rouge output as string.
"""
self.write_config(system_id=system_id)
options = self.__get_options(rouge_args)
command = [self._bin_path] + options
env = None
if hasattr(self, "_home_dir") and self._home_dir:
env = {'ROUGE_EVAL_HOME': self._home_dir}
self.log.info(
"Running ROUGE with command {}".format(" ".join(command)))
rouge_output = check_output(command, env=env).decode("UTF-8")
return rouge_output
def convert_and_evaluate(self, system_id=1,
split_sentences=False, rouge_args=None):
"""
Convert plain text summaries to ROUGE format and run ROUGE to
evaluate the system summaries in system_dir against the model
summaries in model_dir. Optionally split texts into sentences
in case they aren't already.
This is just a convenience method combining
convert_summaries_to_rouge_format() and evaluate().
split_sentences: Optional argument specifying if
sentences should be split.
system_id: Optional system ID which will be printed
in ROUGE's output.
Returns: ROUGE output as string.
"""
if split_sentences:
self.split_sentences()
self.__write_summaries()
rouge_output = self.evaluate(system_id, rouge_args)
return rouge_output
def output_to_dict(self, output):
    """
    Parse ROUGE's textual report into a flat dict of float scores.

    Keys look like 'rouge_1_recall'; '<key>_cb' and '<key>_ce' hold the
    95% confidence-interval bounds.
    """
    # Example line: 0 ROUGE-1 Average_R: 0.02632 (95%-conf.int. 0.02632 - 0.02632)
    line_re = re.compile(
        r"(\d+) (ROUGE-\S+) (Average_\w): (\d.\d+) "
        r"\(95%-conf.int. (\d.\d+) - (\d.\d+)\)")
    measure_names = {
        'Average_R': 'recall',
        'Average_P': 'precision',
        'Average_F': 'f_score'
    }
    results = {}
    for line in output.split("\n"):
        parsed = line_re.match(line)
        if parsed is None:
            continue
        _sys_id, rouge_type, measure, score, conf_lo, conf_hi = parsed.groups()
        key = "{}_{}".format(
            rouge_type.lower().replace("-", '_'), measure_names[measure])
        results[key] = float(score)
        results["{}_cb".format(key)] = float(conf_lo)
        results["{}_ce".format(key)] = float(conf_hi)
    return results
###################################################################
# Private methods
def __set_rouge_dir(self, home_dir=None):
"""
Verfify presence of ROUGE-1.5.5.pl and data folder, and set
those paths.
"""
if not home_dir:
self._home_dir = self.__get_rouge_home_dir_from_settings()
else:
self._home_dir = home_dir
self.save_home_dir()
self._bin_path = os.path.join(self._home_dir, 'ROUGE-1.5.5.pl')
self.data_dir = os.path.join(self._home_dir, 'data')
if not os.path.exists(self._bin_path):
raise Exception(
"ROUGE binary not found at {}. Please set the "
"correct path by running pyrouge_set_rouge_path "
"/path/to/rouge/home.".format(self._bin_path))
def __get_rouge_home_dir_from_settings(self):
config = ConfigParser()
with open(self._settings_file) as f:
if hasattr(config, "read_file"):
config.read_file(f)
else:
# use deprecated python 2.x method
config.readfp(f)
rouge_home_dir = config.get('pyrouge settings', 'home_dir')
return rouge_home_dir
@staticmethod
def __get_eval_string(
task_id, system_id,
system_dir, system_filename,
model_dir, model_filenames):
"""
ROUGE can evaluate several system summaries for a given text
against several model summaries, i.e. there is an m-to-n
relation between system and model summaries. The system
summaries are listed in the <PEERS> tag and the model summaries
in the <MODELS> tag. pyrouge currently only supports one system
summary per text, i.e. it assumes a 1-to-n relation between
system and model summaries.
"""
peer_elems = "<P ID=\"{id}\">{name}</P>".format(
id=system_id, name=system_filename)
model_elems = ["<M ID=\"{id}\">{name}</M>".format(
id=chr(65 + i), name=name)
for i, name in enumerate(model_filenames)]
model_elems = "\n\t\t\t".join(model_elems)
eval_string = """
<EVAL ID="{task_id}">
<MODEL-ROOT>{model_root}</MODEL-ROOT>
<PEER-ROOT>{peer_root}</PEER-ROOT>
<INPUT-FORMAT TYPE="SEE">
</INPUT-FORMAT>
<PEERS>
{peer_elems}
</PEERS>
<MODELS>
{model_elems}
</MODELS>
</EVAL>
""".format(
task_id=task_id,
model_root=model_dir, model_elems=model_elems,
peer_root=system_dir, peer_elems=peer_elems)
return eval_string
def __process_summaries(self, process_func):
"""
Helper method that applies process_func to the files in the
system and model folders and saves the resulting files to new
system and model folders.
"""
temp_dir = mkdtemp()
new_system_dir = os.path.join(temp_dir, "system")
os.mkdir(new_system_dir)
new_model_dir = os.path.join(temp_dir, "model")
os.mkdir(new_model_dir)
self.log.info(
"Processing summaries. Saving system files to {} and "
"model files to {}.".format(new_system_dir, new_model_dir))
process_func(self._system_dir, new_system_dir)
process_func(self._model_dir, new_model_dir)
self._system_dir = new_system_dir
self._model_dir = new_model_dir
def __write_summaries(self):
self.log.info("Writing summaries.")
self.__process_summaries(self.convert_summaries_to_rouge_format)
@staticmethod
def __get_model_filenames_for_id(id, model_dir, model_filenames_pattern):
pattern = re.compile(model_filenames_pattern.replace('#ID#', id))
model_filenames = [
f for f in os.listdir(model_dir) if pattern.match(f)]
if not model_filenames:
raise Exception(
"Could not find any model summaries for the system"
" summary with ID {}. Specified model filename pattern was: "
"{}".format(id, model_filenames_pattern))
return model_filenames
def __get_options(self, rouge_args=None):
"""
Get supplied command line arguments for ROUGE or use default
ones.
"""
if self.args:
options = self.args.split()
elif rouge_args:
options = rouge_args.split()
else:
options = [
'-e', self._data_dir,
'-c', 95,
'-2',
'-1',
'-U',
'-r', 1000,
'-n', 4,
'-w', 1.2,
'-a',
]
options = list(map(str, options))
options = self.__add_config_option(options)
return options
def __create_dir_property(self, dir_name, docstring):
"""
Generate getter and setter for a directory property.
"""
property_name = "{}_dir".format(dir_name)
private_name = "_" + property_name
setattr(self, private_name, None)
def fget(self):
return getattr(self, private_name)
def fset(self, path):
verify_dir(path, dir_name)
setattr(self, private_name, path)
p = property(fget=fget, fset=fset, doc=docstring)
setattr(self.__class__, property_name, p)
def __set_dir_properties(self):
"""
Automatically generate the properties for directories.
"""
directories = [
("home", "The ROUGE home directory."),
("data", "The path of the ROUGE 'data' directory."),
("system", "Path of the directory containing system summaries."),
("model", "Path of the directory containing model summaries."),
]
for (dirname, docstring) in directories:
self.__create_dir_property(dirname, docstring)
def __clean_rouge_args(self, rouge_args):
"""
Remove enclosing quotation marks, if any.
"""
if not rouge_args:
return
quot_mark_pattern = re.compile('"(.+)"')
match = quot_mark_pattern.match(rouge_args)
if match:
cleaned_args = match.group(1)
return cleaned_args
else:
return rouge_args
def __add_config_option(self, options):
return options + ['-m'] + [self._config_file]
def __get_config_path(self):
if platform.system() == "Windows":
parent_dir = os.getenv("APPDATA")
config_dir_name = "pyrouge"
elif os.name == "posix":
parent_dir = os.path.expanduser("~")
config_dir_name = ".pyrouge"
else:
parent_dir = os.path.dirname(__file__)
config_dir_name = ""
config_dir = os.path.join(parent_dir, config_dir_name)
if not os.path.exists(config_dir):
os.makedirs(config_dir)
return os.path.join(config_dir, 'settings.ini')
|
bheinzerling/pyrouge | pyrouge/Rouge155.py | Rouge155.convert_text_to_rouge_format | python | def convert_text_to_rouge_format(text, title="dummy title"):
sentences = text.split("\n")
sent_elems = [
"<a name=\"{i}\">[{i}]</a> <a href=\"#{i}\" id={i}>"
"{text}</a>".format(i=i, text=sent)
for i, sent in enumerate(sentences, start=1)]
html = """<html>
<head>
<title>{title}</title>
</head>
<body bgcolor="white">
{elems}
</body>
</html>""".format(title=title, elems="\n".join(sent_elems))
return html | Convert a text to a format ROUGE understands. The text is
assumed to contain one sentence per line.
text: The text to convert, containg one sentence per line.
title: Optional title for the text. The title will appear
in the converted file, but doesn't seem to have
any other relevance.
Returns: The converted text as string. | train | https://github.com/bheinzerling/pyrouge/blob/afeb37dd2608f1399e2fb24a4ee2fe10a2a18603/pyrouge/Rouge155.py#L208-L235 | null | class Rouge155(object):
"""
This is a wrapper for the ROUGE 1.5.5 summary evaluation package.
This class is designed to simplify the evaluation process by:
1) Converting summaries into a format ROUGE understands.
2) Generating the ROUGE configuration file automatically based
on filename patterns.
This class can be used within Python like this:
rouge = Rouge155()
rouge.system_dir = 'test/systems'
rouge.model_dir = 'test/models'
# The system filename pattern should contain one group that
# matches the document ID.
rouge.system_filename_pattern = 'SL.P.10.R.11.SL062003-(\d+).html'
# The model filename pattern has '#ID#' as a placeholder for the
# document ID. If there are multiple model summaries, pyrouge
# will use the provided regex to automatically match them with
# the corresponding system summary. Here, [A-Z] matches
# multiple model summaries for a given #ID#.
rouge.model_filename_pattern = 'SL.P.10.R.[A-Z].SL062003-#ID#.html'
rouge_output = rouge.evaluate()
print(rouge_output)
output_dict = rouge.output_to_dict(rouge_ouput)
print(output_dict)
-> {'rouge_1_f_score': 0.95652,
'rouge_1_f_score_cb': 0.95652,
'rouge_1_f_score_ce': 0.95652,
'rouge_1_precision': 0.95652,
[...]
To evaluate multiple systems:
rouge = Rouge155()
rouge.system_dir = '/PATH/TO/systems'
rouge.model_dir = 'PATH/TO/models'
for system_id in ['id1', 'id2', 'id3']:
rouge.system_filename_pattern = \
'SL.P/.10.R.{}.SL062003-(\d+).html'.format(system_id)
rouge.model_filename_pattern = \
'SL.P.10.R.[A-Z].SL062003-#ID#.html'
rouge_output = rouge.evaluate(system_id)
print(rouge_output)
"""
def __init__(self, rouge_dir=None, rouge_args=None, log_level=None):
"""
Create a Rouge155 object.
rouge_dir: Directory containing Rouge-1.5.5.pl
rouge_args: Arguments to pass through to ROUGE if you
don't want to use the default pyrouge
arguments.
"""
if log_level is None:
self.log = log.get_global_console_logger()
else:
self.log = log.get_global_console_logger(log_level)
self.__set_dir_properties()
self._config_file = None
self._settings_file = self.__get_config_path()
self.__set_rouge_dir(rouge_dir)
self.args = self.__clean_rouge_args(rouge_args)
self._system_filename_pattern = None
self._model_filename_pattern = None
def save_home_dir(self):
config = ConfigParser()
section = 'pyrouge settings'
config.add_section(section)
config.set(section, 'home_dir', self._home_dir)
with open(self._settings_file, 'w') as f:
config.write(f)
self.log.info("Set ROUGE home directory to {}.".format(self._home_dir))
@property
def settings_file(self):
"""
Path of the setttings file, which stores the ROUGE home dir.
"""
return self._settings_file
@property
def bin_path(self):
"""
The full path of the ROUGE binary (although it's technically
a script), i.e. rouge_home_dir/ROUGE-1.5.5.pl
"""
if self._bin_path is None:
raise Exception(
"ROUGE path not set. Please set the ROUGE home directory "
"and ensure that ROUGE-1.5.5.pl exists in it.")
return self._bin_path
@property
def system_filename_pattern(self):
"""
The regular expression pattern for matching system summary
filenames. The regex string.
E.g. "SL.P.10.R.11.SL062003-(\d+).html" will match the system
filenames in the SPL2003/system folder of the ROUGE SPL example
in the "sample-test" folder.
Currently, there is no support for multiple systems.
"""
return self._system_filename_pattern
@system_filename_pattern.setter
def system_filename_pattern(self, pattern):
self._system_filename_pattern = pattern
@property
def model_filename_pattern(self):
"""
The regular expression pattern for matching model summary
filenames. The pattern needs to contain the string "#ID#",
which is a placeholder for the document ID.
E.g. "SL.P.10.R.[A-Z].SL062003-#ID#.html" will match the model
filenames in the SPL2003/system folder of the ROUGE SPL
example in the "sample-test" folder.
"#ID#" is a placeholder for the document ID which has been
matched by the "(\d+)" part of the system filename pattern.
The different model summaries for a given document ID are
matched by the "[A-Z]" part.
"""
return self._model_filename_pattern
@model_filename_pattern.setter
def model_filename_pattern(self, pattern):
self._model_filename_pattern = pattern
@property
def config_file(self):
return self._config_file
@config_file.setter
def config_file(self, path):
config_dir, _ = os.path.split(path)
verify_dir(config_dir, "configuration file")
self._config_file = path
def split_sentences(self):
"""
ROUGE requires texts split into sentences. In case the texts
are not already split, this method can be used.
"""
from pyrouge.utils.sentence_splitter import PunktSentenceSplitter
self.log.info("Splitting sentences.")
ss = PunktSentenceSplitter()
sent_split_to_string = lambda s: "\n".join(ss.split(s))
process_func = partial(
DirectoryProcessor.process, function=sent_split_to_string)
self.__process_summaries(process_func)
@staticmethod
def convert_summaries_to_rouge_format(input_dir, output_dir):
"""
Convert all files in input_dir into a format ROUGE understands
and saves the files to output_dir. The input files are assumed
to be plain text with one sentence per line.
input_dir: Path of directory containing the input files.
output_dir: Path of directory in which the converted files
will be saved.
"""
DirectoryProcessor.process(
input_dir, output_dir, Rouge155.convert_text_to_rouge_format)
@staticmethod
@staticmethod
def write_config_static(system_dir, system_filename_pattern,
model_dir, model_filename_pattern,
config_file_path, system_id=None):
"""
Write the ROUGE configuration file, which is basically a list
of system summary files and their corresponding model summary
files.
pyrouge uses regular expressions to automatically find the
matching model summary files for a given system summary file
(cf. docstrings for system_filename_pattern and
model_filename_pattern).
system_dir: Path of directory containing
system summaries.
system_filename_pattern: Regex string for matching
system summary filenames.
model_dir: Path of directory containing
model summaries.
model_filename_pattern: Regex string for matching model
summary filenames.
config_file_path: Path of the configuration file.
system_id: Optional system ID string which
will appear in the ROUGE output.
"""
system_filenames = [f for f in os.listdir(system_dir)]
system_models_tuples = []
system_filename_pattern = re.compile(system_filename_pattern)
for system_filename in sorted(system_filenames):
match = system_filename_pattern.match(system_filename)
if match:
id = match.groups(0)[0]
model_filenames = Rouge155.__get_model_filenames_for_id(
id, model_dir, model_filename_pattern)
system_models_tuples.append(
(system_filename, sorted(model_filenames)))
if not system_models_tuples:
raise Exception(
"Did not find any files matching the pattern {} "
"in the system summaries directory {}.".format(
system_filename_pattern.pattern, system_dir))
with codecs.open(config_file_path, 'w', encoding='utf-8') as f:
f.write('<ROUGE-EVAL version="1.55">')
for task_id, (system_filename, model_filenames) in enumerate(
system_models_tuples, start=1):
eval_string = Rouge155.__get_eval_string(
task_id, system_id,
system_dir, system_filename,
model_dir, model_filenames)
f.write(eval_string)
f.write("</ROUGE-EVAL>")
def write_config(self, config_file_path=None, system_id=None):
"""
Write the ROUGE configuration file, which is basically a list
of system summary files and their matching model summary files.
This is a non-static version of write_config_file_static().
config_file_path: Path of the configuration file.
system_id: Optional system ID string which will
appear in the ROUGE output.
"""
if not system_id:
system_id = 1
if (not config_file_path) or (not self._config_dir):
self._config_dir = mkdtemp()
config_filename = "rouge_conf.xml"
else:
config_dir, config_filename = os.path.split(config_file_path)
verify_dir(config_dir, "configuration file")
self._config_file = os.path.join(self._config_dir, config_filename)
Rouge155.write_config_static(
self._system_dir, self._system_filename_pattern,
self._model_dir, self._model_filename_pattern,
self._config_file, system_id)
self.log.info(
"Written ROUGE configuration to {}".format(self._config_file))
def evaluate(self, system_id=1, rouge_args=None):
"""
Run ROUGE to evaluate the system summaries in system_dir against
the model summaries in model_dir. The summaries are assumed to
be in the one-sentence-per-line HTML format ROUGE understands.
system_id: Optional system ID which will be printed in
ROUGE's output.
Returns: Rouge output as string.
"""
self.write_config(system_id=system_id)
options = self.__get_options(rouge_args)
command = [self._bin_path] + options
env = None
if hasattr(self, "_home_dir") and self._home_dir:
env = {'ROUGE_EVAL_HOME': self._home_dir}
self.log.info(
"Running ROUGE with command {}".format(" ".join(command)))
rouge_output = check_output(command, env=env).decode("UTF-8")
return rouge_output
def convert_and_evaluate(self, system_id=1,
split_sentences=False, rouge_args=None):
"""
Convert plain text summaries to ROUGE format and run ROUGE to
evaluate the system summaries in system_dir against the model
summaries in model_dir. Optionally split texts into sentences
in case they aren't already.
This is just a convenience method combining
convert_summaries_to_rouge_format() and evaluate().
split_sentences: Optional argument specifying if
sentences should be split.
system_id: Optional system ID which will be printed
in ROUGE's output.
Returns: ROUGE output as string.
"""
if split_sentences:
self.split_sentences()
self.__write_summaries()
rouge_output = self.evaluate(system_id, rouge_args)
return rouge_output
def output_to_dict(self, output):
"""
Convert the ROUGE output into python dictionary for further
processing.
"""
#0 ROUGE-1 Average_R: 0.02632 (95%-conf.int. 0.02632 - 0.02632)
pattern = re.compile(
r"(\d+) (ROUGE-\S+) (Average_\w): (\d.\d+) "
r"\(95%-conf.int. (\d.\d+) - (\d.\d+)\)")
results = {}
for line in output.split("\n"):
match = pattern.match(line)
if match:
sys_id, rouge_type, measure, result, conf_begin, conf_end = \
match.groups()
measure = {
'Average_R': 'recall',
'Average_P': 'precision',
'Average_F': 'f_score'
}[measure]
rouge_type = rouge_type.lower().replace("-", '_')
key = "{}_{}".format(rouge_type, measure)
results[key] = float(result)
results["{}_cb".format(key)] = float(conf_begin)
results["{}_ce".format(key)] = float(conf_end)
return results
###################################################################
# Private methods
def __set_rouge_dir(self, home_dir=None):
"""
Verfify presence of ROUGE-1.5.5.pl and data folder, and set
those paths.
"""
if not home_dir:
self._home_dir = self.__get_rouge_home_dir_from_settings()
else:
self._home_dir = home_dir
self.save_home_dir()
self._bin_path = os.path.join(self._home_dir, 'ROUGE-1.5.5.pl')
self.data_dir = os.path.join(self._home_dir, 'data')
if not os.path.exists(self._bin_path):
raise Exception(
"ROUGE binary not found at {}. Please set the "
"correct path by running pyrouge_set_rouge_path "
"/path/to/rouge/home.".format(self._bin_path))
def __get_rouge_home_dir_from_settings(self):
config = ConfigParser()
with open(self._settings_file) as f:
if hasattr(config, "read_file"):
config.read_file(f)
else:
# use deprecated python 2.x method
config.readfp(f)
rouge_home_dir = config.get('pyrouge settings', 'home_dir')
return rouge_home_dir
@staticmethod
def __get_eval_string(
task_id, system_id,
system_dir, system_filename,
model_dir, model_filenames):
"""
ROUGE can evaluate several system summaries for a given text
against several model summaries, i.e. there is an m-to-n
relation between system and model summaries. The system
summaries are listed in the <PEERS> tag and the model summaries
in the <MODELS> tag. pyrouge currently only supports one system
summary per text, i.e. it assumes a 1-to-n relation between
system and model summaries.
"""
peer_elems = "<P ID=\"{id}\">{name}</P>".format(
id=system_id, name=system_filename)
model_elems = ["<M ID=\"{id}\">{name}</M>".format(
id=chr(65 + i), name=name)
for i, name in enumerate(model_filenames)]
model_elems = "\n\t\t\t".join(model_elems)
eval_string = """
<EVAL ID="{task_id}">
<MODEL-ROOT>{model_root}</MODEL-ROOT>
<PEER-ROOT>{peer_root}</PEER-ROOT>
<INPUT-FORMAT TYPE="SEE">
</INPUT-FORMAT>
<PEERS>
{peer_elems}
</PEERS>
<MODELS>
{model_elems}
</MODELS>
</EVAL>
""".format(
task_id=task_id,
model_root=model_dir, model_elems=model_elems,
peer_root=system_dir, peer_elems=peer_elems)
return eval_string
def __process_summaries(self, process_func):
"""
Helper method that applies process_func to the files in the
system and model folders and saves the resulting files to new
system and model folders.
"""
temp_dir = mkdtemp()
new_system_dir = os.path.join(temp_dir, "system")
os.mkdir(new_system_dir)
new_model_dir = os.path.join(temp_dir, "model")
os.mkdir(new_model_dir)
self.log.info(
"Processing summaries. Saving system files to {} and "
"model files to {}.".format(new_system_dir, new_model_dir))
process_func(self._system_dir, new_system_dir)
process_func(self._model_dir, new_model_dir)
self._system_dir = new_system_dir
self._model_dir = new_model_dir
def __write_summaries(self):
self.log.info("Writing summaries.")
self.__process_summaries(self.convert_summaries_to_rouge_format)
@staticmethod
def __get_model_filenames_for_id(id, model_dir, model_filenames_pattern):
pattern = re.compile(model_filenames_pattern.replace('#ID#', id))
model_filenames = [
f for f in os.listdir(model_dir) if pattern.match(f)]
if not model_filenames:
raise Exception(
"Could not find any model summaries for the system"
" summary with ID {}. Specified model filename pattern was: "
"{}".format(id, model_filenames_pattern))
return model_filenames
def __get_options(self, rouge_args=None):
"""
Get supplied command line arguments for ROUGE or use default
ones.
"""
if self.args:
options = self.args.split()
elif rouge_args:
options = rouge_args.split()
else:
options = [
'-e', self._data_dir,
'-c', 95,
'-2',
'-1',
'-U',
'-r', 1000,
'-n', 4,
'-w', 1.2,
'-a',
]
options = list(map(str, options))
options = self.__add_config_option(options)
return options
def __create_dir_property(self, dir_name, docstring):
"""
Generate getter and setter for a directory property.
"""
property_name = "{}_dir".format(dir_name)
private_name = "_" + property_name
setattr(self, private_name, None)
def fget(self):
return getattr(self, private_name)
def fset(self, path):
verify_dir(path, dir_name)
setattr(self, private_name, path)
p = property(fget=fget, fset=fset, doc=docstring)
setattr(self.__class__, property_name, p)
def __set_dir_properties(self):
"""
Automatically generate the properties for directories.
"""
directories = [
("home", "The ROUGE home directory."),
("data", "The path of the ROUGE 'data' directory."),
("system", "Path of the directory containing system summaries."),
("model", "Path of the directory containing model summaries."),
]
for (dirname, docstring) in directories:
self.__create_dir_property(dirname, docstring)
def __clean_rouge_args(self, rouge_args):
"""
Remove enclosing quotation marks, if any.
"""
if not rouge_args:
return
quot_mark_pattern = re.compile('"(.+)"')
match = quot_mark_pattern.match(rouge_args)
if match:
cleaned_args = match.group(1)
return cleaned_args
else:
return rouge_args
def __add_config_option(self, options):
return options + ['-m'] + [self._config_file]
def __get_config_path(self):
if platform.system() == "Windows":
parent_dir = os.getenv("APPDATA")
config_dir_name = "pyrouge"
elif os.name == "posix":
parent_dir = os.path.expanduser("~")
config_dir_name = ".pyrouge"
else:
parent_dir = os.path.dirname(__file__)
config_dir_name = ""
config_dir = os.path.join(parent_dir, config_dir_name)
if not os.path.exists(config_dir):
os.makedirs(config_dir)
return os.path.join(config_dir, 'settings.ini')
|
bheinzerling/pyrouge | pyrouge/Rouge155.py | Rouge155.write_config_static | python | def write_config_static(system_dir, system_filename_pattern,
model_dir, model_filename_pattern,
config_file_path, system_id=None):
system_filenames = [f for f in os.listdir(system_dir)]
system_models_tuples = []
system_filename_pattern = re.compile(system_filename_pattern)
for system_filename in sorted(system_filenames):
match = system_filename_pattern.match(system_filename)
if match:
id = match.groups(0)[0]
model_filenames = Rouge155.__get_model_filenames_for_id(
id, model_dir, model_filename_pattern)
system_models_tuples.append(
(system_filename, sorted(model_filenames)))
if not system_models_tuples:
raise Exception(
"Did not find any files matching the pattern {} "
"in the system summaries directory {}.".format(
system_filename_pattern.pattern, system_dir))
with codecs.open(config_file_path, 'w', encoding='utf-8') as f:
f.write('<ROUGE-EVAL version="1.55">')
for task_id, (system_filename, model_filenames) in enumerate(
system_models_tuples, start=1):
eval_string = Rouge155.__get_eval_string(
task_id, system_id,
system_dir, system_filename,
model_dir, model_filenames)
f.write(eval_string)
f.write("</ROUGE-EVAL>") | Write the ROUGE configuration file, which is basically a list
of system summary files and their corresponding model summary
files.
pyrouge uses regular expressions to automatically find the
matching model summary files for a given system summary file
(cf. docstrings for system_filename_pattern and
model_filename_pattern).
system_dir: Path of directory containing
system summaries.
system_filename_pattern: Regex string for matching
system summary filenames.
model_dir: Path of directory containing
model summaries.
model_filename_pattern: Regex string for matching model
summary filenames.
config_file_path: Path of the configuration file.
system_id: Optional system ID string which
will appear in the ROUGE output. | train | https://github.com/bheinzerling/pyrouge/blob/afeb37dd2608f1399e2fb24a4ee2fe10a2a18603/pyrouge/Rouge155.py#L238-L292 | [
" def __get_eval_string(\n task_id, system_id,\n system_dir, system_filename,\n model_dir, model_filenames):\n \"\"\"\n ROUGE can evaluate several system summaries for a given text\n against several model summaries, i.e. there is an m-to-n\n relation between system and model summaries. The system\n summaries are listed in the <PEERS> tag and the model summaries\n in the <MODELS> tag. pyrouge currently only supports one system\n summary per text, i.e. it assumes a 1-to-n relation between\n system and model summaries.\n\n \"\"\"\n peer_elems = \"<P ID=\\\"{id}\\\">{name}</P>\".format(\n id=system_id, name=system_filename)\n\n model_elems = [\"<M ID=\\\"{id}\\\">{name}</M>\".format(\n id=chr(65 + i), name=name)\n for i, name in enumerate(model_filenames)]\n\n model_elems = \"\\n\\t\\t\\t\".join(model_elems)\n eval_string = \"\"\"\n <EVAL ID=\"{task_id}\">\n <MODEL-ROOT>{model_root}</MODEL-ROOT>\n <PEER-ROOT>{peer_root}</PEER-ROOT>\n <INPUT-FORMAT TYPE=\"SEE\">\n </INPUT-FORMAT>\n <PEERS>\n {peer_elems}\n </PEERS>\n <MODELS>\n {model_elems}\n </MODELS>\n </EVAL>\n\"\"\".format(\n task_id=task_id,\n model_root=model_dir, model_elems=model_elems,\n peer_root=system_dir, peer_elems=peer_elems)\n return eval_string\n",
"def __get_model_filenames_for_id(id, model_dir, model_filenames_pattern):\n pattern = re.compile(model_filenames_pattern.replace('#ID#', id))\n model_filenames = [\n f for f in os.listdir(model_dir) if pattern.match(f)]\n if not model_filenames:\n raise Exception(\n \"Could not find any model summaries for the system\"\n \" summary with ID {}. Specified model filename pattern was: \"\n \"{}\".format(id, model_filenames_pattern))\n return model_filenames\n"
] | class Rouge155(object):
"""
This is a wrapper for the ROUGE 1.5.5 summary evaluation package.
This class is designed to simplify the evaluation process by:
1) Converting summaries into a format ROUGE understands.
2) Generating the ROUGE configuration file automatically based
on filename patterns.
This class can be used within Python like this:
rouge = Rouge155()
rouge.system_dir = 'test/systems'
rouge.model_dir = 'test/models'
# The system filename pattern should contain one group that
# matches the document ID.
rouge.system_filename_pattern = 'SL.P.10.R.11.SL062003-(\d+).html'
# The model filename pattern has '#ID#' as a placeholder for the
# document ID. If there are multiple model summaries, pyrouge
# will use the provided regex to automatically match them with
# the corresponding system summary. Here, [A-Z] matches
# multiple model summaries for a given #ID#.
rouge.model_filename_pattern = 'SL.P.10.R.[A-Z].SL062003-#ID#.html'
rouge_output = rouge.evaluate()
print(rouge_output)
output_dict = rouge.output_to_dict(rouge_ouput)
print(output_dict)
-> {'rouge_1_f_score': 0.95652,
'rouge_1_f_score_cb': 0.95652,
'rouge_1_f_score_ce': 0.95652,
'rouge_1_precision': 0.95652,
[...]
To evaluate multiple systems:
rouge = Rouge155()
rouge.system_dir = '/PATH/TO/systems'
rouge.model_dir = 'PATH/TO/models'
for system_id in ['id1', 'id2', 'id3']:
rouge.system_filename_pattern = \
'SL.P/.10.R.{}.SL062003-(\d+).html'.format(system_id)
rouge.model_filename_pattern = \
'SL.P.10.R.[A-Z].SL062003-#ID#.html'
rouge_output = rouge.evaluate(system_id)
print(rouge_output)
"""
def __init__(self, rouge_dir=None, rouge_args=None, log_level=None):
"""
Create a Rouge155 object.
rouge_dir: Directory containing Rouge-1.5.5.pl
rouge_args: Arguments to pass through to ROUGE if you
don't want to use the default pyrouge
arguments.
"""
if log_level is None:
self.log = log.get_global_console_logger()
else:
self.log = log.get_global_console_logger(log_level)
self.__set_dir_properties()
self._config_file = None
self._settings_file = self.__get_config_path()
self.__set_rouge_dir(rouge_dir)
self.args = self.__clean_rouge_args(rouge_args)
self._system_filename_pattern = None
self._model_filename_pattern = None
def save_home_dir(self):
config = ConfigParser()
section = 'pyrouge settings'
config.add_section(section)
config.set(section, 'home_dir', self._home_dir)
with open(self._settings_file, 'w') as f:
config.write(f)
self.log.info("Set ROUGE home directory to {}.".format(self._home_dir))
@property
def settings_file(self):
"""
Path of the setttings file, which stores the ROUGE home dir.
"""
return self._settings_file
@property
def bin_path(self):
"""
The full path of the ROUGE binary (although it's technically
a script), i.e. rouge_home_dir/ROUGE-1.5.5.pl
"""
if self._bin_path is None:
raise Exception(
"ROUGE path not set. Please set the ROUGE home directory "
"and ensure that ROUGE-1.5.5.pl exists in it.")
return self._bin_path
@property
def system_filename_pattern(self):
"""
The regular expression pattern for matching system summary
filenames. The regex string.
E.g. "SL.P.10.R.11.SL062003-(\d+).html" will match the system
filenames in the SPL2003/system folder of the ROUGE SPL example
in the "sample-test" folder.
Currently, there is no support for multiple systems.
"""
return self._system_filename_pattern
@system_filename_pattern.setter
def system_filename_pattern(self, pattern):
self._system_filename_pattern = pattern
@property
def model_filename_pattern(self):
"""
The regular expression pattern for matching model summary
filenames. The pattern needs to contain the string "#ID#",
which is a placeholder for the document ID.
E.g. "SL.P.10.R.[A-Z].SL062003-#ID#.html" will match the model
filenames in the SPL2003/system folder of the ROUGE SPL
example in the "sample-test" folder.
"#ID#" is a placeholder for the document ID which has been
matched by the "(\d+)" part of the system filename pattern.
The different model summaries for a given document ID are
matched by the "[A-Z]" part.
"""
return self._model_filename_pattern
@model_filename_pattern.setter
def model_filename_pattern(self, pattern):
self._model_filename_pattern = pattern
@property
def config_file(self):
return self._config_file
@config_file.setter
def config_file(self, path):
config_dir, _ = os.path.split(path)
verify_dir(config_dir, "configuration file")
self._config_file = path
def split_sentences(self):
"""
ROUGE requires texts split into sentences. In case the texts
are not already split, this method can be used.
"""
from pyrouge.utils.sentence_splitter import PunktSentenceSplitter
self.log.info("Splitting sentences.")
ss = PunktSentenceSplitter()
sent_split_to_string = lambda s: "\n".join(ss.split(s))
process_func = partial(
DirectoryProcessor.process, function=sent_split_to_string)
self.__process_summaries(process_func)
@staticmethod
def convert_summaries_to_rouge_format(input_dir, output_dir):
"""
Convert all files in input_dir into a format ROUGE understands
and saves the files to output_dir. The input files are assumed
to be plain text with one sentence per line.
input_dir: Path of directory containing the input files.
output_dir: Path of directory in which the converted files
will be saved.
"""
DirectoryProcessor.process(
input_dir, output_dir, Rouge155.convert_text_to_rouge_format)
@staticmethod
def convert_text_to_rouge_format(text, title="dummy title"):
"""
Convert a text to a format ROUGE understands. The text is
assumed to contain one sentence per line.
text: The text to convert, containg one sentence per line.
title: Optional title for the text. The title will appear
in the converted file, but doesn't seem to have
any other relevance.
Returns: The converted text as string.
"""
sentences = text.split("\n")
sent_elems = [
"<a name=\"{i}\">[{i}]</a> <a href=\"#{i}\" id={i}>"
"{text}</a>".format(i=i, text=sent)
for i, sent in enumerate(sentences, start=1)]
html = """<html>
<head>
<title>{title}</title>
</head>
<body bgcolor="white">
{elems}
</body>
</html>""".format(title=title, elems="\n".join(sent_elems))
return html
@staticmethod
def write_config(self, config_file_path=None, system_id=None):
"""
Write the ROUGE configuration file, which is basically a list
of system summary files and their matching model summary files.
This is a non-static version of write_config_file_static().
config_file_path: Path of the configuration file.
system_id: Optional system ID string which will
appear in the ROUGE output.
"""
if not system_id:
system_id = 1
if (not config_file_path) or (not self._config_dir):
self._config_dir = mkdtemp()
config_filename = "rouge_conf.xml"
else:
config_dir, config_filename = os.path.split(config_file_path)
verify_dir(config_dir, "configuration file")
self._config_file = os.path.join(self._config_dir, config_filename)
Rouge155.write_config_static(
self._system_dir, self._system_filename_pattern,
self._model_dir, self._model_filename_pattern,
self._config_file, system_id)
self.log.info(
"Written ROUGE configuration to {}".format(self._config_file))
def evaluate(self, system_id=1, rouge_args=None):
"""
Run ROUGE to evaluate the system summaries in system_dir against
the model summaries in model_dir. The summaries are assumed to
be in the one-sentence-per-line HTML format ROUGE understands.
system_id: Optional system ID which will be printed in
ROUGE's output.
Returns: Rouge output as string.
"""
self.write_config(system_id=system_id)
options = self.__get_options(rouge_args)
command = [self._bin_path] + options
env = None
if hasattr(self, "_home_dir") and self._home_dir:
env = {'ROUGE_EVAL_HOME': self._home_dir}
self.log.info(
"Running ROUGE with command {}".format(" ".join(command)))
rouge_output = check_output(command, env=env).decode("UTF-8")
return rouge_output
def convert_and_evaluate(self, system_id=1,
split_sentences=False, rouge_args=None):
"""
Convert plain text summaries to ROUGE format and run ROUGE to
evaluate the system summaries in system_dir against the model
summaries in model_dir. Optionally split texts into sentences
in case they aren't already.
This is just a convenience method combining
convert_summaries_to_rouge_format() and evaluate().
split_sentences: Optional argument specifying if
sentences should be split.
system_id: Optional system ID which will be printed
in ROUGE's output.
Returns: ROUGE output as string.
"""
if split_sentences:
self.split_sentences()
self.__write_summaries()
rouge_output = self.evaluate(system_id, rouge_args)
return rouge_output
def output_to_dict(self, output):
"""
Convert the ROUGE output into python dictionary for further
processing.
"""
#0 ROUGE-1 Average_R: 0.02632 (95%-conf.int. 0.02632 - 0.02632)
pattern = re.compile(
r"(\d+) (ROUGE-\S+) (Average_\w): (\d.\d+) "
r"\(95%-conf.int. (\d.\d+) - (\d.\d+)\)")
results = {}
for line in output.split("\n"):
match = pattern.match(line)
if match:
sys_id, rouge_type, measure, result, conf_begin, conf_end = \
match.groups()
measure = {
'Average_R': 'recall',
'Average_P': 'precision',
'Average_F': 'f_score'
}[measure]
rouge_type = rouge_type.lower().replace("-", '_')
key = "{}_{}".format(rouge_type, measure)
results[key] = float(result)
results["{}_cb".format(key)] = float(conf_begin)
results["{}_ce".format(key)] = float(conf_end)
return results
###################################################################
# Private methods
def __set_rouge_dir(self, home_dir=None):
"""
Verfify presence of ROUGE-1.5.5.pl and data folder, and set
those paths.
"""
if not home_dir:
self._home_dir = self.__get_rouge_home_dir_from_settings()
else:
self._home_dir = home_dir
self.save_home_dir()
self._bin_path = os.path.join(self._home_dir, 'ROUGE-1.5.5.pl')
self.data_dir = os.path.join(self._home_dir, 'data')
if not os.path.exists(self._bin_path):
raise Exception(
"ROUGE binary not found at {}. Please set the "
"correct path by running pyrouge_set_rouge_path "
"/path/to/rouge/home.".format(self._bin_path))
def __get_rouge_home_dir_from_settings(self):
config = ConfigParser()
with open(self._settings_file) as f:
if hasattr(config, "read_file"):
config.read_file(f)
else:
# use deprecated python 2.x method
config.readfp(f)
rouge_home_dir = config.get('pyrouge settings', 'home_dir')
return rouge_home_dir
@staticmethod
def __get_eval_string(
task_id, system_id,
system_dir, system_filename,
model_dir, model_filenames):
"""
ROUGE can evaluate several system summaries for a given text
against several model summaries, i.e. there is an m-to-n
relation between system and model summaries. The system
summaries are listed in the <PEERS> tag and the model summaries
in the <MODELS> tag. pyrouge currently only supports one system
summary per text, i.e. it assumes a 1-to-n relation between
system and model summaries.
"""
peer_elems = "<P ID=\"{id}\">{name}</P>".format(
id=system_id, name=system_filename)
model_elems = ["<M ID=\"{id}\">{name}</M>".format(
id=chr(65 + i), name=name)
for i, name in enumerate(model_filenames)]
model_elems = "\n\t\t\t".join(model_elems)
eval_string = """
<EVAL ID="{task_id}">
<MODEL-ROOT>{model_root}</MODEL-ROOT>
<PEER-ROOT>{peer_root}</PEER-ROOT>
<INPUT-FORMAT TYPE="SEE">
</INPUT-FORMAT>
<PEERS>
{peer_elems}
</PEERS>
<MODELS>
{model_elems}
</MODELS>
</EVAL>
""".format(
task_id=task_id,
model_root=model_dir, model_elems=model_elems,
peer_root=system_dir, peer_elems=peer_elems)
return eval_string
def __process_summaries(self, process_func):
"""
Helper method that applies process_func to the files in the
system and model folders and saves the resulting files to new
system and model folders.
"""
temp_dir = mkdtemp()
new_system_dir = os.path.join(temp_dir, "system")
os.mkdir(new_system_dir)
new_model_dir = os.path.join(temp_dir, "model")
os.mkdir(new_model_dir)
self.log.info(
"Processing summaries. Saving system files to {} and "
"model files to {}.".format(new_system_dir, new_model_dir))
process_func(self._system_dir, new_system_dir)
process_func(self._model_dir, new_model_dir)
self._system_dir = new_system_dir
self._model_dir = new_model_dir
def __write_summaries(self):
self.log.info("Writing summaries.")
self.__process_summaries(self.convert_summaries_to_rouge_format)
@staticmethod
def __get_model_filenames_for_id(id, model_dir, model_filenames_pattern):
pattern = re.compile(model_filenames_pattern.replace('#ID#', id))
model_filenames = [
f for f in os.listdir(model_dir) if pattern.match(f)]
if not model_filenames:
raise Exception(
"Could not find any model summaries for the system"
" summary with ID {}. Specified model filename pattern was: "
"{}".format(id, model_filenames_pattern))
return model_filenames
def __get_options(self, rouge_args=None):
"""
Get supplied command line arguments for ROUGE or use default
ones.
"""
if self.args:
options = self.args.split()
elif rouge_args:
options = rouge_args.split()
else:
options = [
'-e', self._data_dir,
'-c', 95,
'-2',
'-1',
'-U',
'-r', 1000,
'-n', 4,
'-w', 1.2,
'-a',
]
options = list(map(str, options))
options = self.__add_config_option(options)
return options
def __create_dir_property(self, dir_name, docstring):
"""
Generate getter and setter for a directory property.
"""
property_name = "{}_dir".format(dir_name)
private_name = "_" + property_name
setattr(self, private_name, None)
def fget(self):
return getattr(self, private_name)
def fset(self, path):
verify_dir(path, dir_name)
setattr(self, private_name, path)
p = property(fget=fget, fset=fset, doc=docstring)
setattr(self.__class__, property_name, p)
def __set_dir_properties(self):
"""
Automatically generate the properties for directories.
"""
directories = [
("home", "The ROUGE home directory."),
("data", "The path of the ROUGE 'data' directory."),
("system", "Path of the directory containing system summaries."),
("model", "Path of the directory containing model summaries."),
]
for (dirname, docstring) in directories:
self.__create_dir_property(dirname, docstring)
def __clean_rouge_args(self, rouge_args):
"""
Remove enclosing quotation marks, if any.
"""
if not rouge_args:
return
quot_mark_pattern = re.compile('"(.+)"')
match = quot_mark_pattern.match(rouge_args)
if match:
cleaned_args = match.group(1)
return cleaned_args
else:
return rouge_args
def __add_config_option(self, options):
return options + ['-m'] + [self._config_file]
def __get_config_path(self):
if platform.system() == "Windows":
parent_dir = os.getenv("APPDATA")
config_dir_name = "pyrouge"
elif os.name == "posix":
parent_dir = os.path.expanduser("~")
config_dir_name = ".pyrouge"
else:
parent_dir = os.path.dirname(__file__)
config_dir_name = ""
config_dir = os.path.join(parent_dir, config_dir_name)
if not os.path.exists(config_dir):
os.makedirs(config_dir)
return os.path.join(config_dir, 'settings.ini')
|
bheinzerling/pyrouge | pyrouge/Rouge155.py | Rouge155.write_config | python | def write_config(self, config_file_path=None, system_id=None):
if not system_id:
system_id = 1
if (not config_file_path) or (not self._config_dir):
self._config_dir = mkdtemp()
config_filename = "rouge_conf.xml"
else:
config_dir, config_filename = os.path.split(config_file_path)
verify_dir(config_dir, "configuration file")
self._config_file = os.path.join(self._config_dir, config_filename)
Rouge155.write_config_static(
self._system_dir, self._system_filename_pattern,
self._model_dir, self._model_filename_pattern,
self._config_file, system_id)
self.log.info(
"Written ROUGE configuration to {}".format(self._config_file)) | Write the ROUGE configuration file, which is basically a list
of system summary files and their matching model summary files.
This is a non-static version of write_config_file_static().
config_file_path: Path of the configuration file.
system_id: Optional system ID string which will
appear in the ROUGE output. | train | https://github.com/bheinzerling/pyrouge/blob/afeb37dd2608f1399e2fb24a4ee2fe10a2a18603/pyrouge/Rouge155.py#L294-L320 | [
"def verify_dir(path, name=None):\n if name:\n name_str = \"Cannot set {} directory because t\".format(name)\n else:\n name_str = \"T\"\n msg = \"{}he path {} does not exist.\".format(name_str, path)\n if not os.path.exists(path):\n raise Exception(msg)\n",
"def write_config_static(system_dir, system_filename_pattern,\n model_dir, model_filename_pattern,\n config_file_path, system_id=None):\n \"\"\"\n Write the ROUGE configuration file, which is basically a list\n of system summary files and their corresponding model summary\n files.\n\n pyrouge uses regular expressions to automatically find the\n matching model summary files for a given system summary file\n (cf. docstrings for system_filename_pattern and\n model_filename_pattern).\n\n system_dir: Path of directory containing\n system summaries.\n system_filename_pattern: Regex string for matching\n system summary filenames.\n model_dir: Path of directory containing\n model summaries.\n model_filename_pattern: Regex string for matching model\n summary filenames.\n config_file_path: Path of the configuration file.\n system_id: Optional system ID string which\n will appear in the ROUGE output.\n\n \"\"\"\n system_filenames = [f for f in os.listdir(system_dir)]\n system_models_tuples = []\n\n system_filename_pattern = re.compile(system_filename_pattern)\n for system_filename in sorted(system_filenames):\n match = system_filename_pattern.match(system_filename)\n if match:\n id = match.groups(0)[0]\n model_filenames = Rouge155.__get_model_filenames_for_id(\n id, model_dir, model_filename_pattern)\n system_models_tuples.append(\n (system_filename, sorted(model_filenames)))\n if not system_models_tuples:\n raise Exception(\n \"Did not find any files matching the pattern {} \"\n \"in the system summaries directory {}.\".format(\n system_filename_pattern.pattern, system_dir))\n\n with codecs.open(config_file_path, 'w', encoding='utf-8') as f:\n f.write('<ROUGE-EVAL version=\"1.55\">')\n for task_id, (system_filename, model_filenames) in enumerate(\n system_models_tuples, start=1):\n\n eval_string = Rouge155.__get_eval_string(\n task_id, system_id,\n system_dir, system_filename,\n model_dir, model_filenames)\n f.write(eval_string)\n f.write(\"</ROUGE-EVAL>\")\n"
] | class Rouge155(object):
"""
This is a wrapper for the ROUGE 1.5.5 summary evaluation package.
This class is designed to simplify the evaluation process by:
1) Converting summaries into a format ROUGE understands.
2) Generating the ROUGE configuration file automatically based
on filename patterns.
This class can be used within Python like this:
rouge = Rouge155()
rouge.system_dir = 'test/systems'
rouge.model_dir = 'test/models'
# The system filename pattern should contain one group that
# matches the document ID.
rouge.system_filename_pattern = 'SL.P.10.R.11.SL062003-(\d+).html'
# The model filename pattern has '#ID#' as a placeholder for the
# document ID. If there are multiple model summaries, pyrouge
# will use the provided regex to automatically match them with
# the corresponding system summary. Here, [A-Z] matches
# multiple model summaries for a given #ID#.
rouge.model_filename_pattern = 'SL.P.10.R.[A-Z].SL062003-#ID#.html'
rouge_output = rouge.evaluate()
print(rouge_output)
output_dict = rouge.output_to_dict(rouge_ouput)
print(output_dict)
-> {'rouge_1_f_score': 0.95652,
'rouge_1_f_score_cb': 0.95652,
'rouge_1_f_score_ce': 0.95652,
'rouge_1_precision': 0.95652,
[...]
To evaluate multiple systems:
rouge = Rouge155()
rouge.system_dir = '/PATH/TO/systems'
rouge.model_dir = 'PATH/TO/models'
for system_id in ['id1', 'id2', 'id3']:
rouge.system_filename_pattern = \
'SL.P/.10.R.{}.SL062003-(\d+).html'.format(system_id)
rouge.model_filename_pattern = \
'SL.P.10.R.[A-Z].SL062003-#ID#.html'
rouge_output = rouge.evaluate(system_id)
print(rouge_output)
"""
def __init__(self, rouge_dir=None, rouge_args=None, log_level=None):
"""
Create a Rouge155 object.
rouge_dir: Directory containing Rouge-1.5.5.pl
rouge_args: Arguments to pass through to ROUGE if you
don't want to use the default pyrouge
arguments.
"""
if log_level is None:
self.log = log.get_global_console_logger()
else:
self.log = log.get_global_console_logger(log_level)
self.__set_dir_properties()
self._config_file = None
self._settings_file = self.__get_config_path()
self.__set_rouge_dir(rouge_dir)
self.args = self.__clean_rouge_args(rouge_args)
self._system_filename_pattern = None
self._model_filename_pattern = None
def save_home_dir(self):
config = ConfigParser()
section = 'pyrouge settings'
config.add_section(section)
config.set(section, 'home_dir', self._home_dir)
with open(self._settings_file, 'w') as f:
config.write(f)
self.log.info("Set ROUGE home directory to {}.".format(self._home_dir))
@property
def settings_file(self):
"""
Path of the setttings file, which stores the ROUGE home dir.
"""
return self._settings_file
@property
def bin_path(self):
"""
The full path of the ROUGE binary (although it's technically
a script), i.e. rouge_home_dir/ROUGE-1.5.5.pl
"""
if self._bin_path is None:
raise Exception(
"ROUGE path not set. Please set the ROUGE home directory "
"and ensure that ROUGE-1.5.5.pl exists in it.")
return self._bin_path
@property
def system_filename_pattern(self):
"""
The regular expression pattern for matching system summary
filenames. The regex string.
E.g. "SL.P.10.R.11.SL062003-(\d+).html" will match the system
filenames in the SPL2003/system folder of the ROUGE SPL example
in the "sample-test" folder.
Currently, there is no support for multiple systems.
"""
return self._system_filename_pattern
@system_filename_pattern.setter
def system_filename_pattern(self, pattern):
self._system_filename_pattern = pattern
@property
def model_filename_pattern(self):
"""
The regular expression pattern for matching model summary
filenames. The pattern needs to contain the string "#ID#",
which is a placeholder for the document ID.
E.g. "SL.P.10.R.[A-Z].SL062003-#ID#.html" will match the model
filenames in the SPL2003/system folder of the ROUGE SPL
example in the "sample-test" folder.
"#ID#" is a placeholder for the document ID which has been
matched by the "(\d+)" part of the system filename pattern.
The different model summaries for a given document ID are
matched by the "[A-Z]" part.
"""
return self._model_filename_pattern
@model_filename_pattern.setter
def model_filename_pattern(self, pattern):
self._model_filename_pattern = pattern
@property
def config_file(self):
return self._config_file
@config_file.setter
def config_file(self, path):
config_dir, _ = os.path.split(path)
verify_dir(config_dir, "configuration file")
self._config_file = path
def split_sentences(self):
"""
ROUGE requires texts split into sentences. In case the texts
are not already split, this method can be used.
"""
from pyrouge.utils.sentence_splitter import PunktSentenceSplitter
self.log.info("Splitting sentences.")
ss = PunktSentenceSplitter()
sent_split_to_string = lambda s: "\n".join(ss.split(s))
process_func = partial(
DirectoryProcessor.process, function=sent_split_to_string)
self.__process_summaries(process_func)
@staticmethod
def convert_summaries_to_rouge_format(input_dir, output_dir):
"""
Convert all files in input_dir into a format ROUGE understands
and saves the files to output_dir. The input files are assumed
to be plain text with one sentence per line.
input_dir: Path of directory containing the input files.
output_dir: Path of directory in which the converted files
will be saved.
"""
DirectoryProcessor.process(
input_dir, output_dir, Rouge155.convert_text_to_rouge_format)
@staticmethod
def convert_text_to_rouge_format(text, title="dummy title"):
"""
Convert a text to a format ROUGE understands. The text is
assumed to contain one sentence per line.
text: The text to convert, containg one sentence per line.
title: Optional title for the text. The title will appear
in the converted file, but doesn't seem to have
any other relevance.
Returns: The converted text as string.
"""
sentences = text.split("\n")
sent_elems = [
"<a name=\"{i}\">[{i}]</a> <a href=\"#{i}\" id={i}>"
"{text}</a>".format(i=i, text=sent)
for i, sent in enumerate(sentences, start=1)]
html = """<html>
<head>
<title>{title}</title>
</head>
<body bgcolor="white">
{elems}
</body>
</html>""".format(title=title, elems="\n".join(sent_elems))
return html
@staticmethod
def write_config_static(system_dir, system_filename_pattern,
model_dir, model_filename_pattern,
config_file_path, system_id=None):
"""
Write the ROUGE configuration file, which is basically a list
of system summary files and their corresponding model summary
files.
pyrouge uses regular expressions to automatically find the
matching model summary files for a given system summary file
(cf. docstrings for system_filename_pattern and
model_filename_pattern).
system_dir: Path of directory containing
system summaries.
system_filename_pattern: Regex string for matching
system summary filenames.
model_dir: Path of directory containing
model summaries.
model_filename_pattern: Regex string for matching model
summary filenames.
config_file_path: Path of the configuration file.
system_id: Optional system ID string which
will appear in the ROUGE output.
"""
system_filenames = [f for f in os.listdir(system_dir)]
system_models_tuples = []
system_filename_pattern = re.compile(system_filename_pattern)
for system_filename in sorted(system_filenames):
match = system_filename_pattern.match(system_filename)
if match:
id = match.groups(0)[0]
model_filenames = Rouge155.__get_model_filenames_for_id(
id, model_dir, model_filename_pattern)
system_models_tuples.append(
(system_filename, sorted(model_filenames)))
if not system_models_tuples:
raise Exception(
"Did not find any files matching the pattern {} "
"in the system summaries directory {}.".format(
system_filename_pattern.pattern, system_dir))
with codecs.open(config_file_path, 'w', encoding='utf-8') as f:
f.write('<ROUGE-EVAL version="1.55">')
for task_id, (system_filename, model_filenames) in enumerate(
system_models_tuples, start=1):
eval_string = Rouge155.__get_eval_string(
task_id, system_id,
system_dir, system_filename,
model_dir, model_filenames)
f.write(eval_string)
f.write("</ROUGE-EVAL>")
def evaluate(self, system_id=1, rouge_args=None):
"""
Run ROUGE to evaluate the system summaries in system_dir against
the model summaries in model_dir. The summaries are assumed to
be in the one-sentence-per-line HTML format ROUGE understands.
system_id: Optional system ID which will be printed in
ROUGE's output.
Returns: Rouge output as string.
"""
self.write_config(system_id=system_id)
options = self.__get_options(rouge_args)
command = [self._bin_path] + options
env = None
if hasattr(self, "_home_dir") and self._home_dir:
env = {'ROUGE_EVAL_HOME': self._home_dir}
self.log.info(
"Running ROUGE with command {}".format(" ".join(command)))
rouge_output = check_output(command, env=env).decode("UTF-8")
return rouge_output
def convert_and_evaluate(self, system_id=1,
split_sentences=False, rouge_args=None):
"""
Convert plain text summaries to ROUGE format and run ROUGE to
evaluate the system summaries in system_dir against the model
summaries in model_dir. Optionally split texts into sentences
in case they aren't already.
This is just a convenience method combining
convert_summaries_to_rouge_format() and evaluate().
split_sentences: Optional argument specifying if
sentences should be split.
system_id: Optional system ID which will be printed
in ROUGE's output.
Returns: ROUGE output as string.
"""
if split_sentences:
self.split_sentences()
self.__write_summaries()
rouge_output = self.evaluate(system_id, rouge_args)
return rouge_output
def output_to_dict(self, output):
"""
Convert the ROUGE output into python dictionary for further
processing.
"""
#0 ROUGE-1 Average_R: 0.02632 (95%-conf.int. 0.02632 - 0.02632)
pattern = re.compile(
r"(\d+) (ROUGE-\S+) (Average_\w): (\d.\d+) "
r"\(95%-conf.int. (\d.\d+) - (\d.\d+)\)")
results = {}
for line in output.split("\n"):
match = pattern.match(line)
if match:
sys_id, rouge_type, measure, result, conf_begin, conf_end = \
match.groups()
measure = {
'Average_R': 'recall',
'Average_P': 'precision',
'Average_F': 'f_score'
}[measure]
rouge_type = rouge_type.lower().replace("-", '_')
key = "{}_{}".format(rouge_type, measure)
results[key] = float(result)
results["{}_cb".format(key)] = float(conf_begin)
results["{}_ce".format(key)] = float(conf_end)
return results
###################################################################
# Private methods
def __set_rouge_dir(self, home_dir=None):
"""
Verfify presence of ROUGE-1.5.5.pl and data folder, and set
those paths.
"""
if not home_dir:
self._home_dir = self.__get_rouge_home_dir_from_settings()
else:
self._home_dir = home_dir
self.save_home_dir()
self._bin_path = os.path.join(self._home_dir, 'ROUGE-1.5.5.pl')
self.data_dir = os.path.join(self._home_dir, 'data')
if not os.path.exists(self._bin_path):
raise Exception(
"ROUGE binary not found at {}. Please set the "
"correct path by running pyrouge_set_rouge_path "
"/path/to/rouge/home.".format(self._bin_path))
def __get_rouge_home_dir_from_settings(self):
config = ConfigParser()
with open(self._settings_file) as f:
if hasattr(config, "read_file"):
config.read_file(f)
else:
# use deprecated python 2.x method
config.readfp(f)
rouge_home_dir = config.get('pyrouge settings', 'home_dir')
return rouge_home_dir
@staticmethod
def __get_eval_string(
task_id, system_id,
system_dir, system_filename,
model_dir, model_filenames):
"""
ROUGE can evaluate several system summaries for a given text
against several model summaries, i.e. there is an m-to-n
relation between system and model summaries. The system
summaries are listed in the <PEERS> tag and the model summaries
in the <MODELS> tag. pyrouge currently only supports one system
summary per text, i.e. it assumes a 1-to-n relation between
system and model summaries.
"""
peer_elems = "<P ID=\"{id}\">{name}</P>".format(
id=system_id, name=system_filename)
model_elems = ["<M ID=\"{id}\">{name}</M>".format(
id=chr(65 + i), name=name)
for i, name in enumerate(model_filenames)]
model_elems = "\n\t\t\t".join(model_elems)
eval_string = """
<EVAL ID="{task_id}">
<MODEL-ROOT>{model_root}</MODEL-ROOT>
<PEER-ROOT>{peer_root}</PEER-ROOT>
<INPUT-FORMAT TYPE="SEE">
</INPUT-FORMAT>
<PEERS>
{peer_elems}
</PEERS>
<MODELS>
{model_elems}
</MODELS>
</EVAL>
""".format(
task_id=task_id,
model_root=model_dir, model_elems=model_elems,
peer_root=system_dir, peer_elems=peer_elems)
return eval_string
def __process_summaries(self, process_func):
"""
Helper method that applies process_func to the files in the
system and model folders and saves the resulting files to new
system and model folders.
"""
temp_dir = mkdtemp()
new_system_dir = os.path.join(temp_dir, "system")
os.mkdir(new_system_dir)
new_model_dir = os.path.join(temp_dir, "model")
os.mkdir(new_model_dir)
self.log.info(
"Processing summaries. Saving system files to {} and "
"model files to {}.".format(new_system_dir, new_model_dir))
process_func(self._system_dir, new_system_dir)
process_func(self._model_dir, new_model_dir)
self._system_dir = new_system_dir
self._model_dir = new_model_dir
def __write_summaries(self):
self.log.info("Writing summaries.")
self.__process_summaries(self.convert_summaries_to_rouge_format)
@staticmethod
def __get_model_filenames_for_id(id, model_dir, model_filenames_pattern):
pattern = re.compile(model_filenames_pattern.replace('#ID#', id))
model_filenames = [
f for f in os.listdir(model_dir) if pattern.match(f)]
if not model_filenames:
raise Exception(
"Could not find any model summaries for the system"
" summary with ID {}. Specified model filename pattern was: "
"{}".format(id, model_filenames_pattern))
return model_filenames
def __get_options(self, rouge_args=None):
"""
Get supplied command line arguments for ROUGE or use default
ones.
"""
if self.args:
options = self.args.split()
elif rouge_args:
options = rouge_args.split()
else:
options = [
'-e', self._data_dir,
'-c', 95,
'-2',
'-1',
'-U',
'-r', 1000,
'-n', 4,
'-w', 1.2,
'-a',
]
options = list(map(str, options))
options = self.__add_config_option(options)
return options
def __create_dir_property(self, dir_name, docstring):
"""
Generate getter and setter for a directory property.
"""
property_name = "{}_dir".format(dir_name)
private_name = "_" + property_name
setattr(self, private_name, None)
def fget(self):
return getattr(self, private_name)
def fset(self, path):
verify_dir(path, dir_name)
setattr(self, private_name, path)
p = property(fget=fget, fset=fset, doc=docstring)
setattr(self.__class__, property_name, p)
def __set_dir_properties(self):
"""
Automatically generate the properties for directories.
"""
directories = [
("home", "The ROUGE home directory."),
("data", "The path of the ROUGE 'data' directory."),
("system", "Path of the directory containing system summaries."),
("model", "Path of the directory containing model summaries."),
]
for (dirname, docstring) in directories:
self.__create_dir_property(dirname, docstring)
def __clean_rouge_args(self, rouge_args):
"""
Remove enclosing quotation marks, if any.
"""
if not rouge_args:
return
quot_mark_pattern = re.compile('"(.+)"')
match = quot_mark_pattern.match(rouge_args)
if match:
cleaned_args = match.group(1)
return cleaned_args
else:
return rouge_args
def __add_config_option(self, options):
return options + ['-m'] + [self._config_file]
def __get_config_path(self):
if platform.system() == "Windows":
parent_dir = os.getenv("APPDATA")
config_dir_name = "pyrouge"
elif os.name == "posix":
parent_dir = os.path.expanduser("~")
config_dir_name = ".pyrouge"
else:
parent_dir = os.path.dirname(__file__)
config_dir_name = ""
config_dir = os.path.join(parent_dir, config_dir_name)
if not os.path.exists(config_dir):
os.makedirs(config_dir)
return os.path.join(config_dir, 'settings.ini')
|
bheinzerling/pyrouge | pyrouge/Rouge155.py | Rouge155.evaluate | python | def evaluate(self, system_id=1, rouge_args=None):
self.write_config(system_id=system_id)
options = self.__get_options(rouge_args)
command = [self._bin_path] + options
env = None
if hasattr(self, "_home_dir") and self._home_dir:
env = {'ROUGE_EVAL_HOME': self._home_dir}
self.log.info(
"Running ROUGE with command {}".format(" ".join(command)))
rouge_output = check_output(command, env=env).decode("UTF-8")
return rouge_output | Run ROUGE to evaluate the system summaries in system_dir against
the model summaries in model_dir. The summaries are assumed to
be in the one-sentence-per-line HTML format ROUGE understands.
system_id: Optional system ID which will be printed in
ROUGE's output.
Returns: Rouge output as string. | train | https://github.com/bheinzerling/pyrouge/blob/afeb37dd2608f1399e2fb24a4ee2fe10a2a18603/pyrouge/Rouge155.py#L322-L343 | [
"def write_config(self, config_file_path=None, system_id=None):\n \"\"\"\n Write the ROUGE configuration file, which is basically a list\n of system summary files and their matching model summary files.\n\n This is a non-static version of write_config_file_static().\n\n config_file_path: Path of the configuration file.\n system_id: Optional system ID string which will\n appear in the ROUGE output.\n\n \"\"\"\n if not system_id:\n system_id = 1\n if (not config_file_path) or (not self._config_dir):\n self._config_dir = mkdtemp()\n config_filename = \"rouge_conf.xml\"\n else:\n config_dir, config_filename = os.path.split(config_file_path)\n verify_dir(config_dir, \"configuration file\")\n self._config_file = os.path.join(self._config_dir, config_filename)\n Rouge155.write_config_static(\n self._system_dir, self._system_filename_pattern,\n self._model_dir, self._model_filename_pattern,\n self._config_file, system_id)\n self.log.info(\n \"Written ROUGE configuration to {}\".format(self._config_file))\n",
"def __get_options(self, rouge_args=None):\n \"\"\"\n Get supplied command line arguments for ROUGE or use default\n ones.\n\n \"\"\"\n if self.args:\n options = self.args.split()\n elif rouge_args:\n options = rouge_args.split()\n else:\n options = [\n '-e', self._data_dir,\n '-c', 95,\n '-2',\n '-1',\n '-U',\n '-r', 1000,\n '-n', 4,\n '-w', 1.2,\n '-a',\n ]\n options = list(map(str, options))\n\n options = self.__add_config_option(options)\n return options\n"
] | class Rouge155(object):
"""
This is a wrapper for the ROUGE 1.5.5 summary evaluation package.
This class is designed to simplify the evaluation process by:
1) Converting summaries into a format ROUGE understands.
2) Generating the ROUGE configuration file automatically based
on filename patterns.
This class can be used within Python like this:
rouge = Rouge155()
rouge.system_dir = 'test/systems'
rouge.model_dir = 'test/models'
# The system filename pattern should contain one group that
# matches the document ID.
rouge.system_filename_pattern = 'SL.P.10.R.11.SL062003-(\d+).html'
# The model filename pattern has '#ID#' as a placeholder for the
# document ID. If there are multiple model summaries, pyrouge
# will use the provided regex to automatically match them with
# the corresponding system summary. Here, [A-Z] matches
# multiple model summaries for a given #ID#.
rouge.model_filename_pattern = 'SL.P.10.R.[A-Z].SL062003-#ID#.html'
rouge_output = rouge.evaluate()
print(rouge_output)
output_dict = rouge.output_to_dict(rouge_ouput)
print(output_dict)
-> {'rouge_1_f_score': 0.95652,
'rouge_1_f_score_cb': 0.95652,
'rouge_1_f_score_ce': 0.95652,
'rouge_1_precision': 0.95652,
[...]
To evaluate multiple systems:
rouge = Rouge155()
rouge.system_dir = '/PATH/TO/systems'
rouge.model_dir = 'PATH/TO/models'
for system_id in ['id1', 'id2', 'id3']:
rouge.system_filename_pattern = \
'SL.P/.10.R.{}.SL062003-(\d+).html'.format(system_id)
rouge.model_filename_pattern = \
'SL.P.10.R.[A-Z].SL062003-#ID#.html'
rouge_output = rouge.evaluate(system_id)
print(rouge_output)
"""
def __init__(self, rouge_dir=None, rouge_args=None, log_level=None):
"""
Create a Rouge155 object.
rouge_dir: Directory containing Rouge-1.5.5.pl
rouge_args: Arguments to pass through to ROUGE if you
don't want to use the default pyrouge
arguments.
"""
if log_level is None:
self.log = log.get_global_console_logger()
else:
self.log = log.get_global_console_logger(log_level)
self.__set_dir_properties()
self._config_file = None
self._settings_file = self.__get_config_path()
self.__set_rouge_dir(rouge_dir)
self.args = self.__clean_rouge_args(rouge_args)
self._system_filename_pattern = None
self._model_filename_pattern = None
def save_home_dir(self):
config = ConfigParser()
section = 'pyrouge settings'
config.add_section(section)
config.set(section, 'home_dir', self._home_dir)
with open(self._settings_file, 'w') as f:
config.write(f)
self.log.info("Set ROUGE home directory to {}.".format(self._home_dir))
@property
def settings_file(self):
"""
Path of the setttings file, which stores the ROUGE home dir.
"""
return self._settings_file
@property
def bin_path(self):
"""
The full path of the ROUGE binary (although it's technically
a script), i.e. rouge_home_dir/ROUGE-1.5.5.pl
"""
if self._bin_path is None:
raise Exception(
"ROUGE path not set. Please set the ROUGE home directory "
"and ensure that ROUGE-1.5.5.pl exists in it.")
return self._bin_path
@property
def system_filename_pattern(self):
"""
The regular expression pattern for matching system summary
filenames. The regex string.
E.g. "SL.P.10.R.11.SL062003-(\d+).html" will match the system
filenames in the SPL2003/system folder of the ROUGE SPL example
in the "sample-test" folder.
Currently, there is no support for multiple systems.
"""
return self._system_filename_pattern
@system_filename_pattern.setter
def system_filename_pattern(self, pattern):
self._system_filename_pattern = pattern
@property
def model_filename_pattern(self):
"""
The regular expression pattern for matching model summary
filenames. The pattern needs to contain the string "#ID#",
which is a placeholder for the document ID.
E.g. "SL.P.10.R.[A-Z].SL062003-#ID#.html" will match the model
filenames in the SPL2003/system folder of the ROUGE SPL
example in the "sample-test" folder.
"#ID#" is a placeholder for the document ID which has been
matched by the "(\d+)" part of the system filename pattern.
The different model summaries for a given document ID are
matched by the "[A-Z]" part.
"""
return self._model_filename_pattern
@model_filename_pattern.setter
def model_filename_pattern(self, pattern):
self._model_filename_pattern = pattern
@property
def config_file(self):
return self._config_file
@config_file.setter
def config_file(self, path):
config_dir, _ = os.path.split(path)
verify_dir(config_dir, "configuration file")
self._config_file = path
def split_sentences(self):
"""
ROUGE requires texts split into sentences. In case the texts
are not already split, this method can be used.
"""
from pyrouge.utils.sentence_splitter import PunktSentenceSplitter
self.log.info("Splitting sentences.")
ss = PunktSentenceSplitter()
sent_split_to_string = lambda s: "\n".join(ss.split(s))
process_func = partial(
DirectoryProcessor.process, function=sent_split_to_string)
self.__process_summaries(process_func)
@staticmethod
def convert_summaries_to_rouge_format(input_dir, output_dir):
"""
Convert all files in input_dir into a format ROUGE understands
and saves the files to output_dir. The input files are assumed
to be plain text with one sentence per line.
input_dir: Path of directory containing the input files.
output_dir: Path of directory in which the converted files
will be saved.
"""
DirectoryProcessor.process(
input_dir, output_dir, Rouge155.convert_text_to_rouge_format)
@staticmethod
def convert_text_to_rouge_format(text, title="dummy title"):
"""
Convert a text to a format ROUGE understands. The text is
assumed to contain one sentence per line.
text: The text to convert, containg one sentence per line.
title: Optional title for the text. The title will appear
in the converted file, but doesn't seem to have
any other relevance.
Returns: The converted text as string.
"""
sentences = text.split("\n")
sent_elems = [
"<a name=\"{i}\">[{i}]</a> <a href=\"#{i}\" id={i}>"
"{text}</a>".format(i=i, text=sent)
for i, sent in enumerate(sentences, start=1)]
html = """<html>
<head>
<title>{title}</title>
</head>
<body bgcolor="white">
{elems}
</body>
</html>""".format(title=title, elems="\n".join(sent_elems))
return html
@staticmethod
def write_config_static(system_dir, system_filename_pattern,
model_dir, model_filename_pattern,
config_file_path, system_id=None):
"""
Write the ROUGE configuration file, which is basically a list
of system summary files and their corresponding model summary
files.
pyrouge uses regular expressions to automatically find the
matching model summary files for a given system summary file
(cf. docstrings for system_filename_pattern and
model_filename_pattern).
system_dir: Path of directory containing
system summaries.
system_filename_pattern: Regex string for matching
system summary filenames.
model_dir: Path of directory containing
model summaries.
model_filename_pattern: Regex string for matching model
summary filenames.
config_file_path: Path of the configuration file.
system_id: Optional system ID string which
will appear in the ROUGE output.
"""
system_filenames = [f for f in os.listdir(system_dir)]
system_models_tuples = []
system_filename_pattern = re.compile(system_filename_pattern)
for system_filename in sorted(system_filenames):
match = system_filename_pattern.match(system_filename)
if match:
id = match.groups(0)[0]
model_filenames = Rouge155.__get_model_filenames_for_id(
id, model_dir, model_filename_pattern)
system_models_tuples.append(
(system_filename, sorted(model_filenames)))
if not system_models_tuples:
raise Exception(
"Did not find any files matching the pattern {} "
"in the system summaries directory {}.".format(
system_filename_pattern.pattern, system_dir))
with codecs.open(config_file_path, 'w', encoding='utf-8') as f:
f.write('<ROUGE-EVAL version="1.55">')
for task_id, (system_filename, model_filenames) in enumerate(
system_models_tuples, start=1):
eval_string = Rouge155.__get_eval_string(
task_id, system_id,
system_dir, system_filename,
model_dir, model_filenames)
f.write(eval_string)
f.write("</ROUGE-EVAL>")
def write_config(self, config_file_path=None, system_id=None):
"""
Write the ROUGE configuration file, which is basically a list
of system summary files and their matching model summary files.
This is a non-static version of write_config_file_static().
config_file_path: Path of the configuration file.
system_id: Optional system ID string which will
appear in the ROUGE output.
"""
if not system_id:
system_id = 1
if (not config_file_path) or (not self._config_dir):
self._config_dir = mkdtemp()
config_filename = "rouge_conf.xml"
else:
config_dir, config_filename = os.path.split(config_file_path)
verify_dir(config_dir, "configuration file")
self._config_file = os.path.join(self._config_dir, config_filename)
Rouge155.write_config_static(
self._system_dir, self._system_filename_pattern,
self._model_dir, self._model_filename_pattern,
self._config_file, system_id)
self.log.info(
"Written ROUGE configuration to {}".format(self._config_file))
def convert_and_evaluate(self, system_id=1,
split_sentences=False, rouge_args=None):
"""
Convert plain text summaries to ROUGE format and run ROUGE to
evaluate the system summaries in system_dir against the model
summaries in model_dir. Optionally split texts into sentences
in case they aren't already.
This is just a convenience method combining
convert_summaries_to_rouge_format() and evaluate().
split_sentences: Optional argument specifying if
sentences should be split.
system_id: Optional system ID which will be printed
in ROUGE's output.
Returns: ROUGE output as string.
"""
if split_sentences:
self.split_sentences()
self.__write_summaries()
rouge_output = self.evaluate(system_id, rouge_args)
return rouge_output
def output_to_dict(self, output):
"""
Convert the ROUGE output into python dictionary for further
processing.
"""
#0 ROUGE-1 Average_R: 0.02632 (95%-conf.int. 0.02632 - 0.02632)
pattern = re.compile(
r"(\d+) (ROUGE-\S+) (Average_\w): (\d.\d+) "
r"\(95%-conf.int. (\d.\d+) - (\d.\d+)\)")
results = {}
for line in output.split("\n"):
match = pattern.match(line)
if match:
sys_id, rouge_type, measure, result, conf_begin, conf_end = \
match.groups()
measure = {
'Average_R': 'recall',
'Average_P': 'precision',
'Average_F': 'f_score'
}[measure]
rouge_type = rouge_type.lower().replace("-", '_')
key = "{}_{}".format(rouge_type, measure)
results[key] = float(result)
results["{}_cb".format(key)] = float(conf_begin)
results["{}_ce".format(key)] = float(conf_end)
return results
###################################################################
# Private methods
def __set_rouge_dir(self, home_dir=None):
    """
    Verify the presence of ROUGE-1.5.5.pl and the data folder, and
    record their paths on this instance.

    home_dir: Optional path of the ROUGE home directory. If omitted,
              the directory stored in the settings file is used.
    """
    if home_dir:
        self._home_dir = home_dir
        # Persist an explicitly given location for future runs.
        self.save_home_dir()
    else:
        self._home_dir = self.__get_rouge_home_dir_from_settings()
    self._bin_path = os.path.join(self._home_dir, 'ROUGE-1.5.5.pl')
    # 'data_dir' is a generated property; its setter validates the path.
    self.data_dir = os.path.join(self._home_dir, 'data')
    if not os.path.exists(self._bin_path):
        raise Exception(
            "ROUGE binary not found at {}. Please set the "
            "correct path by running pyrouge_set_rouge_path "
            "/path/to/rouge/home.".format(self._bin_path))
def __get_rouge_home_dir_from_settings(self):
    """Read and return the ROUGE home directory from the settings file."""
    config = ConfigParser()
    reader = getattr(config, "read_file", None)
    if reader is None:
        # Fall back to the deprecated Python 2.x method.
        reader = config.readfp
    with open(self._settings_file) as settings:
        reader(settings)
    return config.get('pyrouge settings', 'home_dir')
@staticmethod
def __get_eval_string(
task_id, system_id,
system_dir, system_filename,
model_dir, model_filenames):
"""
Build the <EVAL> XML element for one task of the ROUGE
configuration file.

ROUGE can evaluate several system summaries for a given text
against several model summaries, i.e. there is an m-to-n
relation between system and model summaries. The system
summaries are listed in the <PEERS> tag and the model summaries
in the <MODELS> tag. pyrouge currently only supports one system
summary per text, i.e. it assumes a 1-to-n relation between
system and model summaries.

task_id:         Numeric ID of this evaluation task.
system_id:       ID printed for the system summary.
system_dir:      Directory containing the system summary.
system_filename: Filename of the single system summary.
model_dir:       Directory containing the model summaries.
model_filenames: Iterable of model summary filenames.

Returns: The <EVAL> element as a string.
"""
# Exactly one <P> element: one system summary per task.
peer_elems = "<P ID=\"{id}\">{name}</P>".format(
id=system_id, name=system_filename)
# One <M> element per model summary; IDs are letters A, B, C, ...
# NOTE(review): chr(65 + i) runs past 'Z' beyond 26 model summaries.
model_elems = ["<M ID=\"{id}\">{name}</M>".format(
id=chr(65 + i), name=name)
for i, name in enumerate(model_filenames)]
model_elems = "\n\t\t\t".join(model_elems)
eval_string = """
<EVAL ID="{task_id}">
<MODEL-ROOT>{model_root}</MODEL-ROOT>
<PEER-ROOT>{peer_root}</PEER-ROOT>
<INPUT-FORMAT TYPE="SEE">
</INPUT-FORMAT>
<PEERS>
{peer_elems}
</PEERS>
<MODELS>
{model_elems}
</MODELS>
</EVAL>
""".format(
task_id=task_id,
model_root=model_dir, model_elems=model_elems,
peer_root=system_dir, peer_elems=peer_elems)
return eval_string
def __process_summaries(self, process_func):
    """
    Apply process_func to the files in the system and model folders
    and save the results to fresh temporary system and model folders,
    which then replace the originals on this instance.

    process_func: Callable taking (input_dir, output_dir).
    """
    temp_dir = mkdtemp()
    new_system_dir = os.path.join(temp_dir, "system")
    new_model_dir = os.path.join(temp_dir, "model")
    os.mkdir(new_system_dir)
    os.mkdir(new_model_dir)
    self.log.info(
        "Processing summaries. Saving system files to {} and "
        "model files to {}.".format(new_system_dir, new_model_dir))
    for source_dir, target_dir in (
            (self._system_dir, new_system_dir),
            (self._model_dir, new_model_dir)):
        process_func(source_dir, target_dir)
    # Subsequent steps read from the processed copies.
    self._system_dir = new_system_dir
    self._model_dir = new_model_dir
def __write_summaries(self):
"""Convert the summaries into ROUGE's one-sentence-per-line HTML format."""
self.log.info("Writing summaries.")
# __process_summaries replaces _system_dir/_model_dir with the
# converted temporary copies.
self.__process_summaries(self.convert_summaries_to_rouge_format)
@staticmethod
def __get_model_filenames_for_id(id, model_dir, model_filenames_pattern):
    """
    Return all model summary filenames in model_dir matching the
    model filename pattern with '#ID#' replaced by the given ID.

    Raises an Exception when no model summary matches.
    """
    pattern = re.compile(model_filenames_pattern.replace('#ID#', id))
    model_filenames = [
        name for name in os.listdir(model_dir) if pattern.match(name)]
    if not model_filenames:
        raise Exception(
            "Could not find any model summaries for the system"
            " summary with ID {}. Specified model filename pattern was: "
            "{}".format(id, model_filenames_pattern))
    return model_filenames
def __get_options(self, rouge_args=None):
    """
    Build the ROUGE command-line options, preferring arguments given
    at construction time, then the supplied rouge_args, then
    pyrouge's defaults.
    """
    if self.args:
        raw_options = self.args.split()
    elif rouge_args:
        raw_options = rouge_args.split()
    else:
        raw_options = [
            '-e', self._data_dir,
            '-c', 95,
            '-2',
            '-1',
            '-U',
            '-r', 1000,
            '-n', 4,
            '-w', 1.2,
            '-a',
        ]
    # ROUGE is invoked via subprocess, so every option must be a string.
    options = [str(option) for option in raw_options]
    return self.__add_config_option(options)
def __create_dir_property(self, dir_name, docstring):
"""
Generate getter and setter for a directory property.

dir_name:  Base name, e.g. "home" creates the property "home_dir"
backed by the instance attribute "_home_dir".
docstring: Docstring attached to the generated property.
"""
property_name = "{}_dir".format(dir_name)
private_name = "_" + property_name
# Initialize the backing attribute so the getter works before any set.
setattr(self, private_name, None)
def fget(self):
return getattr(self, private_name)
def fset(self, path):
# Validate the path via verify_dir before storing it.
verify_dir(path, dir_name)
setattr(self, private_name, path)
p = property(fget=fget, fset=fset, doc=docstring)
# Properties must be installed on the class, not the instance.
setattr(self.__class__, property_name, p)
def __set_dir_properties(self):
    """Generate the *_dir properties used by this class."""
    for dir_name, doc in (
            ("home", "The ROUGE home directory."),
            ("data", "The path of the ROUGE 'data' directory."),
            ("system", "Path of the directory containing system summaries."),
            ("model", "Path of the directory containing model summaries.")):
        self.__create_dir_property(dir_name, doc)
def __clean_rouge_args(self, rouge_args):
    """
    Strip enclosing quotation marks from the argument string, if any.

    Returns None for empty input, the unquoted string otherwise.
    """
    if not rouge_args:
        return
    quoted = re.match('"(.+)"', rouge_args)
    if quoted:
        return quoted.group(1)
    return rouge_args
def __add_config_option(self, options):
    """Return options with '-m' and the configuration file path appended."""
    return options + ['-m', self._config_file]
def __get_config_path(self):
    """
    Return the platform-dependent path of pyrouge's settings.ini,
    creating its parent directory if necessary.
    """
    if platform.system() == "Windows":
        parent_dir, config_dir_name = os.getenv("APPDATA"), "pyrouge"
    elif os.name == "posix":
        parent_dir, config_dir_name = os.path.expanduser("~"), ".pyrouge"
    else:
        # Unknown platform: fall back to the package directory itself.
        parent_dir, config_dir_name = os.path.dirname(__file__), ""
    config_dir = os.path.join(parent_dir, config_dir_name)
    if not os.path.exists(config_dir):
        os.makedirs(config_dir)
    return os.path.join(config_dir, 'settings.ini')
|
bheinzerling/pyrouge | pyrouge/Rouge155.py | Rouge155.convert_and_evaluate | python | def convert_and_evaluate(self, system_id=1,
split_sentences=False, rouge_args=None):
if split_sentences:
self.split_sentences()
self.__write_summaries()
rouge_output = self.evaluate(system_id, rouge_args)
return rouge_output | Convert plain text summaries to ROUGE format and run ROUGE to
evaluate the system summaries in system_dir against the model
summaries in model_dir. Optionally split texts into sentences
in case they aren't already.
This is just a convenience method combining
convert_summaries_to_rouge_format() and evaluate().
split_sentences: Optional argument specifying if
sentences should be split.
system_id: Optional system ID which will be printed
in ROUGE's output.
Returns: ROUGE output as string. | train | https://github.com/bheinzerling/pyrouge/blob/afeb37dd2608f1399e2fb24a4ee2fe10a2a18603/pyrouge/Rouge155.py#L345-L368 | [
"def split_sentences(self):\n \"\"\"\n ROUGE requires texts split into sentences. In case the texts\n are not already split, this method can be used.\n\n \"\"\"\n from pyrouge.utils.sentence_splitter import PunktSentenceSplitter\n self.log.info(\"Splitting sentences.\")\n ss = PunktSentenceSplitter()\n sent_split_to_string = lambda s: \"\\n\".join(ss.split(s))\n process_func = partial(\n DirectoryProcessor.process, function=sent_split_to_string)\n self.__process_summaries(process_func)\n",
"def evaluate(self, system_id=1, rouge_args=None):\n \"\"\"\n Run ROUGE to evaluate the system summaries in system_dir against\n the model summaries in model_dir. The summaries are assumed to\n be in the one-sentence-per-line HTML format ROUGE understands.\n\n system_id: Optional system ID which will be printed in\n ROUGE's output.\n\n Returns: Rouge output as string.\n\n \"\"\"\n self.write_config(system_id=system_id)\n options = self.__get_options(rouge_args)\n command = [self._bin_path] + options\n env = None\n if hasattr(self, \"_home_dir\") and self._home_dir:\n env = {'ROUGE_EVAL_HOME': self._home_dir}\n self.log.info(\n \"Running ROUGE with command {}\".format(\" \".join(command)))\n rouge_output = check_output(command, env=env).decode(\"UTF-8\")\n return rouge_output\n",
"def __write_summaries(self):\n self.log.info(\"Writing summaries.\")\n self.__process_summaries(self.convert_summaries_to_rouge_format)\n"
] | class Rouge155(object):
"""
This is a wrapper for the ROUGE 1.5.5 summary evaluation package.
This class is designed to simplify the evaluation process by:
1) Converting summaries into a format ROUGE understands.
2) Generating the ROUGE configuration file automatically based
on filename patterns.
This class can be used within Python like this:
rouge = Rouge155()
rouge.system_dir = 'test/systems'
rouge.model_dir = 'test/models'
# The system filename pattern should contain one group that
# matches the document ID.
rouge.system_filename_pattern = 'SL.P.10.R.11.SL062003-(\d+).html'
# The model filename pattern has '#ID#' as a placeholder for the
# document ID. If there are multiple model summaries, pyrouge
# will use the provided regex to automatically match them with
# the corresponding system summary. Here, [A-Z] matches
# multiple model summaries for a given #ID#.
rouge.model_filename_pattern = 'SL.P.10.R.[A-Z].SL062003-#ID#.html'
rouge_output = rouge.evaluate()
print(rouge_output)
output_dict = rouge.output_to_dict(rouge_output)
print(output_dict)
-> {'rouge_1_f_score': 0.95652,
'rouge_1_f_score_cb': 0.95652,
'rouge_1_f_score_ce': 0.95652,
'rouge_1_precision': 0.95652,
[...]
To evaluate multiple systems:
rouge = Rouge155()
rouge.system_dir = '/PATH/TO/systems'
rouge.model_dir = 'PATH/TO/models'
for system_id in ['id1', 'id2', 'id3']:
rouge.system_filename_pattern = \
'SL.P/.10.R.{}.SL062003-(\d+).html'.format(system_id)
rouge.model_filename_pattern = \
'SL.P.10.R.[A-Z].SL062003-#ID#.html'
rouge_output = rouge.evaluate(system_id)
print(rouge_output)
"""
def __init__(self, rouge_dir=None, rouge_args=None, log_level=None):
"""
Create a Rouge155 object.
rouge_dir: Directory containing Rouge-1.5.5.pl
rouge_args: Arguments to pass through to ROUGE if you
don't want to use the default pyrouge
arguments.
"""
if log_level is None:
self.log = log.get_global_console_logger()
else:
self.log = log.get_global_console_logger(log_level)
self.__set_dir_properties()
self._config_file = None
self._settings_file = self.__get_config_path()
self.__set_rouge_dir(rouge_dir)
self.args = self.__clean_rouge_args(rouge_args)
self._system_filename_pattern = None
self._model_filename_pattern = None
def save_home_dir(self):
config = ConfigParser()
section = 'pyrouge settings'
config.add_section(section)
config.set(section, 'home_dir', self._home_dir)
with open(self._settings_file, 'w') as f:
config.write(f)
self.log.info("Set ROUGE home directory to {}.".format(self._home_dir))
@property
def settings_file(self):
"""
Path of the settings file, which stores the ROUGE home dir.
"""
return self._settings_file
@property
def bin_path(self):
"""
The full path of the ROUGE binary (although it's technically
a script), i.e. rouge_home_dir/ROUGE-1.5.5.pl
"""
if self._bin_path is None:
raise Exception(
"ROUGE path not set. Please set the ROUGE home directory "
"and ensure that ROUGE-1.5.5.pl exists in it.")
return self._bin_path
@property
def system_filename_pattern(self):
"""
The regular expression pattern for matching system summary
filenames. The regex string.
E.g. "SL.P.10.R.11.SL062003-(\d+).html" will match the system
filenames in the SPL2003/system folder of the ROUGE SPL example
in the "sample-test" folder.
Currently, there is no support for multiple systems.
"""
return self._system_filename_pattern
@system_filename_pattern.setter
def system_filename_pattern(self, pattern):
self._system_filename_pattern = pattern
@property
def model_filename_pattern(self):
"""
The regular expression pattern for matching model summary
filenames. The pattern needs to contain the string "#ID#",
which is a placeholder for the document ID.
E.g. "SL.P.10.R.[A-Z].SL062003-#ID#.html" will match the model
filenames in the SPL2003/system folder of the ROUGE SPL
example in the "sample-test" folder.
"#ID#" is a placeholder for the document ID which has been
matched by the "(\d+)" part of the system filename pattern.
The different model summaries for a given document ID are
matched by the "[A-Z]" part.
"""
return self._model_filename_pattern
@model_filename_pattern.setter
def model_filename_pattern(self, pattern):
self._model_filename_pattern = pattern
@property
def config_file(self):
return self._config_file
@config_file.setter
def config_file(self, path):
config_dir, _ = os.path.split(path)
verify_dir(config_dir, "configuration file")
self._config_file = path
def split_sentences(self):
"""
ROUGE requires texts split into sentences. In case the texts
are not already split, this method can be used.
"""
from pyrouge.utils.sentence_splitter import PunktSentenceSplitter
self.log.info("Splitting sentences.")
ss = PunktSentenceSplitter()
sent_split_to_string = lambda s: "\n".join(ss.split(s))
process_func = partial(
DirectoryProcessor.process, function=sent_split_to_string)
self.__process_summaries(process_func)
@staticmethod
def convert_summaries_to_rouge_format(input_dir, output_dir):
"""
Convert all files in input_dir into a format ROUGE understands
and saves the files to output_dir. The input files are assumed
to be plain text with one sentence per line.
input_dir: Path of directory containing the input files.
output_dir: Path of directory in which the converted files
will be saved.
"""
DirectoryProcessor.process(
input_dir, output_dir, Rouge155.convert_text_to_rouge_format)
@staticmethod
def convert_text_to_rouge_format(text, title="dummy title"):
"""
Convert a text to a format ROUGE understands. The text is
assumed to contain one sentence per line.
text: The text to convert, containing one sentence per line.
title: Optional title for the text. The title will appear
in the converted file, but doesn't seem to have
any other relevance.
Returns: The converted text as string.
"""
sentences = text.split("\n")
sent_elems = [
"<a name=\"{i}\">[{i}]</a> <a href=\"#{i}\" id={i}>"
"{text}</a>".format(i=i, text=sent)
for i, sent in enumerate(sentences, start=1)]
html = """<html>
<head>
<title>{title}</title>
</head>
<body bgcolor="white">
{elems}
</body>
</html>""".format(title=title, elems="\n".join(sent_elems))
return html
@staticmethod
def write_config_static(system_dir, system_filename_pattern,
model_dir, model_filename_pattern,
config_file_path, system_id=None):
"""
Write the ROUGE configuration file, which is basically a list
of system summary files and their corresponding model summary
files.
pyrouge uses regular expressions to automatically find the
matching model summary files for a given system summary file
(cf. docstrings for system_filename_pattern and
model_filename_pattern).
system_dir: Path of directory containing
system summaries.
system_filename_pattern: Regex string for matching
system summary filenames.
model_dir: Path of directory containing
model summaries.
model_filename_pattern: Regex string for matching model
summary filenames.
config_file_path: Path of the configuration file.
system_id: Optional system ID string which
will appear in the ROUGE output.
"""
system_filenames = [f for f in os.listdir(system_dir)]
system_models_tuples = []
system_filename_pattern = re.compile(system_filename_pattern)
for system_filename in sorted(system_filenames):
match = system_filename_pattern.match(system_filename)
if match:
id = match.groups(0)[0]
model_filenames = Rouge155.__get_model_filenames_for_id(
id, model_dir, model_filename_pattern)
system_models_tuples.append(
(system_filename, sorted(model_filenames)))
if not system_models_tuples:
raise Exception(
"Did not find any files matching the pattern {} "
"in the system summaries directory {}.".format(
system_filename_pattern.pattern, system_dir))
with codecs.open(config_file_path, 'w', encoding='utf-8') as f:
f.write('<ROUGE-EVAL version="1.55">')
for task_id, (system_filename, model_filenames) in enumerate(
system_models_tuples, start=1):
eval_string = Rouge155.__get_eval_string(
task_id, system_id,
system_dir, system_filename,
model_dir, model_filenames)
f.write(eval_string)
f.write("</ROUGE-EVAL>")
def write_config(self, config_file_path=None, system_id=None):
"""
Write the ROUGE configuration file, which is basically a list
of system summary files and their matching model summary files.
This is a non-static version of write_config_file_static().
config_file_path: Path of the configuration file.
system_id: Optional system ID string which will
appear in the ROUGE output.
"""
if not system_id:
system_id = 1
if (not config_file_path) or (not self._config_dir):
self._config_dir = mkdtemp()
config_filename = "rouge_conf.xml"
else:
config_dir, config_filename = os.path.split(config_file_path)
verify_dir(config_dir, "configuration file")
self._config_file = os.path.join(self._config_dir, config_filename)
Rouge155.write_config_static(
self._system_dir, self._system_filename_pattern,
self._model_dir, self._model_filename_pattern,
self._config_file, system_id)
self.log.info(
"Written ROUGE configuration to {}".format(self._config_file))
def evaluate(self, system_id=1, rouge_args=None):
"""
Run ROUGE to evaluate the system summaries in system_dir against
the model summaries in model_dir. The summaries are assumed to
be in the one-sentence-per-line HTML format ROUGE understands.
system_id: Optional system ID which will be printed in
ROUGE's output.
Returns: Rouge output as string.
"""
self.write_config(system_id=system_id)
options = self.__get_options(rouge_args)
command = [self._bin_path] + options
env = None
if hasattr(self, "_home_dir") and self._home_dir:
env = {'ROUGE_EVAL_HOME': self._home_dir}
self.log.info(
"Running ROUGE with command {}".format(" ".join(command)))
rouge_output = check_output(command, env=env).decode("UTF-8")
return rouge_output
def output_to_dict(self, output):
"""
Convert the ROUGE output into python dictionary for further
processing.
"""
#0 ROUGE-1 Average_R: 0.02632 (95%-conf.int. 0.02632 - 0.02632)
pattern = re.compile(
r"(\d+) (ROUGE-\S+) (Average_\w): (\d.\d+) "
r"\(95%-conf.int. (\d.\d+) - (\d.\d+)\)")
results = {}
for line in output.split("\n"):
match = pattern.match(line)
if match:
sys_id, rouge_type, measure, result, conf_begin, conf_end = \
match.groups()
measure = {
'Average_R': 'recall',
'Average_P': 'precision',
'Average_F': 'f_score'
}[measure]
rouge_type = rouge_type.lower().replace("-", '_')
key = "{}_{}".format(rouge_type, measure)
results[key] = float(result)
results["{}_cb".format(key)] = float(conf_begin)
results["{}_ce".format(key)] = float(conf_end)
return results
###################################################################
# Private methods
def __set_rouge_dir(self, home_dir=None):
"""
Verfify presence of ROUGE-1.5.5.pl and data folder, and set
those paths.
"""
if not home_dir:
self._home_dir = self.__get_rouge_home_dir_from_settings()
else:
self._home_dir = home_dir
self.save_home_dir()
self._bin_path = os.path.join(self._home_dir, 'ROUGE-1.5.5.pl')
self.data_dir = os.path.join(self._home_dir, 'data')
if not os.path.exists(self._bin_path):
raise Exception(
"ROUGE binary not found at {}. Please set the "
"correct path by running pyrouge_set_rouge_path "
"/path/to/rouge/home.".format(self._bin_path))
def __get_rouge_home_dir_from_settings(self):
config = ConfigParser()
with open(self._settings_file) as f:
if hasattr(config, "read_file"):
config.read_file(f)
else:
# use deprecated python 2.x method
config.readfp(f)
rouge_home_dir = config.get('pyrouge settings', 'home_dir')
return rouge_home_dir
@staticmethod
def __get_eval_string(
task_id, system_id,
system_dir, system_filename,
model_dir, model_filenames):
"""
ROUGE can evaluate several system summaries for a given text
against several model summaries, i.e. there is an m-to-n
relation between system and model summaries. The system
summaries are listed in the <PEERS> tag and the model summaries
in the <MODELS> tag. pyrouge currently only supports one system
summary per text, i.e. it assumes a 1-to-n relation between
system and model summaries.
"""
peer_elems = "<P ID=\"{id}\">{name}</P>".format(
id=system_id, name=system_filename)
model_elems = ["<M ID=\"{id}\">{name}</M>".format(
id=chr(65 + i), name=name)
for i, name in enumerate(model_filenames)]
model_elems = "\n\t\t\t".join(model_elems)
eval_string = """
<EVAL ID="{task_id}">
<MODEL-ROOT>{model_root}</MODEL-ROOT>
<PEER-ROOT>{peer_root}</PEER-ROOT>
<INPUT-FORMAT TYPE="SEE">
</INPUT-FORMAT>
<PEERS>
{peer_elems}
</PEERS>
<MODELS>
{model_elems}
</MODELS>
</EVAL>
""".format(
task_id=task_id,
model_root=model_dir, model_elems=model_elems,
peer_root=system_dir, peer_elems=peer_elems)
return eval_string
def __process_summaries(self, process_func):
"""
Helper method that applies process_func to the files in the
system and model folders and saves the resulting files to new
system and model folders.
"""
temp_dir = mkdtemp()
new_system_dir = os.path.join(temp_dir, "system")
os.mkdir(new_system_dir)
new_model_dir = os.path.join(temp_dir, "model")
os.mkdir(new_model_dir)
self.log.info(
"Processing summaries. Saving system files to {} and "
"model files to {}.".format(new_system_dir, new_model_dir))
process_func(self._system_dir, new_system_dir)
process_func(self._model_dir, new_model_dir)
self._system_dir = new_system_dir
self._model_dir = new_model_dir
def __write_summaries(self):
self.log.info("Writing summaries.")
self.__process_summaries(self.convert_summaries_to_rouge_format)
@staticmethod
def __get_model_filenames_for_id(id, model_dir, model_filenames_pattern):
pattern = re.compile(model_filenames_pattern.replace('#ID#', id))
model_filenames = [
f for f in os.listdir(model_dir) if pattern.match(f)]
if not model_filenames:
raise Exception(
"Could not find any model summaries for the system"
" summary with ID {}. Specified model filename pattern was: "
"{}".format(id, model_filenames_pattern))
return model_filenames
def __get_options(self, rouge_args=None):
"""
Get supplied command line arguments for ROUGE or use default
ones.
"""
if self.args:
options = self.args.split()
elif rouge_args:
options = rouge_args.split()
else:
options = [
'-e', self._data_dir,
'-c', 95,
'-2',
'-1',
'-U',
'-r', 1000,
'-n', 4,
'-w', 1.2,
'-a',
]
options = list(map(str, options))
options = self.__add_config_option(options)
return options
def __create_dir_property(self, dir_name, docstring):
"""
Generate getter and setter for a directory property.
"""
property_name = "{}_dir".format(dir_name)
private_name = "_" + property_name
setattr(self, private_name, None)
def fget(self):
return getattr(self, private_name)
def fset(self, path):
verify_dir(path, dir_name)
setattr(self, private_name, path)
p = property(fget=fget, fset=fset, doc=docstring)
setattr(self.__class__, property_name, p)
def __set_dir_properties(self):
"""
Automatically generate the properties for directories.
"""
directories = [
("home", "The ROUGE home directory."),
("data", "The path of the ROUGE 'data' directory."),
("system", "Path of the directory containing system summaries."),
("model", "Path of the directory containing model summaries."),
]
for (dirname, docstring) in directories:
self.__create_dir_property(dirname, docstring)
def __clean_rouge_args(self, rouge_args):
"""
Remove enclosing quotation marks, if any.
"""
if not rouge_args:
return
quot_mark_pattern = re.compile('"(.+)"')
match = quot_mark_pattern.match(rouge_args)
if match:
cleaned_args = match.group(1)
return cleaned_args
else:
return rouge_args
def __add_config_option(self, options):
return options + ['-m'] + [self._config_file]
def __get_config_path(self):
if platform.system() == "Windows":
parent_dir = os.getenv("APPDATA")
config_dir_name = "pyrouge"
elif os.name == "posix":
parent_dir = os.path.expanduser("~")
config_dir_name = ".pyrouge"
else:
parent_dir = os.path.dirname(__file__)
config_dir_name = ""
config_dir = os.path.join(parent_dir, config_dir_name)
if not os.path.exists(config_dir):
os.makedirs(config_dir)
return os.path.join(config_dir, 'settings.ini')
|
bheinzerling/pyrouge | pyrouge/Rouge155.py | Rouge155.output_to_dict | python | def output_to_dict(self, output):
#0 ROUGE-1 Average_R: 0.02632 (95%-conf.int. 0.02632 - 0.02632)
pattern = re.compile(
r"(\d+) (ROUGE-\S+) (Average_\w): (\d.\d+) "
r"\(95%-conf.int. (\d.\d+) - (\d.\d+)\)")
results = {}
for line in output.split("\n"):
match = pattern.match(line)
if match:
sys_id, rouge_type, measure, result, conf_begin, conf_end = \
match.groups()
measure = {
'Average_R': 'recall',
'Average_P': 'precision',
'Average_F': 'f_score'
}[measure]
rouge_type = rouge_type.lower().replace("-", '_')
key = "{}_{}".format(rouge_type, measure)
results[key] = float(result)
results["{}_cb".format(key)] = float(conf_begin)
results["{}_ce".format(key)] = float(conf_end)
return results | Convert the ROUGE output into python dictionary for further
processing. | train | https://github.com/bheinzerling/pyrouge/blob/afeb37dd2608f1399e2fb24a4ee2fe10a2a18603/pyrouge/Rouge155.py#L370-L396 | null | class Rouge155(object):
"""
This is a wrapper for the ROUGE 1.5.5 summary evaluation package.
This class is designed to simplify the evaluation process by:
1) Converting summaries into a format ROUGE understands.
2) Generating the ROUGE configuration file automatically based
on filename patterns.
This class can be used within Python like this:
rouge = Rouge155()
rouge.system_dir = 'test/systems'
rouge.model_dir = 'test/models'
# The system filename pattern should contain one group that
# matches the document ID.
rouge.system_filename_pattern = 'SL.P.10.R.11.SL062003-(\d+).html'
# The model filename pattern has '#ID#' as a placeholder for the
# document ID. If there are multiple model summaries, pyrouge
# will use the provided regex to automatically match them with
# the corresponding system summary. Here, [A-Z] matches
# multiple model summaries for a given #ID#.
rouge.model_filename_pattern = 'SL.P.10.R.[A-Z].SL062003-#ID#.html'
rouge_output = rouge.evaluate()
print(rouge_output)
output_dict = rouge.output_to_dict(rouge_output)
print(output_dict)
-> {'rouge_1_f_score': 0.95652,
'rouge_1_f_score_cb': 0.95652,
'rouge_1_f_score_ce': 0.95652,
'rouge_1_precision': 0.95652,
[...]
To evaluate multiple systems:
rouge = Rouge155()
rouge.system_dir = '/PATH/TO/systems'
rouge.model_dir = 'PATH/TO/models'
for system_id in ['id1', 'id2', 'id3']:
rouge.system_filename_pattern = \
'SL.P/.10.R.{}.SL062003-(\d+).html'.format(system_id)
rouge.model_filename_pattern = \
'SL.P.10.R.[A-Z].SL062003-#ID#.html'
rouge_output = rouge.evaluate(system_id)
print(rouge_output)
"""
def __init__(self, rouge_dir=None, rouge_args=None, log_level=None):
"""
Create a Rouge155 object.
rouge_dir: Directory containing Rouge-1.5.5.pl
rouge_args: Arguments to pass through to ROUGE if you
don't want to use the default pyrouge
arguments.
"""
if log_level is None:
self.log = log.get_global_console_logger()
else:
self.log = log.get_global_console_logger(log_level)
self.__set_dir_properties()
self._config_file = None
self._settings_file = self.__get_config_path()
self.__set_rouge_dir(rouge_dir)
self.args = self.__clean_rouge_args(rouge_args)
self._system_filename_pattern = None
self._model_filename_pattern = None
def save_home_dir(self):
config = ConfigParser()
section = 'pyrouge settings'
config.add_section(section)
config.set(section, 'home_dir', self._home_dir)
with open(self._settings_file, 'w') as f:
config.write(f)
self.log.info("Set ROUGE home directory to {}.".format(self._home_dir))
@property
def settings_file(self):
"""
Path of the setttings file, which stores the ROUGE home dir.
"""
return self._settings_file
@property
def bin_path(self):
"""
The full path of the ROUGE binary (although it's technically
a script), i.e. rouge_home_dir/ROUGE-1.5.5.pl
"""
if self._bin_path is None:
raise Exception(
"ROUGE path not set. Please set the ROUGE home directory "
"and ensure that ROUGE-1.5.5.pl exists in it.")
return self._bin_path
@property
def system_filename_pattern(self):
"""
The regular expression pattern for matching system summary
filenames. The regex string.
E.g. "SL.P.10.R.11.SL062003-(\d+).html" will match the system
filenames in the SPL2003/system folder of the ROUGE SPL example
in the "sample-test" folder.
Currently, there is no support for multiple systems.
"""
return self._system_filename_pattern
@system_filename_pattern.setter
def system_filename_pattern(self, pattern):
self._system_filename_pattern = pattern
@property
def model_filename_pattern(self):
"""
The regular expression pattern for matching model summary
filenames. The pattern needs to contain the string "#ID#",
which is a placeholder for the document ID.
E.g. "SL.P.10.R.[A-Z].SL062003-#ID#.html" will match the model
filenames in the SPL2003/system folder of the ROUGE SPL
example in the "sample-test" folder.
"#ID#" is a placeholder for the document ID which has been
matched by the "(\d+)" part of the system filename pattern.
The different model summaries for a given document ID are
matched by the "[A-Z]" part.
"""
return self._model_filename_pattern
@model_filename_pattern.setter
def model_filename_pattern(self, pattern):
self._model_filename_pattern = pattern
@property
def config_file(self):
return self._config_file
@config_file.setter
def config_file(self, path):
config_dir, _ = os.path.split(path)
verify_dir(config_dir, "configuration file")
self._config_file = path
def split_sentences(self):
"""
ROUGE requires texts split into sentences. In case the texts
are not already split, this method can be used.
"""
from pyrouge.utils.sentence_splitter import PunktSentenceSplitter
self.log.info("Splitting sentences.")
ss = PunktSentenceSplitter()
sent_split_to_string = lambda s: "\n".join(ss.split(s))
process_func = partial(
DirectoryProcessor.process, function=sent_split_to_string)
self.__process_summaries(process_func)
@staticmethod
def convert_summaries_to_rouge_format(input_dir, output_dir):
"""
Convert all files in input_dir into a format ROUGE understands
and saves the files to output_dir. The input files are assumed
to be plain text with one sentence per line.
input_dir: Path of directory containing the input files.
output_dir: Path of directory in which the converted files
will be saved.
"""
DirectoryProcessor.process(
input_dir, output_dir, Rouge155.convert_text_to_rouge_format)
@staticmethod
def convert_text_to_rouge_format(text, title="dummy title"):
"""
Convert a text to a format ROUGE understands. The text is
assumed to contain one sentence per line.
text: The text to convert, containg one sentence per line.
title: Optional title for the text. The title will appear
in the converted file, but doesn't seem to have
any other relevance.
Returns: The converted text as string.
"""
sentences = text.split("\n")
sent_elems = [
"<a name=\"{i}\">[{i}]</a> <a href=\"#{i}\" id={i}>"
"{text}</a>".format(i=i, text=sent)
for i, sent in enumerate(sentences, start=1)]
html = """<html>
<head>
<title>{title}</title>
</head>
<body bgcolor="white">
{elems}
</body>
</html>""".format(title=title, elems="\n".join(sent_elems))
return html
@staticmethod
def write_config_static(system_dir, system_filename_pattern,
model_dir, model_filename_pattern,
config_file_path, system_id=None):
"""
Write the ROUGE configuration file, which is basically a list
of system summary files and their corresponding model summary
files.
pyrouge uses regular expressions to automatically find the
matching model summary files for a given system summary file
(cf. docstrings for system_filename_pattern and
model_filename_pattern).
system_dir: Path of directory containing
system summaries.
system_filename_pattern: Regex string for matching
system summary filenames.
model_dir: Path of directory containing
model summaries.
model_filename_pattern: Regex string for matching model
summary filenames.
config_file_path: Path of the configuration file.
system_id: Optional system ID string which
will appear in the ROUGE output.
"""
system_filenames = [f for f in os.listdir(system_dir)]
system_models_tuples = []
system_filename_pattern = re.compile(system_filename_pattern)
for system_filename in sorted(system_filenames):
match = system_filename_pattern.match(system_filename)
if match:
id = match.groups(0)[0]
model_filenames = Rouge155.__get_model_filenames_for_id(
id, model_dir, model_filename_pattern)
system_models_tuples.append(
(system_filename, sorted(model_filenames)))
if not system_models_tuples:
raise Exception(
"Did not find any files matching the pattern {} "
"in the system summaries directory {}.".format(
system_filename_pattern.pattern, system_dir))
with codecs.open(config_file_path, 'w', encoding='utf-8') as f:
f.write('<ROUGE-EVAL version="1.55">')
for task_id, (system_filename, model_filenames) in enumerate(
system_models_tuples, start=1):
eval_string = Rouge155.__get_eval_string(
task_id, system_id,
system_dir, system_filename,
model_dir, model_filenames)
f.write(eval_string)
f.write("</ROUGE-EVAL>")
def write_config(self, config_file_path=None, system_id=None):
    """
    Write the ROUGE configuration file, which is basically a list
    of system summary files and their matching model summary files.

    This is a non-static version of write_config_file_static().

    config_file_path:   Path of the configuration file.
    system_id:          Optional system ID string which will
                        appear in the ROUGE output.
    """
    if not system_id:
        system_id = 1
    # getattr: _config_dir does not exist before the first call, so a
    # plain attribute access would raise AttributeError when an explicit
    # config_file_path is supplied on the first call.
    if (not config_file_path) or (not getattr(self, "_config_dir", None)):
        self._config_dir = mkdtemp()
        config_filename = "rouge_conf.xml"
    else:
        config_dir, config_filename = os.path.split(config_file_path)
        verify_dir(config_dir, "configuration file")
        # Bug fix: remember the requested directory.  The original left
        # self._config_dir untouched here, so the config file path was
        # joined against a stale temp directory instead of the directory
        # part of config_file_path.
        self._config_dir = config_dir
    self._config_file = os.path.join(self._config_dir, config_filename)
    Rouge155.write_config_static(
        self._system_dir, self._system_filename_pattern,
        self._model_dir, self._model_filename_pattern,
        self._config_file, system_id)
    self.log.info(
        "Written ROUGE configuration to {}".format(self._config_file))
def evaluate(self, system_id=1, rouge_args=None):
    """
    Run ROUGE to evaluate the system summaries in system_dir against
    the model summaries in model_dir.  The summaries are assumed to
    be in the one-sentence-per-line HTML format ROUGE understands.

    system_id:  Optional system ID which will be printed in
                ROUGE's output.

    Returns:    ROUGE output as string.
    """
    self.write_config(system_id=system_id)
    command = [self._bin_path] + self.__get_options(rouge_args)
    # Point ROUGE at its home directory via the environment, if known.
    env = None
    if getattr(self, "_home_dir", None):
        env = {'ROUGE_EVAL_HOME': self._home_dir}
    self.log.info(
        "Running ROUGE with command {}".format(" ".join(command)))
    return check_output(command, env=env).decode("UTF-8")
def convert_and_evaluate(self, system_id=1,
                         split_sentences=False, rouge_args=None):
    """
    Convert plain text summaries to ROUGE format and run ROUGE to
    evaluate the system summaries in system_dir against the model
    summaries in model_dir.  Optionally split texts into sentences
    in case they aren't already.

    This is just a convenience method combining
    convert_summaries_to_rouge_format() and evaluate().

    split_sentences:    Optional argument specifying if
                        sentences should be split.
    system_id:          Optional system ID which will be printed
                        in ROUGE's output.

    Returns:            ROUGE output as string.
    """
    # Optionally break the raw texts into one sentence per line first.
    if split_sentences:
        self.split_sentences()
    # Convert both summary folders into ROUGE's SEE/HTML format ...
    self.__write_summaries()
    # ... then run the actual evaluation.
    return self.evaluate(system_id, rouge_args)
###################################################################
# Private methods
def __set_rouge_dir(self, home_dir=None):
    """
    Verify the presence of ROUGE-1.5.5.pl and the data folder, and
    record their paths.
    """
    if home_dir:
        self._home_dir = home_dir
        self.save_home_dir()
    else:
        # No explicit directory given: fall back to the saved setting.
        self._home_dir = self.__get_rouge_home_dir_from_settings()
    self._bin_path = os.path.join(self._home_dir, 'ROUGE-1.5.5.pl')
    self.data_dir = os.path.join(self._home_dir, 'data')
    if not os.path.exists(self._bin_path):
        raise Exception(
            "ROUGE binary not found at {}. Please set the "
            "correct path by running pyrouge_set_rouge_path "
            "/path/to/rouge/home.".format(self._bin_path))
def __get_rouge_home_dir_from_settings(self):
    """Read the saved ROUGE home directory from the settings file."""
    config = ConfigParser()
    with open(self._settings_file) as settings:
        reader = getattr(config, "read_file", None)
        if reader is None:
            # Fall back to the deprecated Python 2.x method.
            reader = config.readfp
        reader(settings)
    return config.get('pyrouge settings', 'home_dir')
@staticmethod
def __get_eval_string(
task_id, system_id,
system_dir, system_filename,
model_dir, model_filenames):
"""
ROUGE can evaluate several system summaries for a given text
against several model summaries, i.e. there is an m-to-n
relation between system and model summaries. The system
summaries are listed in the <PEERS> tag and the model summaries
in the <MODELS> tag. pyrouge currently only supports one system
summary per text, i.e. it assumes a 1-to-n relation between
system and model summaries.

Returns the <EVAL> XML fragment for one evaluation task as a
string.
"""
# The single system (peer) summary for this task.
peer_elems = "<P ID=\"{id}\">{name}</P>".format(
id=system_id, name=system_filename)
# One <M> element per model summary; model IDs are the letters
# A, B, C, ... (chr(65 + i)).
model_elems = ["<M ID=\"{id}\">{name}</M>".format(
id=chr(65 + i), name=name)
for i, name in enumerate(model_filenames)]
model_elems = "\n\t\t\t".join(model_elems)
eval_string = """
<EVAL ID="{task_id}">
<MODEL-ROOT>{model_root}</MODEL-ROOT>
<PEER-ROOT>{peer_root}</PEER-ROOT>
<INPUT-FORMAT TYPE="SEE">
</INPUT-FORMAT>
<PEERS>
{peer_elems}
</PEERS>
<MODELS>
{model_elems}
</MODELS>
</EVAL>
""".format(
task_id=task_id,
model_root=model_dir, model_elems=model_elems,
peer_root=system_dir, peer_elems=peer_elems)
return eval_string
def __process_summaries(self, process_func):
    """
    Helper method that applies process_func to the files in the
    system and model folders and saves the resulting files to new
    system and model folders.
    """
    staging_root = mkdtemp()
    out_system_dir = os.path.join(staging_root, "system")
    os.mkdir(out_system_dir)
    out_model_dir = os.path.join(staging_root, "model")
    os.mkdir(out_model_dir)
    self.log.info(
        "Processing summaries. Saving system files to {} and "
        "model files to {}.".format(out_system_dir, out_model_dir))
    # Transform both folders, then point self at the processed copies.
    process_func(self._system_dir, out_system_dir)
    process_func(self._model_dir, out_model_dir)
    self._system_dir = out_system_dir
    self._model_dir = out_model_dir
def __write_summaries(self):
# Convert both summary folders to ROUGE's input format; afterwards
# self._system_dir / self._model_dir point at the converted copies.
self.log.info("Writing summaries.")
self.__process_summaries(self.convert_summaries_to_rouge_format)
@staticmethod
def __get_model_filenames_for_id(id, model_dir, model_filenames_pattern):
    """
    Return all model summary filenames in model_dir that match the
    given document ID, by substituting the ID into the '#ID#'
    placeholder of the filename pattern.
    """
    id_pattern = re.compile(model_filenames_pattern.replace('#ID#', id))
    matches = [name for name in os.listdir(model_dir)
               if id_pattern.match(name)]
    if not matches:
        raise Exception(
            "Could not find any model summaries for the system"
            " summary with ID {}. Specified model filename pattern was: "
            "{}".format(id, model_filenames_pattern))
    return matches
def __get_options(self, rouge_args=None):
    """
    Get supplied command line arguments for ROUGE or use default
    ones.
    """
    if self.args:
        # Arguments given at construction time win.
        options = self.args.split()
    elif rouge_args:
        options = rouge_args.split()
    else:
        # pyrouge's default ROUGE settings.
        options = [
            '-e', self._data_dir,
            '-c', 95,
            '-2',
            '-1',
            '-U',
            '-r', 1000,
            '-n', 4,
            '-w', 1.2,
            '-a',
        ]
        options = [str(opt) for opt in options]
    return self.__add_config_option(options)
def __create_dir_property(self, dir_name, docstring):
"""
Generate getter and setter for a directory property.

E.g. dir_name "home" installs a "home_dir" property backed by
the instance attribute "_home_dir".
"""
property_name = "{}_dir".format(dir_name)
private_name = "_" + property_name
setattr(self, private_name, None)
def fget(self):
return getattr(self, private_name)
def fset(self, path):
# Validate that the path is an existing directory before storing.
verify_dir(path, dir_name)
setattr(self, private_name, path)
# NOTE: the property is installed on the class itself, so it is
# shared by all instances of the class.
p = property(fget=fget, fset=fset, doc=docstring)
setattr(self.__class__, property_name, p)
def __set_dir_properties(self):
    """Automatically generate the properties for directories."""
    for dirname, docstring in (
            ("home", "The ROUGE home directory."),
            ("data", "The path of the ROUGE 'data' directory."),
            ("system", "Path of the directory containing system summaries."),
            ("model", "Path of the directory containing model summaries.")):
        self.__create_dir_property(dirname, docstring)
def __clean_rouge_args(self, rouge_args):
    """Remove enclosing quotation marks, if any."""
    if not rouge_args:
        return
    quoted = re.match('"(.+)"', rouge_args)
    # Strip a surrounding pair of double quotes; otherwise pass through.
    return quoted.group(1) if quoted else rouge_args
def __add_config_option(self, options):
    # Append '-m' and the generated configuration file to the options.
    return options + ['-m', self._config_file]
def __get_config_path(self):
    """
    Return the platform-specific path of pyrouge's settings.ini,
    creating its directory if necessary.
    """
    if platform.system() == "Windows":
        parent_dir, config_dir_name = os.getenv("APPDATA"), "pyrouge"
    elif os.name == "posix":
        parent_dir, config_dir_name = os.path.expanduser("~"), ".pyrouge"
    else:
        # Unknown platform: keep the settings next to this module.
        parent_dir, config_dir_name = os.path.dirname(__file__), ""
    config_dir = os.path.join(parent_dir, config_dir_name)
    if not os.path.exists(config_dir):
        os.makedirs(config_dir)
    return os.path.join(config_dir, 'settings.ini')
|
bheinzerling/pyrouge | pyrouge/Rouge155.py | Rouge155.__set_rouge_dir | python | def __set_rouge_dir(self, home_dir=None):
if not home_dir:
self._home_dir = self.__get_rouge_home_dir_from_settings()
else:
self._home_dir = home_dir
self.save_home_dir()
self._bin_path = os.path.join(self._home_dir, 'ROUGE-1.5.5.pl')
self.data_dir = os.path.join(self._home_dir, 'data')
if not os.path.exists(self._bin_path):
raise Exception(
"ROUGE binary not found at {}. Please set the "
"correct path by running pyrouge_set_rouge_path "
"/path/to/rouge/home.".format(self._bin_path)) | Verfify presence of ROUGE-1.5.5.pl and data folder, and set
those paths. | train | https://github.com/bheinzerling/pyrouge/blob/afeb37dd2608f1399e2fb24a4ee2fe10a2a18603/pyrouge/Rouge155.py#L401-L418 | [
"def save_home_dir(self):\n config = ConfigParser()\n section = 'pyrouge settings'\n config.add_section(section)\n config.set(section, 'home_dir', self._home_dir)\n with open(self._settings_file, 'w') as f:\n config.write(f)\n self.log.info(\"Set ROUGE home directory to {}.\".format(self._home_dir))\n",
"def __get_rouge_home_dir_from_settings(self):\n config = ConfigParser()\n with open(self._settings_file) as f:\n if hasattr(config, \"read_file\"):\n config.read_file(f)\n else:\n # use deprecated python 2.x method\n config.readfp(f)\n rouge_home_dir = config.get('pyrouge settings', 'home_dir')\n return rouge_home_dir\n"
] | class Rouge155(object):
"""
This is a wrapper for the ROUGE 1.5.5 summary evaluation package.
This class is designed to simplify the evaluation process by:
1) Converting summaries into a format ROUGE understands.
2) Generating the ROUGE configuration file automatically based
on filename patterns.
This class can be used within Python like this:
rouge = Rouge155()
rouge.system_dir = 'test/systems'
rouge.model_dir = 'test/models'
# The system filename pattern should contain one group that
# matches the document ID.
rouge.system_filename_pattern = 'SL.P.10.R.11.SL062003-(\d+).html'
# The model filename pattern has '#ID#' as a placeholder for the
# document ID. If there are multiple model summaries, pyrouge
# will use the provided regex to automatically match them with
# the corresponding system summary. Here, [A-Z] matches
# multiple model summaries for a given #ID#.
rouge.model_filename_pattern = 'SL.P.10.R.[A-Z].SL062003-#ID#.html'
rouge_output = rouge.evaluate()
print(rouge_output)
output_dict = rouge.output_to_dict(rouge_output)
print(output_dict)
-> {'rouge_1_f_score': 0.95652,
'rouge_1_f_score_cb': 0.95652,
'rouge_1_f_score_ce': 0.95652,
'rouge_1_precision': 0.95652,
[...]
To evaluate multiple systems:
rouge = Rouge155()
rouge.system_dir = '/PATH/TO/systems'
rouge.model_dir = 'PATH/TO/models'
for system_id in ['id1', 'id2', 'id3']:
rouge.system_filename_pattern = \
'SL.P/.10.R.{}.SL062003-(\d+).html'.format(system_id)
rouge.model_filename_pattern = \
'SL.P.10.R.[A-Z].SL062003-#ID#.html'
rouge_output = rouge.evaluate(system_id)
print(rouge_output)
"""
def __init__(self, rouge_dir=None, rouge_args=None, log_level=None):
"""
Create a Rouge155 object.

rouge_dir:  Directory containing Rouge-1.5.5.pl
rouge_args: Arguments to pass through to ROUGE if you
don't want to use the default pyrouge
arguments.
log_level:  Optional level for the global console logger.
"""
if log_level is None:
self.log = log.get_global_console_logger()
else:
self.log = log.get_global_console_logger(log_level)
# Install the home/data/system/model "<name>_dir" properties.
self.__set_dir_properties()
self._config_file = None
# Path of settings.ini, which persists the ROUGE home directory.
self._settings_file = self.__get_config_path()
# Locate ROUGE-1.5.5.pl and its data folder (raises if missing).
self.__set_rouge_dir(rouge_dir)
# Pass-through command-line arguments for ROUGE, if any.
self.args = self.__clean_rouge_args(rouge_args)
self._system_filename_pattern = None
self._model_filename_pattern = None
def save_home_dir(self):
    """Persist the current ROUGE home directory to the settings file."""
    section = 'pyrouge settings'
    config = ConfigParser()
    config.add_section(section)
    config.set(section, 'home_dir', self._home_dir)
    with open(self._settings_file, 'w') as settings:
        config.write(settings)
    self.log.info("Set ROUGE home directory to {}.".format(self._home_dir))
@property
def settings_file(self):
"""
Path of the settings file, which stores the ROUGE home dir.
"""
return self._settings_file
@property
def bin_path(self):
"""
The full path of the ROUGE binary (although it's technically
a script), i.e. rouge_home_dir/ROUGE-1.5.5.pl
"""
# _bin_path is set by __set_rouge_dir(); None means it was never set.
if self._bin_path is None:
raise Exception(
"ROUGE path not set. Please set the ROUGE home directory "
"and ensure that ROUGE-1.5.5.pl exists in it.")
return self._bin_path
@property
def system_filename_pattern(self):
"""
The regular expression pattern for matching system summary
filenames. The regex string.

E.g. "SL.P.10.R.11.SL062003-(\d+).html" will match the system
filenames in the SPL2003/system folder of the ROUGE SPL example
in the "sample-test" folder.

Currently, there is no support for multiple systems.
"""
return self._system_filename_pattern
@system_filename_pattern.setter
def system_filename_pattern(self, pattern):
self._system_filename_pattern = pattern
@property
def model_filename_pattern(self):
"""
The regular expression pattern for matching model summary
filenames. The pattern needs to contain the string "#ID#",
which is a placeholder for the document ID.

E.g. "SL.P.10.R.[A-Z].SL062003-#ID#.html" will match the model
filenames in the SPL2003/system folder of the ROUGE SPL
example in the "sample-test" folder.

"#ID#" is a placeholder for the document ID which has been
matched by the "(\d+)" part of the system filename pattern.
The different model summaries for a given document ID are
matched by the "[A-Z]" part.
"""
return self._model_filename_pattern
@model_filename_pattern.setter
def model_filename_pattern(self, pattern):
self._model_filename_pattern = pattern
@property
def config_file(self):
# Path of the generated ROUGE configuration file, if any.
return self._config_file
@config_file.setter
def config_file(self, path):
# Only the directory part is validated; the file need not exist yet.
config_dir, _ = os.path.split(path)
verify_dir(config_dir, "configuration file")
self._config_file = path
def split_sentences(self):
    """
    ROUGE requires texts split into sentences. In case the texts
    are not already split, this method can be used.
    """
    from pyrouge.utils.sentence_splitter import PunktSentenceSplitter
    self.log.info("Splitting sentences.")
    splitter = PunktSentenceSplitter()

    def to_sentence_lines(text, _split=splitter.split):
        # One sentence per line, as ROUGE expects.
        return "\n".join(_split(text))

    self.__process_summaries(
        partial(DirectoryProcessor.process, function=to_sentence_lines))
@staticmethod
def convert_summaries_to_rouge_format(input_dir, output_dir):
"""
Convert all files in input_dir into a format ROUGE understands
and saves the files to output_dir. The input files are assumed
to be plain text with one sentence per line.

input_dir:  Path of directory containing the input files.
output_dir: Path of directory in which the converted files
will be saved.
"""
# Delegate the per-file work to DirectoryProcessor, applying
# convert_text_to_rouge_format to each file's contents.
DirectoryProcessor.process(
input_dir, output_dir, Rouge155.convert_text_to_rouge_format)
@staticmethod
def convert_text_to_rouge_format(text, title="dummy title"):
"""
Convert a text to a format ROUGE understands. The text is
assumed to contain one sentence per line.

text:   The text to convert, containing one sentence per line.
title:  Optional title for the text. The title will appear
in the converted file, but doesn't seem to have
any other relevance.

Returns: The converted text as string.
"""
sentences = text.split("\n")
# Wrap every sentence in the anchor markup of ROUGE's SEE format;
# sentence numbering starts at 1.
sent_elems = [
"<a name=\"{i}\">[{i}]</a> <a href=\"#{i}\" id={i}>"
"{text}</a>".format(i=i, text=sent)
for i, sent in enumerate(sentences, start=1)]
html = """<html>
<head>
<title>{title}</title>
</head>
<body bgcolor="white">
{elems}
</body>
</html>""".format(title=title, elems="\n".join(sent_elems))
return html
@staticmethod
def write_config_static(system_dir, system_filename_pattern,
model_dir, model_filename_pattern,
config_file_path, system_id=None):
"""
Write the ROUGE configuration file, which is basically a list
of system summary files and their corresponding model summary
files.
pyrouge uses regular expressions to automatically find the
matching model summary files for a given system summary file
(cf. docstrings for system_filename_pattern and
model_filename_pattern).
system_dir: Path of directory containing
system summaries.
system_filename_pattern: Regex string for matching
system summary filenames.
model_dir: Path of directory containing
model summaries.
model_filename_pattern: Regex string for matching model
summary filenames.
config_file_path: Path of the configuration file.
system_id: Optional system ID string which
will appear in the ROUGE output.
"""
system_filenames = [f for f in os.listdir(system_dir)]
system_models_tuples = []
system_filename_pattern = re.compile(system_filename_pattern)
for system_filename in sorted(system_filenames):
match = system_filename_pattern.match(system_filename)
if match:
id = match.groups(0)[0]
model_filenames = Rouge155.__get_model_filenames_for_id(
id, model_dir, model_filename_pattern)
system_models_tuples.append(
(system_filename, sorted(model_filenames)))
if not system_models_tuples:
raise Exception(
"Did not find any files matching the pattern {} "
"in the system summaries directory {}.".format(
system_filename_pattern.pattern, system_dir))
with codecs.open(config_file_path, 'w', encoding='utf-8') as f:
f.write('<ROUGE-EVAL version="1.55">')
for task_id, (system_filename, model_filenames) in enumerate(
system_models_tuples, start=1):
eval_string = Rouge155.__get_eval_string(
task_id, system_id,
system_dir, system_filename,
model_dir, model_filenames)
f.write(eval_string)
f.write("</ROUGE-EVAL>")
def write_config(self, config_file_path=None, system_id=None):
"""
Write the ROUGE configuration file, which is basically a list
of system summary files and their matching model summary files.
This is a non-static version of write_config_file_static().
config_file_path: Path of the configuration file.
system_id: Optional system ID string which will
appear in the ROUGE output.
"""
if not system_id:
system_id = 1
if (not config_file_path) or (not self._config_dir):
self._config_dir = mkdtemp()
config_filename = "rouge_conf.xml"
else:
config_dir, config_filename = os.path.split(config_file_path)
verify_dir(config_dir, "configuration file")
self._config_file = os.path.join(self._config_dir, config_filename)
Rouge155.write_config_static(
self._system_dir, self._system_filename_pattern,
self._model_dir, self._model_filename_pattern,
self._config_file, system_id)
self.log.info(
"Written ROUGE configuration to {}".format(self._config_file))
def evaluate(self, system_id=1, rouge_args=None):
"""
Run ROUGE to evaluate the system summaries in system_dir against
the model summaries in model_dir. The summaries are assumed to
be in the one-sentence-per-line HTML format ROUGE understands.
system_id: Optional system ID which will be printed in
ROUGE's output.
Returns: Rouge output as string.
"""
self.write_config(system_id=system_id)
options = self.__get_options(rouge_args)
command = [self._bin_path] + options
env = None
if hasattr(self, "_home_dir") and self._home_dir:
env = {'ROUGE_EVAL_HOME': self._home_dir}
self.log.info(
"Running ROUGE with command {}".format(" ".join(command)))
rouge_output = check_output(command, env=env).decode("UTF-8")
return rouge_output
def convert_and_evaluate(self, system_id=1,
split_sentences=False, rouge_args=None):
"""
Convert plain text summaries to ROUGE format and run ROUGE to
evaluate the system summaries in system_dir against the model
summaries in model_dir. Optionally split texts into sentences
in case they aren't already.
This is just a convenience method combining
convert_summaries_to_rouge_format() and evaluate().
split_sentences: Optional argument specifying if
sentences should be split.
system_id: Optional system ID which will be printed
in ROUGE's output.
Returns: ROUGE output as string.
"""
if split_sentences:
self.split_sentences()
self.__write_summaries()
rouge_output = self.evaluate(system_id, rouge_args)
return rouge_output
def output_to_dict(self, output):
    """
    Convert the ROUGE output into a python dictionary for further
    processing.
    """
    # Example line:
    # 0 ROUGE-1 Average_R: 0.02632 (95%-conf.int. 0.02632 - 0.02632)
    line_re = re.compile(
        r"(\d+) (ROUGE-\S+) (Average_\w): (\d.\d+) "
        r"\(95%-conf.int. (\d.\d+) - (\d.\d+)\)")
    measure_names = {
        'Average_R': 'recall',
        'Average_P': 'precision',
        'Average_F': 'f_score',
    }
    results = {}
    for line in output.split("\n"):
        match = line_re.match(line)
        if not match:
            continue
        sys_id, rouge_type, measure, result, conf_begin, conf_end = \
            match.groups()
        key = "{}_{}".format(
            rouge_type.lower().replace("-", "_"), measure_names[measure])
        results[key] = float(result)
        results[key + "_cb"] = float(conf_begin)
        results[key + "_ce"] = float(conf_end)
    return results
###################################################################
# Private methods
def __get_rouge_home_dir_from_settings(self):
config = ConfigParser()
with open(self._settings_file) as f:
if hasattr(config, "read_file"):
config.read_file(f)
else:
# use deprecated python 2.x method
config.readfp(f)
rouge_home_dir = config.get('pyrouge settings', 'home_dir')
return rouge_home_dir
@staticmethod
def __get_eval_string(
task_id, system_id,
system_dir, system_filename,
model_dir, model_filenames):
"""
ROUGE can evaluate several system summaries for a given text
against several model summaries, i.e. there is an m-to-n
relation between system and model summaries. The system
summaries are listed in the <PEERS> tag and the model summaries
in the <MODELS> tag. pyrouge currently only supports one system
summary per text, i.e. it assumes a 1-to-n relation between
system and model summaries.
"""
peer_elems = "<P ID=\"{id}\">{name}</P>".format(
id=system_id, name=system_filename)
model_elems = ["<M ID=\"{id}\">{name}</M>".format(
id=chr(65 + i), name=name)
for i, name in enumerate(model_filenames)]
model_elems = "\n\t\t\t".join(model_elems)
eval_string = """
<EVAL ID="{task_id}">
<MODEL-ROOT>{model_root}</MODEL-ROOT>
<PEER-ROOT>{peer_root}</PEER-ROOT>
<INPUT-FORMAT TYPE="SEE">
</INPUT-FORMAT>
<PEERS>
{peer_elems}
</PEERS>
<MODELS>
{model_elems}
</MODELS>
</EVAL>
""".format(
task_id=task_id,
model_root=model_dir, model_elems=model_elems,
peer_root=system_dir, peer_elems=peer_elems)
return eval_string
def __process_summaries(self, process_func):
"""
Helper method that applies process_func to the files in the
system and model folders and saves the resulting files to new
system and model folders.
"""
temp_dir = mkdtemp()
new_system_dir = os.path.join(temp_dir, "system")
os.mkdir(new_system_dir)
new_model_dir = os.path.join(temp_dir, "model")
os.mkdir(new_model_dir)
self.log.info(
"Processing summaries. Saving system files to {} and "
"model files to {}.".format(new_system_dir, new_model_dir))
process_func(self._system_dir, new_system_dir)
process_func(self._model_dir, new_model_dir)
self._system_dir = new_system_dir
self._model_dir = new_model_dir
def __write_summaries(self):
self.log.info("Writing summaries.")
self.__process_summaries(self.convert_summaries_to_rouge_format)
@staticmethod
def __get_model_filenames_for_id(id, model_dir, model_filenames_pattern):
pattern = re.compile(model_filenames_pattern.replace('#ID#', id))
model_filenames = [
f for f in os.listdir(model_dir) if pattern.match(f)]
if not model_filenames:
raise Exception(
"Could not find any model summaries for the system"
" summary with ID {}. Specified model filename pattern was: "
"{}".format(id, model_filenames_pattern))
return model_filenames
def __get_options(self, rouge_args=None):
"""
Get supplied command line arguments for ROUGE or use default
ones.
"""
if self.args:
options = self.args.split()
elif rouge_args:
options = rouge_args.split()
else:
options = [
'-e', self._data_dir,
'-c', 95,
'-2',
'-1',
'-U',
'-r', 1000,
'-n', 4,
'-w', 1.2,
'-a',
]
options = list(map(str, options))
options = self.__add_config_option(options)
return options
def __create_dir_property(self, dir_name, docstring):
"""
Generate getter and setter for a directory property.
"""
property_name = "{}_dir".format(dir_name)
private_name = "_" + property_name
setattr(self, private_name, None)
def fget(self):
return getattr(self, private_name)
def fset(self, path):
verify_dir(path, dir_name)
setattr(self, private_name, path)
p = property(fget=fget, fset=fset, doc=docstring)
setattr(self.__class__, property_name, p)
def __set_dir_properties(self):
"""
Automatically generate the properties for directories.
"""
directories = [
("home", "The ROUGE home directory."),
("data", "The path of the ROUGE 'data' directory."),
("system", "Path of the directory containing system summaries."),
("model", "Path of the directory containing model summaries."),
]
for (dirname, docstring) in directories:
self.__create_dir_property(dirname, docstring)
def __clean_rouge_args(self, rouge_args):
"""
Remove enclosing quotation marks, if any.
"""
if not rouge_args:
return
quot_mark_pattern = re.compile('"(.+)"')
match = quot_mark_pattern.match(rouge_args)
if match:
cleaned_args = match.group(1)
return cleaned_args
else:
return rouge_args
def __add_config_option(self, options):
return options + ['-m'] + [self._config_file]
def __get_config_path(self):
if platform.system() == "Windows":
parent_dir = os.getenv("APPDATA")
config_dir_name = "pyrouge"
elif os.name == "posix":
parent_dir = os.path.expanduser("~")
config_dir_name = ".pyrouge"
else:
parent_dir = os.path.dirname(__file__)
config_dir_name = ""
config_dir = os.path.join(parent_dir, config_dir_name)
if not os.path.exists(config_dir):
os.makedirs(config_dir)
return os.path.join(config_dir, 'settings.ini')
|
bheinzerling/pyrouge | pyrouge/Rouge155.py | Rouge155.__get_eval_string | python | def __get_eval_string(
task_id, system_id,
system_dir, system_filename,
model_dir, model_filenames):
peer_elems = "<P ID=\"{id}\">{name}</P>".format(
id=system_id, name=system_filename)
model_elems = ["<M ID=\"{id}\">{name}</M>".format(
id=chr(65 + i), name=name)
for i, name in enumerate(model_filenames)]
model_elems = "\n\t\t\t".join(model_elems)
eval_string = """
<EVAL ID="{task_id}">
<MODEL-ROOT>{model_root}</MODEL-ROOT>
<PEER-ROOT>{peer_root}</PEER-ROOT>
<INPUT-FORMAT TYPE="SEE">
</INPUT-FORMAT>
<PEERS>
{peer_elems}
</PEERS>
<MODELS>
{model_elems}
</MODELS>
</EVAL>
""".format(
task_id=task_id,
model_root=model_dir, model_elems=model_elems,
peer_root=system_dir, peer_elems=peer_elems)
return eval_string | ROUGE can evaluate several system summaries for a given text
against several model summaries, i.e. there is an m-to-n
relation between system and model summaries. The system
summaries are listed in the <PEERS> tag and the model summaries
in the <MODELS> tag. pyrouge currently only supports one system
summary per text, i.e. it assumes a 1-to-n relation between
system and model summaries. | train | https://github.com/bheinzerling/pyrouge/blob/afeb37dd2608f1399e2fb24a4ee2fe10a2a18603/pyrouge/Rouge155.py#L432-L471 | null | class Rouge155(object):
"""
This is a wrapper for the ROUGE 1.5.5 summary evaluation package.
This class is designed to simplify the evaluation process by:
1) Converting summaries into a format ROUGE understands.
2) Generating the ROUGE configuration file automatically based
on filename patterns.
This class can be used within Python like this:
rouge = Rouge155()
rouge.system_dir = 'test/systems'
rouge.model_dir = 'test/models'
# The system filename pattern should contain one group that
# matches the document ID.
rouge.system_filename_pattern = 'SL.P.10.R.11.SL062003-(\d+).html'
# The model filename pattern has '#ID#' as a placeholder for the
# document ID. If there are multiple model summaries, pyrouge
# will use the provided regex to automatically match them with
# the corresponding system summary. Here, [A-Z] matches
# multiple model summaries for a given #ID#.
rouge.model_filename_pattern = 'SL.P.10.R.[A-Z].SL062003-#ID#.html'
rouge_output = rouge.evaluate()
print(rouge_output)
output_dict = rouge.output_to_dict(rouge_ouput)
print(output_dict)
-> {'rouge_1_f_score': 0.95652,
'rouge_1_f_score_cb': 0.95652,
'rouge_1_f_score_ce': 0.95652,
'rouge_1_precision': 0.95652,
[...]
To evaluate multiple systems:
rouge = Rouge155()
rouge.system_dir = '/PATH/TO/systems'
rouge.model_dir = 'PATH/TO/models'
for system_id in ['id1', 'id2', 'id3']:
rouge.system_filename_pattern = \
'SL.P/.10.R.{}.SL062003-(\d+).html'.format(system_id)
rouge.model_filename_pattern = \
'SL.P.10.R.[A-Z].SL062003-#ID#.html'
rouge_output = rouge.evaluate(system_id)
print(rouge_output)
"""
def __init__(self, rouge_dir=None, rouge_args=None, log_level=None):
"""
Create a Rouge155 object.
rouge_dir: Directory containing Rouge-1.5.5.pl
rouge_args: Arguments to pass through to ROUGE if you
don't want to use the default pyrouge
arguments.
"""
if log_level is None:
self.log = log.get_global_console_logger()
else:
self.log = log.get_global_console_logger(log_level)
self.__set_dir_properties()
self._config_file = None
self._settings_file = self.__get_config_path()
self.__set_rouge_dir(rouge_dir)
self.args = self.__clean_rouge_args(rouge_args)
self._system_filename_pattern = None
self._model_filename_pattern = None
def save_home_dir(self):
config = ConfigParser()
section = 'pyrouge settings'
config.add_section(section)
config.set(section, 'home_dir', self._home_dir)
with open(self._settings_file, 'w') as f:
config.write(f)
self.log.info("Set ROUGE home directory to {}.".format(self._home_dir))
@property
def settings_file(self):
"""
Path of the setttings file, which stores the ROUGE home dir.
"""
return self._settings_file
@property
def bin_path(self):
"""
The full path of the ROUGE binary (although it's technically
a script), i.e. rouge_home_dir/ROUGE-1.5.5.pl
"""
if self._bin_path is None:
raise Exception(
"ROUGE path not set. Please set the ROUGE home directory "
"and ensure that ROUGE-1.5.5.pl exists in it.")
return self._bin_path
@property
def system_filename_pattern(self):
"""
The regular expression pattern for matching system summary
filenames. The regex string.
E.g. "SL.P.10.R.11.SL062003-(\d+).html" will match the system
filenames in the SPL2003/system folder of the ROUGE SPL example
in the "sample-test" folder.
Currently, there is no support for multiple systems.
"""
return self._system_filename_pattern
@system_filename_pattern.setter
def system_filename_pattern(self, pattern):
self._system_filename_pattern = pattern
@property
def model_filename_pattern(self):
"""
The regular expression pattern for matching model summary
filenames. The pattern needs to contain the string "#ID#",
which is a placeholder for the document ID.
E.g. "SL.P.10.R.[A-Z].SL062003-#ID#.html" will match the model
filenames in the SPL2003/system folder of the ROUGE SPL
example in the "sample-test" folder.
"#ID#" is a placeholder for the document ID which has been
matched by the "(\d+)" part of the system filename pattern.
The different model summaries for a given document ID are
matched by the "[A-Z]" part.
"""
return self._model_filename_pattern
@model_filename_pattern.setter
def model_filename_pattern(self, pattern):
self._model_filename_pattern = pattern
@property
def config_file(self):
return self._config_file
@config_file.setter
def config_file(self, path):
config_dir, _ = os.path.split(path)
verify_dir(config_dir, "configuration file")
self._config_file = path
def split_sentences(self):
"""
ROUGE requires texts split into sentences. In case the texts
are not already split, this method can be used.
"""
from pyrouge.utils.sentence_splitter import PunktSentenceSplitter
self.log.info("Splitting sentences.")
ss = PunktSentenceSplitter()
sent_split_to_string = lambda s: "\n".join(ss.split(s))
process_func = partial(
DirectoryProcessor.process, function=sent_split_to_string)
self.__process_summaries(process_func)
@staticmethod
def convert_summaries_to_rouge_format(input_dir, output_dir):
"""
Convert all files in input_dir into a format ROUGE understands
and saves the files to output_dir. The input files are assumed
to be plain text with one sentence per line.
input_dir: Path of directory containing the input files.
output_dir: Path of directory in which the converted files
will be saved.
"""
DirectoryProcessor.process(
input_dir, output_dir, Rouge155.convert_text_to_rouge_format)
@staticmethod
def convert_text_to_rouge_format(text, title="dummy title"):
"""
Convert a text to a format ROUGE understands. The text is
assumed to contain one sentence per line.
text: The text to convert, containg one sentence per line.
title: Optional title for the text. The title will appear
in the converted file, but doesn't seem to have
any other relevance.
Returns: The converted text as string.
"""
sentences = text.split("\n")
sent_elems = [
"<a name=\"{i}\">[{i}]</a> <a href=\"#{i}\" id={i}>"
"{text}</a>".format(i=i, text=sent)
for i, sent in enumerate(sentences, start=1)]
html = """<html>
<head>
<title>{title}</title>
</head>
<body bgcolor="white">
{elems}
</body>
</html>""".format(title=title, elems="\n".join(sent_elems))
return html
@staticmethod
def write_config_static(system_dir, system_filename_pattern,
model_dir, model_filename_pattern,
config_file_path, system_id=None):
"""
Write the ROUGE configuration file, which is basically a list
of system summary files and their corresponding model summary
files.
pyrouge uses regular expressions to automatically find the
matching model summary files for a given system summary file
(cf. docstrings for system_filename_pattern and
model_filename_pattern).
system_dir: Path of directory containing
system summaries.
system_filename_pattern: Regex string for matching
system summary filenames.
model_dir: Path of directory containing
model summaries.
model_filename_pattern: Regex string for matching model
summary filenames.
config_file_path: Path of the configuration file.
system_id: Optional system ID string which
will appear in the ROUGE output.
"""
system_filenames = [f for f in os.listdir(system_dir)]
system_models_tuples = []
system_filename_pattern = re.compile(system_filename_pattern)
for system_filename in sorted(system_filenames):
match = system_filename_pattern.match(system_filename)
if match:
id = match.groups(0)[0]
model_filenames = Rouge155.__get_model_filenames_for_id(
id, model_dir, model_filename_pattern)
system_models_tuples.append(
(system_filename, sorted(model_filenames)))
if not system_models_tuples:
raise Exception(
"Did not find any files matching the pattern {} "
"in the system summaries directory {}.".format(
system_filename_pattern.pattern, system_dir))
with codecs.open(config_file_path, 'w', encoding='utf-8') as f:
f.write('<ROUGE-EVAL version="1.55">')
for task_id, (system_filename, model_filenames) in enumerate(
system_models_tuples, start=1):
eval_string = Rouge155.__get_eval_string(
task_id, system_id,
system_dir, system_filename,
model_dir, model_filenames)
f.write(eval_string)
f.write("</ROUGE-EVAL>")
def write_config(self, config_file_path=None, system_id=None):
"""
Write the ROUGE configuration file, which is basically a list
of system summary files and their matching model summary files.
This is a non-static version of write_config_file_static().
config_file_path: Path of the configuration file.
system_id: Optional system ID string which will
appear in the ROUGE output.
"""
if not system_id:
system_id = 1
if (not config_file_path) or (not self._config_dir):
self._config_dir = mkdtemp()
config_filename = "rouge_conf.xml"
else:
config_dir, config_filename = os.path.split(config_file_path)
verify_dir(config_dir, "configuration file")
self._config_file = os.path.join(self._config_dir, config_filename)
Rouge155.write_config_static(
self._system_dir, self._system_filename_pattern,
self._model_dir, self._model_filename_pattern,
self._config_file, system_id)
self.log.info(
"Written ROUGE configuration to {}".format(self._config_file))
def evaluate(self, system_id=1, rouge_args=None):
"""
Run ROUGE to evaluate the system summaries in system_dir against
the model summaries in model_dir. The summaries are assumed to
be in the one-sentence-per-line HTML format ROUGE understands.
system_id: Optional system ID which will be printed in
ROUGE's output.
Returns: Rouge output as string.
"""
self.write_config(system_id=system_id)
options = self.__get_options(rouge_args)
command = [self._bin_path] + options
env = None
if hasattr(self, "_home_dir") and self._home_dir:
env = {'ROUGE_EVAL_HOME': self._home_dir}
self.log.info(
"Running ROUGE with command {}".format(" ".join(command)))
rouge_output = check_output(command, env=env).decode("UTF-8")
return rouge_output
def convert_and_evaluate(self, system_id=1,
split_sentences=False, rouge_args=None):
"""
Convert plain text summaries to ROUGE format and run ROUGE to
evaluate the system summaries in system_dir against the model
summaries in model_dir. Optionally split texts into sentences
in case they aren't already.
This is just a convenience method combining
convert_summaries_to_rouge_format() and evaluate().
split_sentences: Optional argument specifying if
sentences should be split.
system_id: Optional system ID which will be printed
in ROUGE's output.
Returns: ROUGE output as string.
"""
if split_sentences:
self.split_sentences()
self.__write_summaries()
rouge_output = self.evaluate(system_id, rouge_args)
return rouge_output
def output_to_dict(self, output):
"""
Convert the ROUGE output into python dictionary for further
processing.
"""
#0 ROUGE-1 Average_R: 0.02632 (95%-conf.int. 0.02632 - 0.02632)
pattern = re.compile(
r"(\d+) (ROUGE-\S+) (Average_\w): (\d.\d+) "
r"\(95%-conf.int. (\d.\d+) - (\d.\d+)\)")
results = {}
for line in output.split("\n"):
match = pattern.match(line)
if match:
sys_id, rouge_type, measure, result, conf_begin, conf_end = \
match.groups()
measure = {
'Average_R': 'recall',
'Average_P': 'precision',
'Average_F': 'f_score'
}[measure]
rouge_type = rouge_type.lower().replace("-", '_')
key = "{}_{}".format(rouge_type, measure)
results[key] = float(result)
results["{}_cb".format(key)] = float(conf_begin)
results["{}_ce".format(key)] = float(conf_end)
return results
###################################################################
# Private methods
def __set_rouge_dir(self, home_dir=None):
"""
Verfify presence of ROUGE-1.5.5.pl and data folder, and set
those paths.
"""
if not home_dir:
self._home_dir = self.__get_rouge_home_dir_from_settings()
else:
self._home_dir = home_dir
self.save_home_dir()
self._bin_path = os.path.join(self._home_dir, 'ROUGE-1.5.5.pl')
self.data_dir = os.path.join(self._home_dir, 'data')
if not os.path.exists(self._bin_path):
raise Exception(
"ROUGE binary not found at {}. Please set the "
"correct path by running pyrouge_set_rouge_path "
"/path/to/rouge/home.".format(self._bin_path))
def __get_rouge_home_dir_from_settings(self):
config = ConfigParser()
with open(self._settings_file) as f:
if hasattr(config, "read_file"):
config.read_file(f)
else:
# use deprecated python 2.x method
config.readfp(f)
rouge_home_dir = config.get('pyrouge settings', 'home_dir')
return rouge_home_dir
@staticmethod
def __process_summaries(self, process_func):
"""
Helper method that applies process_func to the files in the
system and model folders and saves the resulting files to new
system and model folders.
"""
temp_dir = mkdtemp()
new_system_dir = os.path.join(temp_dir, "system")
os.mkdir(new_system_dir)
new_model_dir = os.path.join(temp_dir, "model")
os.mkdir(new_model_dir)
self.log.info(
"Processing summaries. Saving system files to {} and "
"model files to {}.".format(new_system_dir, new_model_dir))
process_func(self._system_dir, new_system_dir)
process_func(self._model_dir, new_model_dir)
self._system_dir = new_system_dir
self._model_dir = new_model_dir
def __write_summaries(self):
self.log.info("Writing summaries.")
self.__process_summaries(self.convert_summaries_to_rouge_format)
@staticmethod
def __get_model_filenames_for_id(id, model_dir, model_filenames_pattern):
pattern = re.compile(model_filenames_pattern.replace('#ID#', id))
model_filenames = [
f for f in os.listdir(model_dir) if pattern.match(f)]
if not model_filenames:
raise Exception(
"Could not find any model summaries for the system"
" summary with ID {}. Specified model filename pattern was: "
"{}".format(id, model_filenames_pattern))
return model_filenames
def __get_options(self, rouge_args=None):
"""
Get supplied command line arguments for ROUGE or use default
ones.
"""
if self.args:
options = self.args.split()
elif rouge_args:
options = rouge_args.split()
else:
options = [
'-e', self._data_dir,
'-c', 95,
'-2',
'-1',
'-U',
'-r', 1000,
'-n', 4,
'-w', 1.2,
'-a',
]
options = list(map(str, options))
options = self.__add_config_option(options)
return options
def __create_dir_property(self, dir_name, docstring):
"""
Generate getter and setter for a directory property.
"""
property_name = "{}_dir".format(dir_name)
private_name = "_" + property_name
setattr(self, private_name, None)
def fget(self):
return getattr(self, private_name)
def fset(self, path):
verify_dir(path, dir_name)
setattr(self, private_name, path)
p = property(fget=fget, fset=fset, doc=docstring)
setattr(self.__class__, property_name, p)
def __set_dir_properties(self):
"""
Automatically generate the properties for directories.
"""
directories = [
("home", "The ROUGE home directory."),
("data", "The path of the ROUGE 'data' directory."),
("system", "Path of the directory containing system summaries."),
("model", "Path of the directory containing model summaries."),
]
for (dirname, docstring) in directories:
self.__create_dir_property(dirname, docstring)
def __clean_rouge_args(self, rouge_args):
"""
Remove enclosing quotation marks, if any.
"""
if not rouge_args:
return
quot_mark_pattern = re.compile('"(.+)"')
match = quot_mark_pattern.match(rouge_args)
if match:
cleaned_args = match.group(1)
return cleaned_args
else:
return rouge_args
def __add_config_option(self, options):
return options + ['-m'] + [self._config_file]
def __get_config_path(self):
if platform.system() == "Windows":
parent_dir = os.getenv("APPDATA")
config_dir_name = "pyrouge"
elif os.name == "posix":
parent_dir = os.path.expanduser("~")
config_dir_name = ".pyrouge"
else:
parent_dir = os.path.dirname(__file__)
config_dir_name = ""
config_dir = os.path.join(parent_dir, config_dir_name)
if not os.path.exists(config_dir):
os.makedirs(config_dir)
return os.path.join(config_dir, 'settings.ini')
|
bheinzerling/pyrouge | pyrouge/Rouge155.py | Rouge155.__process_summaries | python | def __process_summaries(self, process_func):
temp_dir = mkdtemp()
new_system_dir = os.path.join(temp_dir, "system")
os.mkdir(new_system_dir)
new_model_dir = os.path.join(temp_dir, "model")
os.mkdir(new_model_dir)
self.log.info(
"Processing summaries. Saving system files to {} and "
"model files to {}.".format(new_system_dir, new_model_dir))
process_func(self._system_dir, new_system_dir)
process_func(self._model_dir, new_model_dir)
self._system_dir = new_system_dir
self._model_dir = new_model_dir | Helper method that applies process_func to the files in the
system and model folders and saves the resulting files to new
system and model folders. | train | https://github.com/bheinzerling/pyrouge/blob/afeb37dd2608f1399e2fb24a4ee2fe10a2a18603/pyrouge/Rouge155.py#L473-L491 | [
"def convert_summaries_to_rouge_format(input_dir, output_dir):\n \"\"\"\n Convert all files in input_dir into a format ROUGE understands\n and saves the files to output_dir. The input files are assumed\n to be plain text with one sentence per line.\n\n input_dir: Path of directory containing the input files.\n output_dir: Path of directory in which the converted files\n will be saved.\n\n \"\"\"\n DirectoryProcessor.process(\n input_dir, output_dir, Rouge155.convert_text_to_rouge_format)\n"
] | class Rouge155(object):
"""
This is a wrapper for the ROUGE 1.5.5 summary evaluation package.
This class is designed to simplify the evaluation process by:
1) Converting summaries into a format ROUGE understands.
2) Generating the ROUGE configuration file automatically based
on filename patterns.
This class can be used within Python like this:
rouge = Rouge155()
rouge.system_dir = 'test/systems'
rouge.model_dir = 'test/models'
# The system filename pattern should contain one group that
# matches the document ID.
rouge.system_filename_pattern = 'SL.P.10.R.11.SL062003-(\d+).html'
# The model filename pattern has '#ID#' as a placeholder for the
# document ID. If there are multiple model summaries, pyrouge
# will use the provided regex to automatically match them with
# the corresponding system summary. Here, [A-Z] matches
# multiple model summaries for a given #ID#.
rouge.model_filename_pattern = 'SL.P.10.R.[A-Z].SL062003-#ID#.html'
rouge_output = rouge.evaluate()
print(rouge_output)
output_dict = rouge.output_to_dict(rouge_ouput)
print(output_dict)
-> {'rouge_1_f_score': 0.95652,
'rouge_1_f_score_cb': 0.95652,
'rouge_1_f_score_ce': 0.95652,
'rouge_1_precision': 0.95652,
[...]
To evaluate multiple systems:
rouge = Rouge155()
rouge.system_dir = '/PATH/TO/systems'
rouge.model_dir = 'PATH/TO/models'
for system_id in ['id1', 'id2', 'id3']:
rouge.system_filename_pattern = \
'SL.P/.10.R.{}.SL062003-(\d+).html'.format(system_id)
rouge.model_filename_pattern = \
'SL.P.10.R.[A-Z].SL062003-#ID#.html'
rouge_output = rouge.evaluate(system_id)
print(rouge_output)
"""
def __init__(self, rouge_dir=None, rouge_args=None, log_level=None):
"""
Create a Rouge155 object.
rouge_dir: Directory containing Rouge-1.5.5.pl
rouge_args: Arguments to pass through to ROUGE if you
don't want to use the default pyrouge
arguments.
"""
if log_level is None:
self.log = log.get_global_console_logger()
else:
self.log = log.get_global_console_logger(log_level)
self.__set_dir_properties()
self._config_file = None
self._settings_file = self.__get_config_path()
self.__set_rouge_dir(rouge_dir)
self.args = self.__clean_rouge_args(rouge_args)
self._system_filename_pattern = None
self._model_filename_pattern = None
def save_home_dir(self):
config = ConfigParser()
section = 'pyrouge settings'
config.add_section(section)
config.set(section, 'home_dir', self._home_dir)
with open(self._settings_file, 'w') as f:
config.write(f)
self.log.info("Set ROUGE home directory to {}.".format(self._home_dir))
@property
def settings_file(self):
"""
Path of the setttings file, which stores the ROUGE home dir.
"""
return self._settings_file
@property
def bin_path(self):
"""
The full path of the ROUGE binary (although it's technically
a script), i.e. rouge_home_dir/ROUGE-1.5.5.pl
"""
if self._bin_path is None:
raise Exception(
"ROUGE path not set. Please set the ROUGE home directory "
"and ensure that ROUGE-1.5.5.pl exists in it.")
return self._bin_path
@property
def system_filename_pattern(self):
"""
The regular expression pattern for matching system summary
filenames. The regex string.
E.g. "SL.P.10.R.11.SL062003-(\d+).html" will match the system
filenames in the SPL2003/system folder of the ROUGE SPL example
in the "sample-test" folder.
Currently, there is no support for multiple systems.
"""
return self._system_filename_pattern
@system_filename_pattern.setter
def system_filename_pattern(self, pattern):
self._system_filename_pattern = pattern
@property
def model_filename_pattern(self):
"""
The regular expression pattern for matching model summary
filenames. The pattern needs to contain the string "#ID#",
which is a placeholder for the document ID.
E.g. "SL.P.10.R.[A-Z].SL062003-#ID#.html" will match the model
filenames in the SPL2003/system folder of the ROUGE SPL
example in the "sample-test" folder.
"#ID#" is a placeholder for the document ID which has been
matched by the "(\d+)" part of the system filename pattern.
The different model summaries for a given document ID are
matched by the "[A-Z]" part.
"""
return self._model_filename_pattern
@model_filename_pattern.setter
def model_filename_pattern(self, pattern):
self._model_filename_pattern = pattern
@property
def config_file(self):
return self._config_file
@config_file.setter
def config_file(self, path):
config_dir, _ = os.path.split(path)
verify_dir(config_dir, "configuration file")
self._config_file = path
def split_sentences(self):
"""
ROUGE requires texts split into sentences. In case the texts
are not already split, this method can be used.
"""
from pyrouge.utils.sentence_splitter import PunktSentenceSplitter
self.log.info("Splitting sentences.")
ss = PunktSentenceSplitter()
sent_split_to_string = lambda s: "\n".join(ss.split(s))
process_func = partial(
DirectoryProcessor.process, function=sent_split_to_string)
self.__process_summaries(process_func)
@staticmethod
def convert_summaries_to_rouge_format(input_dir, output_dir):
"""
Convert all files in input_dir into a format ROUGE understands
and saves the files to output_dir. The input files are assumed
to be plain text with one sentence per line.
input_dir: Path of directory containing the input files.
output_dir: Path of directory in which the converted files
will be saved.
"""
DirectoryProcessor.process(
input_dir, output_dir, Rouge155.convert_text_to_rouge_format)
@staticmethod
def convert_text_to_rouge_format(text, title="dummy title"):
"""
Convert a text to a format ROUGE understands. The text is
assumed to contain one sentence per line.
text: The text to convert, containg one sentence per line.
title: Optional title for the text. The title will appear
in the converted file, but doesn't seem to have
any other relevance.
Returns: The converted text as string.
"""
sentences = text.split("\n")
sent_elems = [
"<a name=\"{i}\">[{i}]</a> <a href=\"#{i}\" id={i}>"
"{text}</a>".format(i=i, text=sent)
for i, sent in enumerate(sentences, start=1)]
html = """<html>
<head>
<title>{title}</title>
</head>
<body bgcolor="white">
{elems}
</body>
</html>""".format(title=title, elems="\n".join(sent_elems))
return html
@staticmethod
def write_config_static(system_dir, system_filename_pattern,
model_dir, model_filename_pattern,
config_file_path, system_id=None):
"""
Write the ROUGE configuration file, which is basically a list
of system summary files and their corresponding model summary
files.
pyrouge uses regular expressions to automatically find the
matching model summary files for a given system summary file
(cf. docstrings for system_filename_pattern and
model_filename_pattern).
system_dir: Path of directory containing
system summaries.
system_filename_pattern: Regex string for matching
system summary filenames.
model_dir: Path of directory containing
model summaries.
model_filename_pattern: Regex string for matching model
summary filenames.
config_file_path: Path of the configuration file.
system_id: Optional system ID string which
will appear in the ROUGE output.
"""
system_filenames = [f for f in os.listdir(system_dir)]
system_models_tuples = []
system_filename_pattern = re.compile(system_filename_pattern)
for system_filename in sorted(system_filenames):
match = system_filename_pattern.match(system_filename)
if match:
id = match.groups(0)[0]
model_filenames = Rouge155.__get_model_filenames_for_id(
id, model_dir, model_filename_pattern)
system_models_tuples.append(
(system_filename, sorted(model_filenames)))
if not system_models_tuples:
raise Exception(
"Did not find any files matching the pattern {} "
"in the system summaries directory {}.".format(
system_filename_pattern.pattern, system_dir))
with codecs.open(config_file_path, 'w', encoding='utf-8') as f:
f.write('<ROUGE-EVAL version="1.55">')
for task_id, (system_filename, model_filenames) in enumerate(
system_models_tuples, start=1):
eval_string = Rouge155.__get_eval_string(
task_id, system_id,
system_dir, system_filename,
model_dir, model_filenames)
f.write(eval_string)
f.write("</ROUGE-EVAL>")
def write_config(self, config_file_path=None, system_id=None):
"""
Write the ROUGE configuration file, which is basically a list
of system summary files and their matching model summary files.
This is a non-static version of write_config_file_static().
config_file_path: Path of the configuration file.
system_id: Optional system ID string which will
appear in the ROUGE output.
"""
if not system_id:
system_id = 1
if (not config_file_path) or (not self._config_dir):
self._config_dir = mkdtemp()
config_filename = "rouge_conf.xml"
else:
config_dir, config_filename = os.path.split(config_file_path)
verify_dir(config_dir, "configuration file")
self._config_file = os.path.join(self._config_dir, config_filename)
Rouge155.write_config_static(
self._system_dir, self._system_filename_pattern,
self._model_dir, self._model_filename_pattern,
self._config_file, system_id)
self.log.info(
"Written ROUGE configuration to {}".format(self._config_file))
def evaluate(self, system_id=1, rouge_args=None):
"""
Run ROUGE to evaluate the system summaries in system_dir against
the model summaries in model_dir. The summaries are assumed to
be in the one-sentence-per-line HTML format ROUGE understands.
system_id: Optional system ID which will be printed in
ROUGE's output.
Returns: Rouge output as string.
"""
self.write_config(system_id=system_id)
options = self.__get_options(rouge_args)
command = [self._bin_path] + options
env = None
if hasattr(self, "_home_dir") and self._home_dir:
env = {'ROUGE_EVAL_HOME': self._home_dir}
self.log.info(
"Running ROUGE with command {}".format(" ".join(command)))
rouge_output = check_output(command, env=env).decode("UTF-8")
return rouge_output
def convert_and_evaluate(self, system_id=1,
split_sentences=False, rouge_args=None):
"""
Convert plain text summaries to ROUGE format and run ROUGE to
evaluate the system summaries in system_dir against the model
summaries in model_dir. Optionally split texts into sentences
in case they aren't already.
This is just a convenience method combining
convert_summaries_to_rouge_format() and evaluate().
split_sentences: Optional argument specifying if
sentences should be split.
system_id: Optional system ID which will be printed
in ROUGE's output.
Returns: ROUGE output as string.
"""
if split_sentences:
self.split_sentences()
self.__write_summaries()
rouge_output = self.evaluate(system_id, rouge_args)
return rouge_output
def output_to_dict(self, output):
"""
Convert the ROUGE output into python dictionary for further
processing.
"""
#0 ROUGE-1 Average_R: 0.02632 (95%-conf.int. 0.02632 - 0.02632)
pattern = re.compile(
r"(\d+) (ROUGE-\S+) (Average_\w): (\d.\d+) "
r"\(95%-conf.int. (\d.\d+) - (\d.\d+)\)")
results = {}
for line in output.split("\n"):
match = pattern.match(line)
if match:
sys_id, rouge_type, measure, result, conf_begin, conf_end = \
match.groups()
measure = {
'Average_R': 'recall',
'Average_P': 'precision',
'Average_F': 'f_score'
}[measure]
rouge_type = rouge_type.lower().replace("-", '_')
key = "{}_{}".format(rouge_type, measure)
results[key] = float(result)
results["{}_cb".format(key)] = float(conf_begin)
results["{}_ce".format(key)] = float(conf_end)
return results
###################################################################
# Private methods
def __set_rouge_dir(self, home_dir=None):
"""
Verfify presence of ROUGE-1.5.5.pl and data folder, and set
those paths.
"""
if not home_dir:
self._home_dir = self.__get_rouge_home_dir_from_settings()
else:
self._home_dir = home_dir
self.save_home_dir()
self._bin_path = os.path.join(self._home_dir, 'ROUGE-1.5.5.pl')
self.data_dir = os.path.join(self._home_dir, 'data')
if not os.path.exists(self._bin_path):
raise Exception(
"ROUGE binary not found at {}. Please set the "
"correct path by running pyrouge_set_rouge_path "
"/path/to/rouge/home.".format(self._bin_path))
def __get_rouge_home_dir_from_settings(self):
config = ConfigParser()
with open(self._settings_file) as f:
if hasattr(config, "read_file"):
config.read_file(f)
else:
# use deprecated python 2.x method
config.readfp(f)
rouge_home_dir = config.get('pyrouge settings', 'home_dir')
return rouge_home_dir
@staticmethod
def __get_eval_string(
task_id, system_id,
system_dir, system_filename,
model_dir, model_filenames):
"""
ROUGE can evaluate several system summaries for a given text
against several model summaries, i.e. there is an m-to-n
relation between system and model summaries. The system
summaries are listed in the <PEERS> tag and the model summaries
in the <MODELS> tag. pyrouge currently only supports one system
summary per text, i.e. it assumes a 1-to-n relation between
system and model summaries.
"""
peer_elems = "<P ID=\"{id}\">{name}</P>".format(
id=system_id, name=system_filename)
model_elems = ["<M ID=\"{id}\">{name}</M>".format(
id=chr(65 + i), name=name)
for i, name in enumerate(model_filenames)]
model_elems = "\n\t\t\t".join(model_elems)
eval_string = """
<EVAL ID="{task_id}">
<MODEL-ROOT>{model_root}</MODEL-ROOT>
<PEER-ROOT>{peer_root}</PEER-ROOT>
<INPUT-FORMAT TYPE="SEE">
</INPUT-FORMAT>
<PEERS>
{peer_elems}
</PEERS>
<MODELS>
{model_elems}
</MODELS>
</EVAL>
""".format(
task_id=task_id,
model_root=model_dir, model_elems=model_elems,
peer_root=system_dir, peer_elems=peer_elems)
return eval_string
def __write_summaries(self):
self.log.info("Writing summaries.")
self.__process_summaries(self.convert_summaries_to_rouge_format)
@staticmethod
def __get_model_filenames_for_id(id, model_dir, model_filenames_pattern):
pattern = re.compile(model_filenames_pattern.replace('#ID#', id))
model_filenames = [
f for f in os.listdir(model_dir) if pattern.match(f)]
if not model_filenames:
raise Exception(
"Could not find any model summaries for the system"
" summary with ID {}. Specified model filename pattern was: "
"{}".format(id, model_filenames_pattern))
return model_filenames
def __get_options(self, rouge_args=None):
"""
Get supplied command line arguments for ROUGE or use default
ones.
"""
if self.args:
options = self.args.split()
elif rouge_args:
options = rouge_args.split()
else:
options = [
'-e', self._data_dir,
'-c', 95,
'-2',
'-1',
'-U',
'-r', 1000,
'-n', 4,
'-w', 1.2,
'-a',
]
options = list(map(str, options))
options = self.__add_config_option(options)
return options
def __create_dir_property(self, dir_name, docstring):
"""
Generate getter and setter for a directory property.
"""
property_name = "{}_dir".format(dir_name)
private_name = "_" + property_name
setattr(self, private_name, None)
def fget(self):
return getattr(self, private_name)
def fset(self, path):
verify_dir(path, dir_name)
setattr(self, private_name, path)
p = property(fget=fget, fset=fset, doc=docstring)
setattr(self.__class__, property_name, p)
def __set_dir_properties(self):
"""
Automatically generate the properties for directories.
"""
directories = [
("home", "The ROUGE home directory."),
("data", "The path of the ROUGE 'data' directory."),
("system", "Path of the directory containing system summaries."),
("model", "Path of the directory containing model summaries."),
]
for (dirname, docstring) in directories:
self.__create_dir_property(dirname, docstring)
def __clean_rouge_args(self, rouge_args):
"""
Remove enclosing quotation marks, if any.
"""
if not rouge_args:
return
quot_mark_pattern = re.compile('"(.+)"')
match = quot_mark_pattern.match(rouge_args)
if match:
cleaned_args = match.group(1)
return cleaned_args
else:
return rouge_args
def __add_config_option(self, options):
return options + ['-m'] + [self._config_file]
def __get_config_path(self):
if platform.system() == "Windows":
parent_dir = os.getenv("APPDATA")
config_dir_name = "pyrouge"
elif os.name == "posix":
parent_dir = os.path.expanduser("~")
config_dir_name = ".pyrouge"
else:
parent_dir = os.path.dirname(__file__)
config_dir_name = ""
config_dir = os.path.join(parent_dir, config_dir_name)
if not os.path.exists(config_dir):
os.makedirs(config_dir)
return os.path.join(config_dir, 'settings.ini')
|
bheinzerling/pyrouge | pyrouge/Rouge155.py | Rouge155.__get_options | python | def __get_options(self, rouge_args=None):
if self.args:
options = self.args.split()
elif rouge_args:
options = rouge_args.split()
else:
options = [
'-e', self._data_dir,
'-c', 95,
'-2',
'-1',
'-U',
'-r', 1000,
'-n', 4,
'-w', 1.2,
'-a',
]
options = list(map(str, options))
options = self.__add_config_option(options)
return options | Get supplied command line arguments for ROUGE or use default
ones. | train | https://github.com/bheinzerling/pyrouge/blob/afeb37dd2608f1399e2fb24a4ee2fe10a2a18603/pyrouge/Rouge155.py#L509-L534 | [
"def __add_config_option(self, options):\n return options + ['-m'] + [self._config_file]\n"
] | class Rouge155(object):
"""
This is a wrapper for the ROUGE 1.5.5 summary evaluation package.
This class is designed to simplify the evaluation process by:
1) Converting summaries into a format ROUGE understands.
2) Generating the ROUGE configuration file automatically based
on filename patterns.
This class can be used within Python like this:
rouge = Rouge155()
rouge.system_dir = 'test/systems'
rouge.model_dir = 'test/models'
# The system filename pattern should contain one group that
# matches the document ID.
rouge.system_filename_pattern = 'SL.P.10.R.11.SL062003-(\d+).html'
# The model filename pattern has '#ID#' as a placeholder for the
# document ID. If there are multiple model summaries, pyrouge
# will use the provided regex to automatically match them with
# the corresponding system summary. Here, [A-Z] matches
# multiple model summaries for a given #ID#.
rouge.model_filename_pattern = 'SL.P.10.R.[A-Z].SL062003-#ID#.html'
rouge_output = rouge.evaluate()
print(rouge_output)
output_dict = rouge.output_to_dict(rouge_ouput)
print(output_dict)
-> {'rouge_1_f_score': 0.95652,
'rouge_1_f_score_cb': 0.95652,
'rouge_1_f_score_ce': 0.95652,
'rouge_1_precision': 0.95652,
[...]
To evaluate multiple systems:
rouge = Rouge155()
rouge.system_dir = '/PATH/TO/systems'
rouge.model_dir = 'PATH/TO/models'
for system_id in ['id1', 'id2', 'id3']:
rouge.system_filename_pattern = \
'SL.P/.10.R.{}.SL062003-(\d+).html'.format(system_id)
rouge.model_filename_pattern = \
'SL.P.10.R.[A-Z].SL062003-#ID#.html'
rouge_output = rouge.evaluate(system_id)
print(rouge_output)
"""
def __init__(self, rouge_dir=None, rouge_args=None, log_level=None):
"""
Create a Rouge155 object.
rouge_dir: Directory containing Rouge-1.5.5.pl
rouge_args: Arguments to pass through to ROUGE if you
don't want to use the default pyrouge
arguments.
"""
if log_level is None:
self.log = log.get_global_console_logger()
else:
self.log = log.get_global_console_logger(log_level)
self.__set_dir_properties()
self._config_file = None
self._settings_file = self.__get_config_path()
self.__set_rouge_dir(rouge_dir)
self.args = self.__clean_rouge_args(rouge_args)
self._system_filename_pattern = None
self._model_filename_pattern = None
def save_home_dir(self):
config = ConfigParser()
section = 'pyrouge settings'
config.add_section(section)
config.set(section, 'home_dir', self._home_dir)
with open(self._settings_file, 'w') as f:
config.write(f)
self.log.info("Set ROUGE home directory to {}.".format(self._home_dir))
@property
def settings_file(self):
"""
Path of the setttings file, which stores the ROUGE home dir.
"""
return self._settings_file
@property
def bin_path(self):
"""
The full path of the ROUGE binary (although it's technically
a script), i.e. rouge_home_dir/ROUGE-1.5.5.pl
"""
if self._bin_path is None:
raise Exception(
"ROUGE path not set. Please set the ROUGE home directory "
"and ensure that ROUGE-1.5.5.pl exists in it.")
return self._bin_path
@property
def system_filename_pattern(self):
"""
The regular expression pattern for matching system summary
filenames. The regex string.
E.g. "SL.P.10.R.11.SL062003-(\d+).html" will match the system
filenames in the SPL2003/system folder of the ROUGE SPL example
in the "sample-test" folder.
Currently, there is no support for multiple systems.
"""
return self._system_filename_pattern
@system_filename_pattern.setter
def system_filename_pattern(self, pattern):
self._system_filename_pattern = pattern
@property
def model_filename_pattern(self):
"""
The regular expression pattern for matching model summary
filenames. The pattern needs to contain the string "#ID#",
which is a placeholder for the document ID.
E.g. "SL.P.10.R.[A-Z].SL062003-#ID#.html" will match the model
filenames in the SPL2003/system folder of the ROUGE SPL
example in the "sample-test" folder.
"#ID#" is a placeholder for the document ID which has been
matched by the "(\d+)" part of the system filename pattern.
The different model summaries for a given document ID are
matched by the "[A-Z]" part.
"""
return self._model_filename_pattern
@model_filename_pattern.setter
def model_filename_pattern(self, pattern):
self._model_filename_pattern = pattern
@property
def config_file(self):
return self._config_file
@config_file.setter
def config_file(self, path):
config_dir, _ = os.path.split(path)
verify_dir(config_dir, "configuration file")
self._config_file = path
def split_sentences(self):
    """
    Split the summary texts into one sentence per line, rewriting
    the system and model folders. ROUGE requires sentence-split
    input; use this when the texts are not already split.
    """
    from pyrouge.utils.sentence_splitter import PunktSentenceSplitter
    self.log.info("Splitting sentences.")
    splitter = PunktSentenceSplitter()

    def to_sentence_lines(text):
        return "\n".join(splitter.split(text))

    process_func = partial(
        DirectoryProcessor.process, function=to_sentence_lines)
    self.__process_summaries(process_func)
@staticmethod
def convert_summaries_to_rouge_format(input_dir, output_dir):
    """
    Convert all files in input_dir into a format ROUGE understands
    and save the converted files to output_dir. The input files are
    assumed to be plain text with one sentence per line.

    input_dir:  Path of directory containing the input files.
    output_dir: Path of directory in which the converted files
                will be saved.
    """
# Delegates the per-file conversion to convert_text_to_rouge_format.
DirectoryProcessor.process(
input_dir, output_dir, Rouge155.convert_text_to_rouge_format)
@staticmethod
def convert_text_to_rouge_format(text, title="dummy title"):
"""
Convert a text to a format ROUGE understands. The text is
assumed to contain one sentence per line.
text: The text to convert, containg one sentence per line.
title: Optional title for the text. The title will appear
in the converted file, but doesn't seem to have
any other relevance.
Returns: The converted text as string.
"""
sentences = text.split("\n")
sent_elems = [
"<a name=\"{i}\">[{i}]</a> <a href=\"#{i}\" id={i}>"
"{text}</a>".format(i=i, text=sent)
for i, sent in enumerate(sentences, start=1)]
html = """<html>
<head>
<title>{title}</title>
</head>
<body bgcolor="white">
{elems}
</body>
</html>""".format(title=title, elems="\n".join(sent_elems))
return html
@staticmethod
def write_config_static(system_dir, system_filename_pattern,
model_dir, model_filename_pattern,
config_file_path, system_id=None):
    """
    Write the ROUGE configuration file: a list of system summary
    files and their corresponding model summary files.

    pyrouge uses regular expressions to automatically find the
    matching model summary files for a given system summary file
    (cf. docstrings for system_filename_pattern and
    model_filename_pattern).

    system_dir:              Path of directory containing
                             system summaries.
    system_filename_pattern: Regex string for matching
                             system summary filenames.
    model_dir:               Path of directory containing
                             model summaries.
    model_filename_pattern:  Regex string for matching model
                             summary filenames.
    config_file_path:        Path of the configuration file.
    system_id:               Optional system ID string which
                             will appear in the ROUGE output.

    Raises Exception when no system filename matches the pattern.
    """
system_filenames = [f for f in os.listdir(system_dir)]
system_models_tuples = []
system_filename_pattern = re.compile(system_filename_pattern)
# Pair every matching system summary with its model summaries; the
# pattern's first capture group is the document ID used for lookup.
for system_filename in sorted(system_filenames):
match = system_filename_pattern.match(system_filename)
if match:
# groups(0)[0] is the first capture group (the 0 is only the
# default for unmatched groups, not an index).
id = match.groups(0)[0]
model_filenames = Rouge155.__get_model_filenames_for_id(
id, model_dir, model_filename_pattern)
system_models_tuples.append(
(system_filename, sorted(model_filenames)))
if not system_models_tuples:
raise Exception(
"Did not find any files matching the pattern {} "
"in the system summaries directory {}.".format(
system_filename_pattern.pattern, system_dir))
# Emit one <EVAL> element per system summary, numbered from 1.
with codecs.open(config_file_path, 'w', encoding='utf-8') as f:
f.write('<ROUGE-EVAL version="1.55">')
for task_id, (system_filename, model_filenames) in enumerate(
system_models_tuples, start=1):
eval_string = Rouge155.__get_eval_string(
task_id, system_id,
system_dir, system_filename,
model_dir, model_filenames)
f.write(eval_string)
f.write("</ROUGE-EVAL>")
def write_config(self, config_file_path=None, system_id=None):
"""
Write the ROUGE configuration file, which is basically a list
of system summary files and their matching model summary files.
This is a non-static version of write_config_file_static().
config_file_path: Path of the configuration file.
system_id: Optional system ID string which will
appear in the ROUGE output.
"""
if not system_id:
system_id = 1
if (not config_file_path) or (not self._config_dir):
self._config_dir = mkdtemp()
config_filename = "rouge_conf.xml"
else:
config_dir, config_filename = os.path.split(config_file_path)
verify_dir(config_dir, "configuration file")
self._config_file = os.path.join(self._config_dir, config_filename)
Rouge155.write_config_static(
self._system_dir, self._system_filename_pattern,
self._model_dir, self._model_filename_pattern,
self._config_file, system_id)
self.log.info(
"Written ROUGE configuration to {}".format(self._config_file))
def evaluate(self, system_id=1, rouge_args=None):
    """
    Run ROUGE on the system summaries in system_dir against the
    model summaries in model_dir and return its raw output. The
    summaries must already be in the one-sentence-per-line HTML
    format ROUGE understands.

    system_id: Optional system ID which will be printed in
               ROUGE's output.
    Returns:   ROUGE output as string.
    """
    self.write_config(system_id=system_id)
    command = [self._bin_path] + self.__get_options(rouge_args)
    # ROUGE needs ROUGE_EVAL_HOME to locate its data files.
    env = None
    if getattr(self, "_home_dir", None):
        env = {'ROUGE_EVAL_HOME': self._home_dir}
    self.log.info(
        "Running ROUGE with command {}".format(" ".join(command)))
    return check_output(command, env=env).decode("UTF-8")
def convert_and_evaluate(self, system_id=1,
                         split_sentences=False, rouge_args=None):
    """
    Convenience wrapper combining convert_summaries_to_rouge_format()
    and evaluate(): convert the plain-text summaries in system_dir
    and model_dir to ROUGE format (optionally splitting them into
    sentences first) and run ROUGE on the result.

    split_sentences: Optional argument specifying if
                     sentences should be split.
    system_id:       Optional system ID which will be printed
                     in ROUGE's output.
    Returns:         ROUGE output as string.
    """
    if split_sentences:
        self.split_sentences()
    self.__write_summaries()
    return self.evaluate(system_id, rouge_args)
def output_to_dict(self, output):
    """
    Convert the ROUGE output into a python dictionary for further
    processing.

    Keys have the form "<rouge_type>_<measure>" (e.g.
    "rouge_1_recall"), plus "<key>_cb" / "<key>_ce" for the lower
    and upper 95% confidence-interval bounds. Non-matching lines
    are ignored.
    """
    # Example line:
    # 1 ROUGE-1 Average_R: 0.02632 (95%-conf.int. 0.02632 - 0.02632)
    # Fix: dots are escaped so "." can no longer accidentally match
    # arbitrary characters (the original pattern used bare ".").
    pattern = re.compile(
        r"(\d+) (ROUGE-\S+) (Average_\w): (\d\.\d+) "
        r"\(95%-conf\.int\. (\d\.\d+) - (\d\.\d+)\)")
    results = {}
    for line in output.split("\n"):
        match = pattern.match(line)
        if match:
            sys_id, rouge_type, measure, result, conf_begin, conf_end = \
                match.groups()
            measure = {
                'Average_R': 'recall',
                'Average_P': 'precision',
                'Average_F': 'f_score'
            }[measure]
            rouge_type = rouge_type.lower().replace("-", '_')
            key = "{}_{}".format(rouge_type, measure)
            results[key] = float(result)
            results["{}_cb".format(key)] = float(conf_begin)
            results["{}_ce".format(key)] = float(conf_end)
    return results
###################################################################
# Private methods
def __set_rouge_dir(self, home_dir=None):
    """
    Verify the presence of ROUGE-1.5.5.pl and the data folder, and
    record their paths.

    home_dir: Optional explicit ROUGE home directory. When omitted,
              the directory is read from the persisted settings
              file; when given, it is saved back to that file.

    Raises Exception when ROUGE-1.5.5.pl is not found.
    """
if not home_dir:
# No directory supplied: fall back to the saved settings.
self._home_dir = self.__get_rouge_home_dir_from_settings()
else:
self._home_dir = home_dir
self.save_home_dir()
self._bin_path = os.path.join(self._home_dir, 'ROUGE-1.5.5.pl')
# Uses the generated data_dir property (see __set_dir_properties).
self.data_dir = os.path.join(self._home_dir, 'data')
if not os.path.exists(self._bin_path):
raise Exception(
"ROUGE binary not found at {}. Please set the "
"correct path by running pyrouge_set_rouge_path "
"/path/to/rouge/home.".format(self._bin_path))
def __get_rouge_home_dir_from_settings(self):
config = ConfigParser()
with open(self._settings_file) as f:
if hasattr(config, "read_file"):
config.read_file(f)
else:
# use deprecated python 2.x method
config.readfp(f)
rouge_home_dir = config.get('pyrouge settings', 'home_dir')
return rouge_home_dir
@staticmethod
def __get_eval_string(
task_id, system_id,
system_dir, system_filename,
model_dir, model_filenames):
    """
    Build one <EVAL> element of the ROUGE configuration file.

    ROUGE can evaluate several system summaries for a given text
    against several model summaries (m-to-n): system summaries are
    listed in the <PEERS> tag and model summaries in the <MODELS>
    tag. pyrouge currently supports only one system summary per
    text, i.e. it assumes a 1-to-n relation between system and
    model summaries.
    """
# Single <P> peer entry for the one supported system summary.
peer_elems = "<P ID=\"{id}\">{name}</P>".format(
id=system_id, name=system_filename)
# Model IDs are consecutive capital letters: A, B, C, ...
model_elems = ["<M ID=\"{id}\">{name}</M>".format(
id=chr(65 + i), name=name)
for i, name in enumerate(model_filenames)]
model_elems = "\n\t\t\t".join(model_elems)
# NOTE(review): the template below appears flush-left; its original
# indentation may have been lost in extraction — the whitespace is
# emitted verbatim into the config file, so confirm against upstream.
eval_string = """
<EVAL ID="{task_id}">
<MODEL-ROOT>{model_root}</MODEL-ROOT>
<PEER-ROOT>{peer_root}</PEER-ROOT>
<INPUT-FORMAT TYPE="SEE">
</INPUT-FORMAT>
<PEERS>
{peer_elems}
</PEERS>
<MODELS>
{model_elems}
</MODELS>
</EVAL>
""".format(
task_id=task_id,
model_root=model_dir, model_elems=model_elems,
peer_root=system_dir, peer_elems=peer_elems)
return eval_string
def __process_summaries(self, process_func):
"""
Helper method that applies process_func to the files in the
system and model folders and saves the resulting files to new
system and model folders.
"""
temp_dir = mkdtemp()
new_system_dir = os.path.join(temp_dir, "system")
os.mkdir(new_system_dir)
new_model_dir = os.path.join(temp_dir, "model")
os.mkdir(new_model_dir)
self.log.info(
"Processing summaries. Saving system files to {} and "
"model files to {}.".format(new_system_dir, new_model_dir))
process_func(self._system_dir, new_system_dir)
process_func(self._model_dir, new_model_dir)
self._system_dir = new_system_dir
self._model_dir = new_model_dir
def __write_summaries(self):
    """Convert the system and model summaries into ROUGE's input format."""
self.log.info("Writing summaries.")
self.__process_summaries(self.convert_summaries_to_rouge_format)
@staticmethod
def __get_model_filenames_for_id(id, model_dir, model_filenames_pattern):
pattern = re.compile(model_filenames_pattern.replace('#ID#', id))
model_filenames = [
f for f in os.listdir(model_dir) if pattern.match(f)]
if not model_filenames:
raise Exception(
"Could not find any model summaries for the system"
" summary with ID {}. Specified model filename pattern was: "
"{}".format(id, model_filenames_pattern))
return model_filenames
def __create_dir_property(self, dir_name, docstring):
    """
    Generate getter and setter for a directory property.

    Creates a property named "<dir_name>_dir" whose getter returns
    the private "_<dir_name>_dir" attribute and whose setter
    verifies the path before storing it.
    """
property_name = "{}_dir".format(dir_name)
private_name = "_" + property_name
setattr(self, private_name, None)
def fget(self):
return getattr(self, private_name)
def fset(self, path):
# verify_dir raises if the path is not an existing directory.
verify_dir(path, dir_name)
setattr(self, private_name, path)
p = property(fget=fget, fset=fset, doc=docstring)
# NOTE: the property is installed on the *class*, so it is shared by
# all instances; only the backing attribute is per-instance.
setattr(self.__class__, property_name, p)
def __set_dir_properties(self):
"""
Automatically generate the properties for directories.
"""
directories = [
("home", "The ROUGE home directory."),
("data", "The path of the ROUGE 'data' directory."),
("system", "Path of the directory containing system summaries."),
("model", "Path of the directory containing model summaries."),
]
for (dirname, docstring) in directories:
self.__create_dir_property(dirname, docstring)
def __clean_rouge_args(self, rouge_args):
"""
Remove enclosing quotation marks, if any.
"""
if not rouge_args:
return
quot_mark_pattern = re.compile('"(.+)"')
match = quot_mark_pattern.match(rouge_args)
if match:
cleaned_args = match.group(1)
return cleaned_args
else:
return rouge_args
def __add_config_option(self, options):
return options + ['-m'] + [self._config_file]
def __get_config_path(self):
if platform.system() == "Windows":
parent_dir = os.getenv("APPDATA")
config_dir_name = "pyrouge"
elif os.name == "posix":
parent_dir = os.path.expanduser("~")
config_dir_name = ".pyrouge"
else:
parent_dir = os.path.dirname(__file__)
config_dir_name = ""
config_dir = os.path.join(parent_dir, config_dir_name)
if not os.path.exists(config_dir):
os.makedirs(config_dir)
return os.path.join(config_dir, 'settings.ini')
|
bheinzerling/pyrouge | pyrouge/Rouge155.py | Rouge155.__create_dir_property | python | def __create_dir_property(self, dir_name, docstring):
property_name = "{}_dir".format(dir_name)
private_name = "_" + property_name
setattr(self, private_name, None)
def fget(self):
return getattr(self, private_name)
def fset(self, path):
verify_dir(path, dir_name)
setattr(self, private_name, path)
p = property(fget=fget, fset=fset, doc=docstring)
setattr(self.__class__, property_name, p) | Generate getter and setter for a directory property. | train | https://github.com/bheinzerling/pyrouge/blob/afeb37dd2608f1399e2fb24a4ee2fe10a2a18603/pyrouge/Rouge155.py#L536-L553 | null | class Rouge155(object):
"""
This is a wrapper for the ROUGE 1.5.5 summary evaluation package.
This class is designed to simplify the evaluation process by:
1) Converting summaries into a format ROUGE understands.
2) Generating the ROUGE configuration file automatically based
on filename patterns.
This class can be used within Python like this:
rouge = Rouge155()
rouge.system_dir = 'test/systems'
rouge.model_dir = 'test/models'
# The system filename pattern should contain one group that
# matches the document ID.
rouge.system_filename_pattern = 'SL.P.10.R.11.SL062003-(\d+).html'
# The model filename pattern has '#ID#' as a placeholder for the
# document ID. If there are multiple model summaries, pyrouge
# will use the provided regex to automatically match them with
# the corresponding system summary. Here, [A-Z] matches
# multiple model summaries for a given #ID#.
rouge.model_filename_pattern = 'SL.P.10.R.[A-Z].SL062003-#ID#.html'
rouge_output = rouge.evaluate()
print(rouge_output)
output_dict = rouge.output_to_dict(rouge_ouput)
print(output_dict)
-> {'rouge_1_f_score': 0.95652,
'rouge_1_f_score_cb': 0.95652,
'rouge_1_f_score_ce': 0.95652,
'rouge_1_precision': 0.95652,
[...]
To evaluate multiple systems:
rouge = Rouge155()
rouge.system_dir = '/PATH/TO/systems'
rouge.model_dir = 'PATH/TO/models'
for system_id in ['id1', 'id2', 'id3']:
rouge.system_filename_pattern = \
'SL.P/.10.R.{}.SL062003-(\d+).html'.format(system_id)
rouge.model_filename_pattern = \
'SL.P.10.R.[A-Z].SL062003-#ID#.html'
rouge_output = rouge.evaluate(system_id)
print(rouge_output)
"""
def __init__(self, rouge_dir=None, rouge_args=None, log_level=None):
"""
Create a Rouge155 object.
rouge_dir: Directory containing Rouge-1.5.5.pl
rouge_args: Arguments to pass through to ROUGE if you
don't want to use the default pyrouge
arguments.
"""
if log_level is None:
self.log = log.get_global_console_logger()
else:
self.log = log.get_global_console_logger(log_level)
self.__set_dir_properties()
self._config_file = None
self._settings_file = self.__get_config_path()
self.__set_rouge_dir(rouge_dir)
self.args = self.__clean_rouge_args(rouge_args)
self._system_filename_pattern = None
self._model_filename_pattern = None
def save_home_dir(self):
config = ConfigParser()
section = 'pyrouge settings'
config.add_section(section)
config.set(section, 'home_dir', self._home_dir)
with open(self._settings_file, 'w') as f:
config.write(f)
self.log.info("Set ROUGE home directory to {}.".format(self._home_dir))
@property
def settings_file(self):
"""
Path of the setttings file, which stores the ROUGE home dir.
"""
return self._settings_file
@property
def bin_path(self):
"""
The full path of the ROUGE binary (although it's technically
a script), i.e. rouge_home_dir/ROUGE-1.5.5.pl
"""
if self._bin_path is None:
raise Exception(
"ROUGE path not set. Please set the ROUGE home directory "
"and ensure that ROUGE-1.5.5.pl exists in it.")
return self._bin_path
@property
def system_filename_pattern(self):
"""
The regular expression pattern for matching system summary
filenames. The regex string.
E.g. "SL.P.10.R.11.SL062003-(\d+).html" will match the system
filenames in the SPL2003/system folder of the ROUGE SPL example
in the "sample-test" folder.
Currently, there is no support for multiple systems.
"""
return self._system_filename_pattern
@system_filename_pattern.setter
def system_filename_pattern(self, pattern):
self._system_filename_pattern = pattern
@property
def model_filename_pattern(self):
"""
The regular expression pattern for matching model summary
filenames. The pattern needs to contain the string "#ID#",
which is a placeholder for the document ID.
E.g. "SL.P.10.R.[A-Z].SL062003-#ID#.html" will match the model
filenames in the SPL2003/system folder of the ROUGE SPL
example in the "sample-test" folder.
"#ID#" is a placeholder for the document ID which has been
matched by the "(\d+)" part of the system filename pattern.
The different model summaries for a given document ID are
matched by the "[A-Z]" part.
"""
return self._model_filename_pattern
@model_filename_pattern.setter
def model_filename_pattern(self, pattern):
self._model_filename_pattern = pattern
@property
def config_file(self):
return self._config_file
@config_file.setter
def config_file(self, path):
config_dir, _ = os.path.split(path)
verify_dir(config_dir, "configuration file")
self._config_file = path
def split_sentences(self):
"""
ROUGE requires texts split into sentences. In case the texts
are not already split, this method can be used.
"""
from pyrouge.utils.sentence_splitter import PunktSentenceSplitter
self.log.info("Splitting sentences.")
ss = PunktSentenceSplitter()
sent_split_to_string = lambda s: "\n".join(ss.split(s))
process_func = partial(
DirectoryProcessor.process, function=sent_split_to_string)
self.__process_summaries(process_func)
@staticmethod
def convert_summaries_to_rouge_format(input_dir, output_dir):
"""
Convert all files in input_dir into a format ROUGE understands
and saves the files to output_dir. The input files are assumed
to be plain text with one sentence per line.
input_dir: Path of directory containing the input files.
output_dir: Path of directory in which the converted files
will be saved.
"""
DirectoryProcessor.process(
input_dir, output_dir, Rouge155.convert_text_to_rouge_format)
@staticmethod
def convert_text_to_rouge_format(text, title="dummy title"):
"""
Convert a text to a format ROUGE understands. The text is
assumed to contain one sentence per line.
text: The text to convert, containg one sentence per line.
title: Optional title for the text. The title will appear
in the converted file, but doesn't seem to have
any other relevance.
Returns: The converted text as string.
"""
sentences = text.split("\n")
sent_elems = [
"<a name=\"{i}\">[{i}]</a> <a href=\"#{i}\" id={i}>"
"{text}</a>".format(i=i, text=sent)
for i, sent in enumerate(sentences, start=1)]
html = """<html>
<head>
<title>{title}</title>
</head>
<body bgcolor="white">
{elems}
</body>
</html>""".format(title=title, elems="\n".join(sent_elems))
return html
@staticmethod
def write_config_static(system_dir, system_filename_pattern,
model_dir, model_filename_pattern,
config_file_path, system_id=None):
"""
Write the ROUGE configuration file, which is basically a list
of system summary files and their corresponding model summary
files.
pyrouge uses regular expressions to automatically find the
matching model summary files for a given system summary file
(cf. docstrings for system_filename_pattern and
model_filename_pattern).
system_dir: Path of directory containing
system summaries.
system_filename_pattern: Regex string for matching
system summary filenames.
model_dir: Path of directory containing
model summaries.
model_filename_pattern: Regex string for matching model
summary filenames.
config_file_path: Path of the configuration file.
system_id: Optional system ID string which
will appear in the ROUGE output.
"""
system_filenames = [f for f in os.listdir(system_dir)]
system_models_tuples = []
system_filename_pattern = re.compile(system_filename_pattern)
for system_filename in sorted(system_filenames):
match = system_filename_pattern.match(system_filename)
if match:
id = match.groups(0)[0]
model_filenames = Rouge155.__get_model_filenames_for_id(
id, model_dir, model_filename_pattern)
system_models_tuples.append(
(system_filename, sorted(model_filenames)))
if not system_models_tuples:
raise Exception(
"Did not find any files matching the pattern {} "
"in the system summaries directory {}.".format(
system_filename_pattern.pattern, system_dir))
with codecs.open(config_file_path, 'w', encoding='utf-8') as f:
f.write('<ROUGE-EVAL version="1.55">')
for task_id, (system_filename, model_filenames) in enumerate(
system_models_tuples, start=1):
eval_string = Rouge155.__get_eval_string(
task_id, system_id,
system_dir, system_filename,
model_dir, model_filenames)
f.write(eval_string)
f.write("</ROUGE-EVAL>")
def write_config(self, config_file_path=None, system_id=None):
"""
Write the ROUGE configuration file, which is basically a list
of system summary files and their matching model summary files.
This is a non-static version of write_config_file_static().
config_file_path: Path of the configuration file.
system_id: Optional system ID string which will
appear in the ROUGE output.
"""
if not system_id:
system_id = 1
if (not config_file_path) or (not self._config_dir):
self._config_dir = mkdtemp()
config_filename = "rouge_conf.xml"
else:
config_dir, config_filename = os.path.split(config_file_path)
verify_dir(config_dir, "configuration file")
self._config_file = os.path.join(self._config_dir, config_filename)
Rouge155.write_config_static(
self._system_dir, self._system_filename_pattern,
self._model_dir, self._model_filename_pattern,
self._config_file, system_id)
self.log.info(
"Written ROUGE configuration to {}".format(self._config_file))
def evaluate(self, system_id=1, rouge_args=None):
"""
Run ROUGE to evaluate the system summaries in system_dir against
the model summaries in model_dir. The summaries are assumed to
be in the one-sentence-per-line HTML format ROUGE understands.
system_id: Optional system ID which will be printed in
ROUGE's output.
Returns: Rouge output as string.
"""
self.write_config(system_id=system_id)
options = self.__get_options(rouge_args)
command = [self._bin_path] + options
env = None
if hasattr(self, "_home_dir") and self._home_dir:
env = {'ROUGE_EVAL_HOME': self._home_dir}
self.log.info(
"Running ROUGE with command {}".format(" ".join(command)))
rouge_output = check_output(command, env=env).decode("UTF-8")
return rouge_output
def convert_and_evaluate(self, system_id=1,
split_sentences=False, rouge_args=None):
"""
Convert plain text summaries to ROUGE format and run ROUGE to
evaluate the system summaries in system_dir against the model
summaries in model_dir. Optionally split texts into sentences
in case they aren't already.
This is just a convenience method combining
convert_summaries_to_rouge_format() and evaluate().
split_sentences: Optional argument specifying if
sentences should be split.
system_id: Optional system ID which will be printed
in ROUGE's output.
Returns: ROUGE output as string.
"""
if split_sentences:
self.split_sentences()
self.__write_summaries()
rouge_output = self.evaluate(system_id, rouge_args)
return rouge_output
def output_to_dict(self, output):
"""
Convert the ROUGE output into python dictionary for further
processing.
"""
#0 ROUGE-1 Average_R: 0.02632 (95%-conf.int. 0.02632 - 0.02632)
pattern = re.compile(
r"(\d+) (ROUGE-\S+) (Average_\w): (\d.\d+) "
r"\(95%-conf.int. (\d.\d+) - (\d.\d+)\)")
results = {}
for line in output.split("\n"):
match = pattern.match(line)
if match:
sys_id, rouge_type, measure, result, conf_begin, conf_end = \
match.groups()
measure = {
'Average_R': 'recall',
'Average_P': 'precision',
'Average_F': 'f_score'
}[measure]
rouge_type = rouge_type.lower().replace("-", '_')
key = "{}_{}".format(rouge_type, measure)
results[key] = float(result)
results["{}_cb".format(key)] = float(conf_begin)
results["{}_ce".format(key)] = float(conf_end)
return results
###################################################################
# Private methods
def __set_rouge_dir(self, home_dir=None):
"""
Verfify presence of ROUGE-1.5.5.pl and data folder, and set
those paths.
"""
if not home_dir:
self._home_dir = self.__get_rouge_home_dir_from_settings()
else:
self._home_dir = home_dir
self.save_home_dir()
self._bin_path = os.path.join(self._home_dir, 'ROUGE-1.5.5.pl')
self.data_dir = os.path.join(self._home_dir, 'data')
if not os.path.exists(self._bin_path):
raise Exception(
"ROUGE binary not found at {}. Please set the "
"correct path by running pyrouge_set_rouge_path "
"/path/to/rouge/home.".format(self._bin_path))
def __get_rouge_home_dir_from_settings(self):
config = ConfigParser()
with open(self._settings_file) as f:
if hasattr(config, "read_file"):
config.read_file(f)
else:
# use deprecated python 2.x method
config.readfp(f)
rouge_home_dir = config.get('pyrouge settings', 'home_dir')
return rouge_home_dir
@staticmethod
def __get_eval_string(
task_id, system_id,
system_dir, system_filename,
model_dir, model_filenames):
"""
ROUGE can evaluate several system summaries for a given text
against several model summaries, i.e. there is an m-to-n
relation between system and model summaries. The system
summaries are listed in the <PEERS> tag and the model summaries
in the <MODELS> tag. pyrouge currently only supports one system
summary per text, i.e. it assumes a 1-to-n relation between
system and model summaries.
"""
peer_elems = "<P ID=\"{id}\">{name}</P>".format(
id=system_id, name=system_filename)
model_elems = ["<M ID=\"{id}\">{name}</M>".format(
id=chr(65 + i), name=name)
for i, name in enumerate(model_filenames)]
model_elems = "\n\t\t\t".join(model_elems)
eval_string = """
<EVAL ID="{task_id}">
<MODEL-ROOT>{model_root}</MODEL-ROOT>
<PEER-ROOT>{peer_root}</PEER-ROOT>
<INPUT-FORMAT TYPE="SEE">
</INPUT-FORMAT>
<PEERS>
{peer_elems}
</PEERS>
<MODELS>
{model_elems}
</MODELS>
</EVAL>
""".format(
task_id=task_id,
model_root=model_dir, model_elems=model_elems,
peer_root=system_dir, peer_elems=peer_elems)
return eval_string
def __process_summaries(self, process_func):
"""
Helper method that applies process_func to the files in the
system and model folders and saves the resulting files to new
system and model folders.
"""
temp_dir = mkdtemp()
new_system_dir = os.path.join(temp_dir, "system")
os.mkdir(new_system_dir)
new_model_dir = os.path.join(temp_dir, "model")
os.mkdir(new_model_dir)
self.log.info(
"Processing summaries. Saving system files to {} and "
"model files to {}.".format(new_system_dir, new_model_dir))
process_func(self._system_dir, new_system_dir)
process_func(self._model_dir, new_model_dir)
self._system_dir = new_system_dir
self._model_dir = new_model_dir
def __write_summaries(self):
self.log.info("Writing summaries.")
self.__process_summaries(self.convert_summaries_to_rouge_format)
@staticmethod
def __get_model_filenames_for_id(id, model_dir, model_filenames_pattern):
pattern = re.compile(model_filenames_pattern.replace('#ID#', id))
model_filenames = [
f for f in os.listdir(model_dir) if pattern.match(f)]
if not model_filenames:
raise Exception(
"Could not find any model summaries for the system"
" summary with ID {}. Specified model filename pattern was: "
"{}".format(id, model_filenames_pattern))
return model_filenames
def __get_options(self, rouge_args=None):
"""
Get supplied command line arguments for ROUGE or use default
ones.
"""
if self.args:
options = self.args.split()
elif rouge_args:
options = rouge_args.split()
else:
options = [
'-e', self._data_dir,
'-c', 95,
'-2',
'-1',
'-U',
'-r', 1000,
'-n', 4,
'-w', 1.2,
'-a',
]
options = list(map(str, options))
options = self.__add_config_option(options)
return options
def __set_dir_properties(self):
"""
Automatically generate the properties for directories.
"""
directories = [
("home", "The ROUGE home directory."),
("data", "The path of the ROUGE 'data' directory."),
("system", "Path of the directory containing system summaries."),
("model", "Path of the directory containing model summaries."),
]
for (dirname, docstring) in directories:
self.__create_dir_property(dirname, docstring)
def __clean_rouge_args(self, rouge_args):
"""
Remove enclosing quotation marks, if any.
"""
if not rouge_args:
return
quot_mark_pattern = re.compile('"(.+)"')
match = quot_mark_pattern.match(rouge_args)
if match:
cleaned_args = match.group(1)
return cleaned_args
else:
return rouge_args
def __add_config_option(self, options):
return options + ['-m'] + [self._config_file]
def __get_config_path(self):
if platform.system() == "Windows":
parent_dir = os.getenv("APPDATA")
config_dir_name = "pyrouge"
elif os.name == "posix":
parent_dir = os.path.expanduser("~")
config_dir_name = ".pyrouge"
else:
parent_dir = os.path.dirname(__file__)
config_dir_name = ""
config_dir = os.path.join(parent_dir, config_dir_name)
if not os.path.exists(config_dir):
os.makedirs(config_dir)
return os.path.join(config_dir, 'settings.ini')
|
bheinzerling/pyrouge | pyrouge/Rouge155.py | Rouge155.__set_dir_properties | python | def __set_dir_properties(self):
directories = [
("home", "The ROUGE home directory."),
("data", "The path of the ROUGE 'data' directory."),
("system", "Path of the directory containing system summaries."),
("model", "Path of the directory containing model summaries."),
]
for (dirname, docstring) in directories:
self.__create_dir_property(dirname, docstring) | Automatically generate the properties for directories. | train | https://github.com/bheinzerling/pyrouge/blob/afeb37dd2608f1399e2fb24a4ee2fe10a2a18603/pyrouge/Rouge155.py#L555-L567 | [
"def __create_dir_property(self, dir_name, docstring):\n \"\"\"\n Generate getter and setter for a directory property.\n\n \"\"\"\n property_name = \"{}_dir\".format(dir_name)\n private_name = \"_\" + property_name\n setattr(self, private_name, None)\n\n def fget(self):\n return getattr(self, private_name)\n\n def fset(self, path):\n verify_dir(path, dir_name)\n setattr(self, private_name, path)\n\n p = property(fget=fget, fset=fset, doc=docstring)\n setattr(self.__class__, property_name, p)\n"
] | class Rouge155(object):
"""
This is a wrapper for the ROUGE 1.5.5 summary evaluation package.
This class is designed to simplify the evaluation process by:
1) Converting summaries into a format ROUGE understands.
2) Generating the ROUGE configuration file automatically based
on filename patterns.
This class can be used within Python like this:
rouge = Rouge155()
rouge.system_dir = 'test/systems'
rouge.model_dir = 'test/models'
# The system filename pattern should contain one group that
# matches the document ID.
rouge.system_filename_pattern = 'SL.P.10.R.11.SL062003-(\d+).html'
# The model filename pattern has '#ID#' as a placeholder for the
# document ID. If there are multiple model summaries, pyrouge
# will use the provided regex to automatically match them with
# the corresponding system summary. Here, [A-Z] matches
# multiple model summaries for a given #ID#.
rouge.model_filename_pattern = 'SL.P.10.R.[A-Z].SL062003-#ID#.html'
rouge_output = rouge.evaluate()
print(rouge_output)
output_dict = rouge.output_to_dict(rouge_ouput)
print(output_dict)
-> {'rouge_1_f_score': 0.95652,
'rouge_1_f_score_cb': 0.95652,
'rouge_1_f_score_ce': 0.95652,
'rouge_1_precision': 0.95652,
[...]
To evaluate multiple systems:
rouge = Rouge155()
rouge.system_dir = '/PATH/TO/systems'
rouge.model_dir = 'PATH/TO/models'
for system_id in ['id1', 'id2', 'id3']:
rouge.system_filename_pattern = \
'SL.P/.10.R.{}.SL062003-(\d+).html'.format(system_id)
rouge.model_filename_pattern = \
'SL.P.10.R.[A-Z].SL062003-#ID#.html'
rouge_output = rouge.evaluate(system_id)
print(rouge_output)
"""
def __init__(self, rouge_dir=None, rouge_args=None, log_level=None):
"""
Create a Rouge155 object.
rouge_dir: Directory containing Rouge-1.5.5.pl
rouge_args: Arguments to pass through to ROUGE if you
don't want to use the default pyrouge
arguments.
"""
if log_level is None:
self.log = log.get_global_console_logger()
else:
self.log = log.get_global_console_logger(log_level)
self.__set_dir_properties()
self._config_file = None
self._settings_file = self.__get_config_path()
self.__set_rouge_dir(rouge_dir)
self.args = self.__clean_rouge_args(rouge_args)
self._system_filename_pattern = None
self._model_filename_pattern = None
def save_home_dir(self):
    """Persist the current ROUGE home directory to the settings file.

    Writes a single-section INI file ('pyrouge settings') whose
    'home_dir' entry holds ``self._home_dir``, then logs the change.
    """
    parser = ConfigParser()
    parser.add_section('pyrouge settings')
    parser.set('pyrouge settings', 'home_dir', self._home_dir)
    with open(self._settings_file, 'w') as settings_fh:
        parser.write(settings_fh)
    self.log.info("Set ROUGE home directory to {}.".format(self._home_dir))
@property
def settings_file(self):
"""
Path of the settings file, which stores the ROUGE home dir.
"""
return self._settings_file
@property
def bin_path(self):
"""
The full path of the ROUGE binary (although it's technically
a script), i.e. rouge_home_dir/ROUGE-1.5.5.pl
"""
if self._bin_path is None:
raise Exception(
"ROUGE path not set. Please set the ROUGE home directory "
"and ensure that ROUGE-1.5.5.pl exists in it.")
return self._bin_path
@property
def system_filename_pattern(self):
"""
The regular expression pattern for matching system summary
filenames. The regex string.
E.g. "SL.P.10.R.11.SL062003-(\d+).html" will match the system
filenames in the SPL2003/system folder of the ROUGE SPL example
in the "sample-test" folder.
Currently, there is no support for multiple systems.
"""
return self._system_filename_pattern
@system_filename_pattern.setter
def system_filename_pattern(self, pattern):
self._system_filename_pattern = pattern
@property
def model_filename_pattern(self):
"""
The regular expression pattern for matching model summary
filenames. The pattern needs to contain the string "#ID#",
which is a placeholder for the document ID.
E.g. "SL.P.10.R.[A-Z].SL062003-#ID#.html" will match the model
filenames in the SPL2003/system folder of the ROUGE SPL
example in the "sample-test" folder.
"#ID#" is a placeholder for the document ID which has been
matched by the "(\d+)" part of the system filename pattern.
The different model summaries for a given document ID are
matched by the "[A-Z]" part.
"""
return self._model_filename_pattern
@model_filename_pattern.setter
def model_filename_pattern(self, pattern):
self._model_filename_pattern = pattern
@property
def config_file(self):
return self._config_file
@config_file.setter
def config_file(self, path):
config_dir, _ = os.path.split(path)
verify_dir(config_dir, "configuration file")
self._config_file = path
def split_sentences(self):
"""
ROUGE requires texts split into sentences. In case the texts
are not already split, this method can be used.
"""
from pyrouge.utils.sentence_splitter import PunktSentenceSplitter
self.log.info("Splitting sentences.")
ss = PunktSentenceSplitter()
sent_split_to_string = lambda s: "\n".join(ss.split(s))
process_func = partial(
DirectoryProcessor.process, function=sent_split_to_string)
self.__process_summaries(process_func)
@staticmethod
def convert_summaries_to_rouge_format(input_dir, output_dir):
"""
Convert all files in input_dir into a format ROUGE understands
and saves the files to output_dir. The input files are assumed
to be plain text with one sentence per line.
input_dir: Path of directory containing the input files.
output_dir: Path of directory in which the converted files
will be saved.
"""
DirectoryProcessor.process(
input_dir, output_dir, Rouge155.convert_text_to_rouge_format)
@staticmethod
def convert_text_to_rouge_format(text, title="dummy title"):
"""
Convert a text to a format ROUGE understands. The text is
assumed to contain one sentence per line.
text: The text to convert, containg one sentence per line.
title: Optional title for the text. The title will appear
in the converted file, but doesn't seem to have
any other relevance.
Returns: The converted text as string.
"""
sentences = text.split("\n")
sent_elems = [
"<a name=\"{i}\">[{i}]</a> <a href=\"#{i}\" id={i}>"
"{text}</a>".format(i=i, text=sent)
for i, sent in enumerate(sentences, start=1)]
html = """<html>
<head>
<title>{title}</title>
</head>
<body bgcolor="white">
{elems}
</body>
</html>""".format(title=title, elems="\n".join(sent_elems))
return html
@staticmethod
def write_config_static(system_dir, system_filename_pattern,
model_dir, model_filename_pattern,
config_file_path, system_id=None):
"""
Write the ROUGE configuration file, which is basically a list
of system summary files and their corresponding model summary
files.
pyrouge uses regular expressions to automatically find the
matching model summary files for a given system summary file
(cf. docstrings for system_filename_pattern and
model_filename_pattern).
system_dir: Path of directory containing
system summaries.
system_filename_pattern: Regex string for matching
system summary filenames.
model_dir: Path of directory containing
model summaries.
model_filename_pattern: Regex string for matching model
summary filenames.
config_file_path: Path of the configuration file.
system_id: Optional system ID string which
will appear in the ROUGE output.
"""
system_filenames = [f for f in os.listdir(system_dir)]
system_models_tuples = []
system_filename_pattern = re.compile(system_filename_pattern)
for system_filename in sorted(system_filenames):
match = system_filename_pattern.match(system_filename)
if match:
id = match.groups(0)[0]
model_filenames = Rouge155.__get_model_filenames_for_id(
id, model_dir, model_filename_pattern)
system_models_tuples.append(
(system_filename, sorted(model_filenames)))
if not system_models_tuples:
raise Exception(
"Did not find any files matching the pattern {} "
"in the system summaries directory {}.".format(
system_filename_pattern.pattern, system_dir))
with codecs.open(config_file_path, 'w', encoding='utf-8') as f:
f.write('<ROUGE-EVAL version="1.55">')
for task_id, (system_filename, model_filenames) in enumerate(
system_models_tuples, start=1):
eval_string = Rouge155.__get_eval_string(
task_id, system_id,
system_dir, system_filename,
model_dir, model_filenames)
f.write(eval_string)
f.write("</ROUGE-EVAL>")
def write_config(self, config_file_path=None, system_id=None):
"""
Write the ROUGE configuration file, which is basically a list
of system summary files and their matching model summary files.
This is a non-static version of write_config_file_static().
config_file_path: Path of the configuration file.
system_id: Optional system ID string which will
appear in the ROUGE output.
"""
if not system_id:
system_id = 1
if (not config_file_path) or (not self._config_dir):
self._config_dir = mkdtemp()
config_filename = "rouge_conf.xml"
else:
config_dir, config_filename = os.path.split(config_file_path)
verify_dir(config_dir, "configuration file")
self._config_file = os.path.join(self._config_dir, config_filename)
Rouge155.write_config_static(
self._system_dir, self._system_filename_pattern,
self._model_dir, self._model_filename_pattern,
self._config_file, system_id)
self.log.info(
"Written ROUGE configuration to {}".format(self._config_file))
def evaluate(self, system_id=1, rouge_args=None):
"""
Run ROUGE to evaluate the system summaries in system_dir against
the model summaries in model_dir. The summaries are assumed to
be in the one-sentence-per-line HTML format ROUGE understands.
system_id: Optional system ID which will be printed in
ROUGE's output.
Returns: Rouge output as string.
"""
self.write_config(system_id=system_id)
options = self.__get_options(rouge_args)
command = [self._bin_path] + options
env = None
if hasattr(self, "_home_dir") and self._home_dir:
env = {'ROUGE_EVAL_HOME': self._home_dir}
self.log.info(
"Running ROUGE with command {}".format(" ".join(command)))
rouge_output = check_output(command, env=env).decode("UTF-8")
return rouge_output
def convert_and_evaluate(self, system_id=1,
split_sentences=False, rouge_args=None):
"""
Convert plain text summaries to ROUGE format and run ROUGE to
evaluate the system summaries in system_dir against the model
summaries in model_dir. Optionally split texts into sentences
in case they aren't already.
This is just a convenience method combining
convert_summaries_to_rouge_format() and evaluate().
split_sentences: Optional argument specifying if
sentences should be split.
system_id: Optional system ID which will be printed
in ROUGE's output.
Returns: ROUGE output as string.
"""
if split_sentences:
self.split_sentences()
self.__write_summaries()
rouge_output = self.evaluate(system_id, rouge_args)
return rouge_output
def output_to_dict(self, output):
    """Parse ROUGE's plain-text report into a Python dictionary.

    Each matching report line yields three keys, e.g. for
    '0 ROUGE-1 Average_R: ...': 'rouge_1_recall' (the score),
    'rouge_1_recall_cb' and 'rouge_1_recall_ce' (the 95% confidence
    interval begin/end).  Non-matching lines are ignored.
    """
    # Example line:
    # 0 ROUGE-1 Average_R: 0.02632 (95%-conf.int. 0.02632 - 0.02632)
    # Fix: the dots in the score and in 'conf.int.' are now escaped;
    # previously '(\d.\d+)' matched any character, accepting malformed
    # lines such as '0,02632'.
    pattern = re.compile(
        r"(\d+) (ROUGE-\S+) (Average_\w): (\d\.\d+) "
        r"\(95%-conf\.int\. (\d\.\d+) - (\d\.\d+)\)")
    measure_names = {
        'Average_R': 'recall',
        'Average_P': 'precision',
        'Average_F': 'f_score',
    }
    results = {}
    for line in output.split("\n"):
        match = pattern.match(line)
        if not match:
            continue
        sys_id, rouge_type, measure, result, conf_begin, conf_end = \
            match.groups()
        measure = measure_names[measure]
        # e.g. 'ROUGE-1' -> 'rouge_1'
        rouge_type = rouge_type.lower().replace("-", '_')
        key = "{}_{}".format(rouge_type, measure)
        results[key] = float(result)
        results["{}_cb".format(key)] = float(conf_begin)
        results["{}_ce".format(key)] = float(conf_end)
    return results
###################################################################
# Private methods
def __set_rouge_dir(self, home_dir=None):
"""
Verfify presence of ROUGE-1.5.5.pl and data folder, and set
those paths.
"""
if not home_dir:
self._home_dir = self.__get_rouge_home_dir_from_settings()
else:
self._home_dir = home_dir
self.save_home_dir()
self._bin_path = os.path.join(self._home_dir, 'ROUGE-1.5.5.pl')
self.data_dir = os.path.join(self._home_dir, 'data')
if not os.path.exists(self._bin_path):
raise Exception(
"ROUGE binary not found at {}. Please set the "
"correct path by running pyrouge_set_rouge_path "
"/path/to/rouge/home.".format(self._bin_path))
def __get_rouge_home_dir_from_settings(self):
config = ConfigParser()
with open(self._settings_file) as f:
if hasattr(config, "read_file"):
config.read_file(f)
else:
# use deprecated python 2.x method
config.readfp(f)
rouge_home_dir = config.get('pyrouge settings', 'home_dir')
return rouge_home_dir
@staticmethod
def __get_eval_string(
task_id, system_id,
system_dir, system_filename,
model_dir, model_filenames):
"""
ROUGE can evaluate several system summaries for a given text
against several model summaries, i.e. there is an m-to-n
relation between system and model summaries. The system
summaries are listed in the <PEERS> tag and the model summaries
in the <MODELS> tag. pyrouge currently only supports one system
summary per text, i.e. it assumes a 1-to-n relation between
system and model summaries.
"""
peer_elems = "<P ID=\"{id}\">{name}</P>".format(
id=system_id, name=system_filename)
model_elems = ["<M ID=\"{id}\">{name}</M>".format(
id=chr(65 + i), name=name)
for i, name in enumerate(model_filenames)]
model_elems = "\n\t\t\t".join(model_elems)
eval_string = """
<EVAL ID="{task_id}">
<MODEL-ROOT>{model_root}</MODEL-ROOT>
<PEER-ROOT>{peer_root}</PEER-ROOT>
<INPUT-FORMAT TYPE="SEE">
</INPUT-FORMAT>
<PEERS>
{peer_elems}
</PEERS>
<MODELS>
{model_elems}
</MODELS>
</EVAL>
""".format(
task_id=task_id,
model_root=model_dir, model_elems=model_elems,
peer_root=system_dir, peer_elems=peer_elems)
return eval_string
def __process_summaries(self, process_func):
"""
Helper method that applies process_func to the files in the
system and model folders and saves the resulting files to new
system and model folders.
"""
temp_dir = mkdtemp()
new_system_dir = os.path.join(temp_dir, "system")
os.mkdir(new_system_dir)
new_model_dir = os.path.join(temp_dir, "model")
os.mkdir(new_model_dir)
self.log.info(
"Processing summaries. Saving system files to {} and "
"model files to {}.".format(new_system_dir, new_model_dir))
process_func(self._system_dir, new_system_dir)
process_func(self._model_dir, new_model_dir)
self._system_dir = new_system_dir
self._model_dir = new_model_dir
def __write_summaries(self):
self.log.info("Writing summaries.")
self.__process_summaries(self.convert_summaries_to_rouge_format)
@staticmethod
def __get_model_filenames_for_id(id, model_dir, model_filenames_pattern):
pattern = re.compile(model_filenames_pattern.replace('#ID#', id))
model_filenames = [
f for f in os.listdir(model_dir) if pattern.match(f)]
if not model_filenames:
raise Exception(
"Could not find any model summaries for the system"
" summary with ID {}. Specified model filename pattern was: "
"{}".format(id, model_filenames_pattern))
return model_filenames
def __get_options(self, rouge_args=None):
"""
Get supplied command line arguments for ROUGE or use default
ones.
"""
if self.args:
options = self.args.split()
elif rouge_args:
options = rouge_args.split()
else:
options = [
'-e', self._data_dir,
'-c', 95,
'-2',
'-1',
'-U',
'-r', 1000,
'-n', 4,
'-w', 1.2,
'-a',
]
options = list(map(str, options))
options = self.__add_config_option(options)
return options
def __create_dir_property(self, dir_name, docstring):
"""
Generate getter and setter for a directory property.
"""
property_name = "{}_dir".format(dir_name)
private_name = "_" + property_name
setattr(self, private_name, None)
def fget(self):
return getattr(self, private_name)
def fset(self, path):
verify_dir(path, dir_name)
setattr(self, private_name, path)
p = property(fget=fget, fset=fset, doc=docstring)
setattr(self.__class__, property_name, p)
def __clean_rouge_args(self, rouge_args):
"""
Remove enclosing quotation marks, if any.
"""
# None or empty string: nothing to clean; implicitly returns None.
if not rouge_args:
return
# Matches an argument string wrapped in double quotes.
quot_mark_pattern = re.compile('"(.+)"')
match = quot_mark_pattern.match(rouge_args)
if match:
# Strip one pair of enclosing double quotes.
cleaned_args = match.group(1)
return cleaned_args
else:
return rouge_args
def __add_config_option(self, options):
return options + ['-m'] + [self._config_file]
def __get_config_path(self):
if platform.system() == "Windows":
parent_dir = os.getenv("APPDATA")
config_dir_name = "pyrouge"
elif os.name == "posix":
parent_dir = os.path.expanduser("~")
config_dir_name = ".pyrouge"
else:
parent_dir = os.path.dirname(__file__)
config_dir_name = ""
config_dir = os.path.join(parent_dir, config_dir_name)
if not os.path.exists(config_dir):
os.makedirs(config_dir)
return os.path.join(config_dir, 'settings.ini')
|
bheinzerling/pyrouge | pyrouge/Rouge155.py | Rouge155.__clean_rouge_args | python | def __clean_rouge_args(self, rouge_args):
if not rouge_args:
return
quot_mark_pattern = re.compile('"(.+)"')
match = quot_mark_pattern.match(rouge_args)
if match:
cleaned_args = match.group(1)
return cleaned_args
else:
return rouge_args | Remove enclosing quotation marks, if any. | train | https://github.com/bheinzerling/pyrouge/blob/afeb37dd2608f1399e2fb24a4ee2fe10a2a18603/pyrouge/Rouge155.py#L569-L582 | null | class Rouge155(object):
"""
This is a wrapper for the ROUGE 1.5.5 summary evaluation package.
This class is designed to simplify the evaluation process by:
1) Converting summaries into a format ROUGE understands.
2) Generating the ROUGE configuration file automatically based
on filename patterns.
This class can be used within Python like this:
rouge = Rouge155()
rouge.system_dir = 'test/systems'
rouge.model_dir = 'test/models'
# The system filename pattern should contain one group that
# matches the document ID.
rouge.system_filename_pattern = 'SL.P.10.R.11.SL062003-(\d+).html'
# The model filename pattern has '#ID#' as a placeholder for the
# document ID. If there are multiple model summaries, pyrouge
# will use the provided regex to automatically match them with
# the corresponding system summary. Here, [A-Z] matches
# multiple model summaries for a given #ID#.
rouge.model_filename_pattern = 'SL.P.10.R.[A-Z].SL062003-#ID#.html'
rouge_output = rouge.evaluate()
print(rouge_output)
output_dict = rouge.output_to_dict(rouge_ouput)
print(output_dict)
-> {'rouge_1_f_score': 0.95652,
'rouge_1_f_score_cb': 0.95652,
'rouge_1_f_score_ce': 0.95652,
'rouge_1_precision': 0.95652,
[...]
To evaluate multiple systems:
rouge = Rouge155()
rouge.system_dir = '/PATH/TO/systems'
rouge.model_dir = 'PATH/TO/models'
for system_id in ['id1', 'id2', 'id3']:
rouge.system_filename_pattern = \
'SL.P.10.R.{}.SL062003-(\d+).html'.format(system_id)
rouge.model_filename_pattern = \
'SL.P.10.R.[A-Z].SL062003-#ID#.html'
rouge_output = rouge.evaluate(system_id)
print(rouge_output)
"""
def __init__(self, rouge_dir=None, rouge_args=None, log_level=None):
"""
Create a Rouge155 object.
rouge_dir: Directory containing Rouge-1.5.5.pl
rouge_args: Arguments to pass through to ROUGE if you
don't want to use the default pyrouge
arguments.
"""
if log_level is None:
self.log = log.get_global_console_logger()
else:
self.log = log.get_global_console_logger(log_level)
self.__set_dir_properties()
self._config_file = None
self._settings_file = self.__get_config_path()
self.__set_rouge_dir(rouge_dir)
self.args = self.__clean_rouge_args(rouge_args)
self._system_filename_pattern = None
self._model_filename_pattern = None
def save_home_dir(self):
config = ConfigParser()
section = 'pyrouge settings'
config.add_section(section)
config.set(section, 'home_dir', self._home_dir)
with open(self._settings_file, 'w') as f:
config.write(f)
self.log.info("Set ROUGE home directory to {}.".format(self._home_dir))
@property
def settings_file(self):
"""
Path of the setttings file, which stores the ROUGE home dir.
"""
return self._settings_file
@property
def bin_path(self):
"""
The full path of the ROUGE binary (although it's technically
a script), i.e. rouge_home_dir/ROUGE-1.5.5.pl
"""
if self._bin_path is None:
raise Exception(
"ROUGE path not set. Please set the ROUGE home directory "
"and ensure that ROUGE-1.5.5.pl exists in it.")
return self._bin_path
@property
def system_filename_pattern(self):
"""
The regular expression pattern for matching system summary
filenames. The regex string.
E.g. "SL.P.10.R.11.SL062003-(\d+).html" will match the system
filenames in the SPL2003/system folder of the ROUGE SPL example
in the "sample-test" folder.
Currently, there is no support for multiple systems.
"""
return self._system_filename_pattern
@system_filename_pattern.setter
def system_filename_pattern(self, pattern):
self._system_filename_pattern = pattern
@property
def model_filename_pattern(self):
"""
The regular expression pattern for matching model summary
filenames. The pattern needs to contain the string "#ID#",
which is a placeholder for the document ID.
E.g. "SL.P.10.R.[A-Z].SL062003-#ID#.html" will match the model
filenames in the SPL2003/system folder of the ROUGE SPL
example in the "sample-test" folder.
"#ID#" is a placeholder for the document ID which has been
matched by the "(\d+)" part of the system filename pattern.
The different model summaries for a given document ID are
matched by the "[A-Z]" part.
"""
return self._model_filename_pattern
@model_filename_pattern.setter
def model_filename_pattern(self, pattern):
self._model_filename_pattern = pattern
@property
def config_file(self):
return self._config_file
@config_file.setter
def config_file(self, path):
config_dir, _ = os.path.split(path)
verify_dir(config_dir, "configuration file")
self._config_file = path
def split_sentences(self):
"""
ROUGE requires texts split into sentences. In case the texts
are not already split, this method can be used.
"""
from pyrouge.utils.sentence_splitter import PunktSentenceSplitter
self.log.info("Splitting sentences.")
ss = PunktSentenceSplitter()
sent_split_to_string = lambda s: "\n".join(ss.split(s))
process_func = partial(
DirectoryProcessor.process, function=sent_split_to_string)
self.__process_summaries(process_func)
@staticmethod
def convert_summaries_to_rouge_format(input_dir, output_dir):
"""
Convert all files in input_dir into a format ROUGE understands
and saves the files to output_dir. The input files are assumed
to be plain text with one sentence per line.
input_dir: Path of directory containing the input files.
output_dir: Path of directory in which the converted files
will be saved.
"""
DirectoryProcessor.process(
input_dir, output_dir, Rouge155.convert_text_to_rouge_format)
@staticmethod
def convert_text_to_rouge_format(text, title="dummy title"):
"""
Convert a text to a format ROUGE understands. The text is
assumed to contain one sentence per line.
text: The text to convert, containg one sentence per line.
title: Optional title for the text. The title will appear
in the converted file, but doesn't seem to have
any other relevance.
Returns: The converted text as string.
"""
sentences = text.split("\n")
sent_elems = [
"<a name=\"{i}\">[{i}]</a> <a href=\"#{i}\" id={i}>"
"{text}</a>".format(i=i, text=sent)
for i, sent in enumerate(sentences, start=1)]
html = """<html>
<head>
<title>{title}</title>
</head>
<body bgcolor="white">
{elems}
</body>
</html>""".format(title=title, elems="\n".join(sent_elems))
return html
@staticmethod
def write_config_static(system_dir, system_filename_pattern,
model_dir, model_filename_pattern,
config_file_path, system_id=None):
"""
Write the ROUGE configuration file, which is basically a list
of system summary files and their corresponding model summary
files.
pyrouge uses regular expressions to automatically find the
matching model summary files for a given system summary file
(cf. docstrings for system_filename_pattern and
model_filename_pattern).
system_dir: Path of directory containing
system summaries.
system_filename_pattern: Regex string for matching
system summary filenames.
model_dir: Path of directory containing
model summaries.
model_filename_pattern: Regex string for matching model
summary filenames.
config_file_path: Path of the configuration file.
system_id: Optional system ID string which
will appear in the ROUGE output.
"""
system_filenames = [f for f in os.listdir(system_dir)]
system_models_tuples = []
system_filename_pattern = re.compile(system_filename_pattern)
for system_filename in sorted(system_filenames):
match = system_filename_pattern.match(system_filename)
if match:
id = match.groups(0)[0]
model_filenames = Rouge155.__get_model_filenames_for_id(
id, model_dir, model_filename_pattern)
system_models_tuples.append(
(system_filename, sorted(model_filenames)))
if not system_models_tuples:
raise Exception(
"Did not find any files matching the pattern {} "
"in the system summaries directory {}.".format(
system_filename_pattern.pattern, system_dir))
with codecs.open(config_file_path, 'w', encoding='utf-8') as f:
f.write('<ROUGE-EVAL version="1.55">')
for task_id, (system_filename, model_filenames) in enumerate(
system_models_tuples, start=1):
eval_string = Rouge155.__get_eval_string(
task_id, system_id,
system_dir, system_filename,
model_dir, model_filenames)
f.write(eval_string)
f.write("</ROUGE-EVAL>")
def write_config(self, config_file_path=None, system_id=None):
"""
Write the ROUGE configuration file, which is basically a list
of system summary files and their matching model summary files.
This is a non-static version of write_config_file_static().
config_file_path: Path of the configuration file.
system_id: Optional system ID string which will
appear in the ROUGE output.
"""
if not system_id:
system_id = 1
if (not config_file_path) or (not self._config_dir):
self._config_dir = mkdtemp()
config_filename = "rouge_conf.xml"
else:
config_dir, config_filename = os.path.split(config_file_path)
verify_dir(config_dir, "configuration file")
self._config_file = os.path.join(self._config_dir, config_filename)
Rouge155.write_config_static(
self._system_dir, self._system_filename_pattern,
self._model_dir, self._model_filename_pattern,
self._config_file, system_id)
self.log.info(
"Written ROUGE configuration to {}".format(self._config_file))
def evaluate(self, system_id=1, rouge_args=None):
"""
Run ROUGE to evaluate the system summaries in system_dir against
the model summaries in model_dir. The summaries are assumed to
be in the one-sentence-per-line HTML format ROUGE understands.
system_id: Optional system ID which will be printed in
ROUGE's output.
Returns: Rouge output as string.
"""
self.write_config(system_id=system_id)
options = self.__get_options(rouge_args)
command = [self._bin_path] + options
env = None
if hasattr(self, "_home_dir") and self._home_dir:
env = {'ROUGE_EVAL_HOME': self._home_dir}
self.log.info(
"Running ROUGE with command {}".format(" ".join(command)))
rouge_output = check_output(command, env=env).decode("UTF-8")
return rouge_output
def convert_and_evaluate(self, system_id=1,
split_sentences=False, rouge_args=None):
"""
Convert plain text summaries to ROUGE format and run ROUGE to
evaluate the system summaries in system_dir against the model
summaries in model_dir. Optionally split texts into sentences
in case they aren't already.
This is just a convenience method combining
convert_summaries_to_rouge_format() and evaluate().
split_sentences: Optional argument specifying if
sentences should be split.
system_id: Optional system ID which will be printed
in ROUGE's output.
Returns: ROUGE output as string.
"""
if split_sentences:
self.split_sentences()
self.__write_summaries()
rouge_output = self.evaluate(system_id, rouge_args)
return rouge_output
def output_to_dict(self, output):
"""
Convert the ROUGE output into python dictionary for further
processing.
"""
# Example of a line this method parses:
#0 ROUGE-1 Average_R: 0.02632 (95%-conf.int. 0.02632 - 0.02632)
# NOTE(review): the '.' characters in '(\d.\d+)' and 'conf.int.' are
# unescaped and therefore match any character; consider escaping
# them ('\.') so malformed lines are rejected.
pattern = re.compile(
r"(\d+) (ROUGE-\S+) (Average_\w): (\d.\d+) "
r"\(95%-conf.int. (\d.\d+) - (\d.\d+)\)")
results = {}
for line in output.split("\n"):
match = pattern.match(line)
if match:
# Groups: system id, metric name, measure tag, score, CI bounds.
sys_id, rouge_type, measure, result, conf_begin, conf_end = \
match.groups()
# Map ROUGE's measure tag to a readable key suffix.
measure = {
'Average_R': 'recall',
'Average_P': 'precision',
'Average_F': 'f_score'
}[measure]
# e.g. 'ROUGE-1' -> 'rouge_1'; final key e.g. 'rouge_1_recall'.
rouge_type = rouge_type.lower().replace("-", '_')
key = "{}_{}".format(rouge_type, measure)
results[key] = float(result)
# '_cb'/'_ce' hold the 95% confidence-interval begin/end.
results["{}_cb".format(key)] = float(conf_begin)
results["{}_ce".format(key)] = float(conf_end)
return results
###################################################################
# Private methods
def __set_rouge_dir(self, home_dir=None):
"""
Verfify presence of ROUGE-1.5.5.pl and data folder, and set
those paths.
"""
if not home_dir:
self._home_dir = self.__get_rouge_home_dir_from_settings()
else:
self._home_dir = home_dir
self.save_home_dir()
self._bin_path = os.path.join(self._home_dir, 'ROUGE-1.5.5.pl')
self.data_dir = os.path.join(self._home_dir, 'data')
if not os.path.exists(self._bin_path):
raise Exception(
"ROUGE binary not found at {}. Please set the "
"correct path by running pyrouge_set_rouge_path "
"/path/to/rouge/home.".format(self._bin_path))
def __get_rouge_home_dir_from_settings(self):
config = ConfigParser()
with open(self._settings_file) as f:
if hasattr(config, "read_file"):
config.read_file(f)
else:
# use deprecated python 2.x method
config.readfp(f)
rouge_home_dir = config.get('pyrouge settings', 'home_dir')
return rouge_home_dir
@staticmethod
def __get_eval_string(
task_id, system_id,
system_dir, system_filename,
model_dir, model_filenames):
"""
ROUGE can evaluate several system summaries for a given text
against several model summaries, i.e. there is an m-to-n
relation between system and model summaries. The system
summaries are listed in the <PEERS> tag and the model summaries
in the <MODELS> tag. pyrouge currently only supports one system
summary per text, i.e. it assumes a 1-to-n relation between
system and model summaries.
"""
peer_elems = "<P ID=\"{id}\">{name}</P>".format(
id=system_id, name=system_filename)
model_elems = ["<M ID=\"{id}\">{name}</M>".format(
id=chr(65 + i), name=name)
for i, name in enumerate(model_filenames)]
model_elems = "\n\t\t\t".join(model_elems)
eval_string = """
<EVAL ID="{task_id}">
<MODEL-ROOT>{model_root}</MODEL-ROOT>
<PEER-ROOT>{peer_root}</PEER-ROOT>
<INPUT-FORMAT TYPE="SEE">
</INPUT-FORMAT>
<PEERS>
{peer_elems}
</PEERS>
<MODELS>
{model_elems}
</MODELS>
</EVAL>
""".format(
task_id=task_id,
model_root=model_dir, model_elems=model_elems,
peer_root=system_dir, peer_elems=peer_elems)
return eval_string
def __process_summaries(self, process_func):
"""
Helper method that applies process_func to the files in the
system and model folders and saves the resulting files to new
system and model folders.
"""
temp_dir = mkdtemp()
new_system_dir = os.path.join(temp_dir, "system")
os.mkdir(new_system_dir)
new_model_dir = os.path.join(temp_dir, "model")
os.mkdir(new_model_dir)
self.log.info(
"Processing summaries. Saving system files to {} and "
"model files to {}.".format(new_system_dir, new_model_dir))
process_func(self._system_dir, new_system_dir)
process_func(self._model_dir, new_model_dir)
self._system_dir = new_system_dir
self._model_dir = new_model_dir
def __write_summaries(self):
self.log.info("Writing summaries.")
self.__process_summaries(self.convert_summaries_to_rouge_format)
@staticmethod
def __get_model_filenames_for_id(id, model_dir, model_filenames_pattern):
pattern = re.compile(model_filenames_pattern.replace('#ID#', id))
model_filenames = [
f for f in os.listdir(model_dir) if pattern.match(f)]
if not model_filenames:
raise Exception(
"Could not find any model summaries for the system"
" summary with ID {}. Specified model filename pattern was: "
"{}".format(id, model_filenames_pattern))
return model_filenames
def __get_options(self, rouge_args=None):
"""
Get supplied command line arguments for ROUGE or use default
ones.
"""
if self.args:
options = self.args.split()
elif rouge_args:
options = rouge_args.split()
else:
options = [
'-e', self._data_dir,
'-c', 95,
'-2',
'-1',
'-U',
'-r', 1000,
'-n', 4,
'-w', 1.2,
'-a',
]
options = list(map(str, options))
options = self.__add_config_option(options)
return options
def __create_dir_property(self, dir_name, docstring):
"""
Generate getter and setter for a directory property.
"""
property_name = "{}_dir".format(dir_name)
private_name = "_" + property_name
setattr(self, private_name, None)
def fget(self):
return getattr(self, private_name)
def fset(self, path):
verify_dir(path, dir_name)
setattr(self, private_name, path)
p = property(fget=fget, fset=fset, doc=docstring)
setattr(self.__class__, property_name, p)
def __set_dir_properties(self):
"""
Automatically generate the properties for directories.
"""
directories = [
("home", "The ROUGE home directory."),
("data", "The path of the ROUGE 'data' directory."),
("system", "Path of the directory containing system summaries."),
("model", "Path of the directory containing model summaries."),
]
for (dirname, docstring) in directories:
self.__create_dir_property(dirname, docstring)
def __add_config_option(self, options):
return options + ['-m'] + [self._config_file]
def __get_config_path(self):
if platform.system() == "Windows":
parent_dir = os.getenv("APPDATA")
config_dir_name = "pyrouge"
elif os.name == "posix":
parent_dir = os.path.expanduser("~")
config_dir_name = ".pyrouge"
else:
parent_dir = os.path.dirname(__file__)
config_dir_name = ""
config_dir = os.path.join(parent_dir, config_dir_name)
if not os.path.exists(config_dir):
os.makedirs(config_dir)
return os.path.join(config_dir, 'settings.ini')
|
ottogroup/palladium | palladium/util.py | apply_kwargs | python | def apply_kwargs(func, **kwargs):
new_kwargs = {}
params = signature(func).parameters
for param_name in params.keys():
if param_name in kwargs:
new_kwargs[param_name] = kwargs[param_name]
return func(**new_kwargs) | Call *func* with kwargs, but only those kwargs that it accepts. | train | https://github.com/ottogroup/palladium/blob/f3a4372fba809efbd8da7c979a8c6faff04684dd/palladium/util.py#L51-L59 | null | """Assorted utilties.
"""
from collections import UserDict
from contextlib import contextmanager
from datetime import datetime
from functools import partial
from functools import update_wrapper
from functools import wraps
import logging
from importlib import import_module
from inspect import signature
from inspect import getcallargs
import os
import sys
import threading
from time import sleep
from time import time
import traceback
import uuid
import dateutil.parser
import dateutil.rrule
from docopt import docopt
import psutil
from . import __version__
from .config import get_config
from .config import initialize_config
from .config import PALLADIUM_CONFIG_ERROR
logger = logging.getLogger('palladium')
def resolve_dotted_name(dotted_name):
if ':' in dotted_name:
module, name = dotted_name.split(':')
elif '.' in dotted_name:
module, name = dotted_name.rsplit('.', 1)
else:
module, name = dotted_name, None
attr = import_module(module)
if name:
for name in name.split('.'):
attr = getattr(attr, name)
return attr
def args_from_config(func):
"""Decorator that injects parameters from the configuration.
"""
func_args = signature(func).parameters
@wraps(func)
def wrapper(*args, **kwargs):
config = get_config()
for i, argname in enumerate(func_args):
if len(args) > i or argname in kwargs:
continue
elif argname in config:
kwargs[argname] = config[argname]
try:
getcallargs(func, *args, **kwargs)
except TypeError as exc:
msg = "{}\n{}".format(exc.args[0], PALLADIUM_CONFIG_ERROR)
exc.args = (msg,)
raise exc
return func(*args, **kwargs)
wrapper.__wrapped__ = func
return wrapper
@contextmanager
def timer(log=None, message=None):
if log is not None:
log("{}...".format(message))
info = {}
t0 = time()
yield info
info['elapsed'] = time() - t0
if log is not None:
log("{} done in {:.3f} sec.".format(message, info['elapsed']))
@contextmanager
def session_scope(session):
"""Provide a transactional scope around a series of operations."""
try:
yield session
session.commit()
except:
session.rollback()
raise
finally:
session.close()
class ProcessStore(UserDict):
def __init__(self, *args, **kwargs):
self.mtime = {}
super(ProcessStore, self).__init__(*args, **kwargs)
def __setitem__(self, key, item):
super(ProcessStore, self).__setitem__(key, item)
self.mtime[key] = datetime.now()
def __getitem__(self, key):
return super(ProcessStore, self).__getitem__(key)
def __delitem__(self, key):
super(ProcessStore, self).__delitem__(key)
del self.mtime[key]
process_store = ProcessStore(process_metadata={})
class RruleThread(threading.Thread):
"""Calls a given function in intervals defined by given recurrence
rules (from `datetuil.rrule`).
"""
def __init__(self, func, rrule, sleep_between_checks=60):
"""
:param callable func:
The function that I will call periodically.
:param rrule rrule:
The :class:`dateutil.rrule.rrule` recurrence rule that
defines when I will do the calls. See the `python-dateutil
docs <https://labix.org/python-dateutil>`_ for details on
how to define rrules.
For convenience, I will also accept a dict instead of a
`rrule` instance, in which case I will instantiate an rrule
using the dict contents as keyword parameters.
:param int sleep_between_checks:
Number of seconds to sleep before I check again if I should
run the function *func*.
"""
super(RruleThread, self).__init__(daemon=True)
if isinstance(rrule, dict):
rrule = self._rrule_from_dict(rrule)
self.func = func
self.rrule = rrule
self.sleep_between_checks = sleep_between_checks
self.last_execution = datetime.now()
self.alive = True
@classmethod
def _rrule_from_dict(cls, rrule):
kwargs = rrule.copy()
for key, value in rrule.items():
# Allow constants in datetutil.rrule to be passed as strings
if isinstance(value, str) and hasattr(dateutil.rrule, value):
kwargs[key] = getattr(dateutil.rrule, value)
dstart = kwargs.get('dtstart')
if isinstance(dstart, str):
kwargs['dtstart'] = dateutil.parser.parse(dstart)
return dateutil.rrule.rrule(**kwargs)
def run(self):
while self.alive:
now = datetime.now()
if not self.rrule.between(self.last_execution, now):
sleep(self.sleep_between_checks)
continue
self.last_execution = now
try:
self.func()
except:
logger.exception(
"Failed to call {}".format(self.func.__name__))
def memory_usage_psutil():
"""Return the current process memory usage in MB.
"""
process = psutil.Process(os.getpid())
mem = process.memory_info()[0] / float(2 ** 20)
mem_vms = process.memory_info()[1] / float(2 ** 20)
return mem, mem_vms
def version_cmd(argv=sys.argv[1:]): # pragma: no cover
"""\
Print the version number of Palladium.
Usage:
pld-version [options]
Options:
-h --help Show this screen.
"""
docopt(version_cmd.__doc__, argv=argv)
print(__version__)
@args_from_config
def upgrade(model_persister, from_version=None, to_version=None):
kwargs = {'from_version': from_version}
if to_version is not None:
kwargs['to_version'] = to_version
model_persister.upgrade(**kwargs)
def upgrade_cmd(argv=sys.argv[1:]): # pragma: no cover
"""\
Upgrade the database to the latest version.
Usage:
pld-ugprade [options]
Options:
--from=<v> Upgrade from a specific version, overriding
the version stored in the database.
--to=<v> Upgrade to a specific version instead of the
latest version.
-h --help Show this screen.
"""
arguments = docopt(upgrade_cmd.__doc__, argv=argv)
initialize_config(__mode__='fit')
upgrade(from_version=arguments['--from'], to_version=arguments['--to'])
@args_from_config
def export(
model_persister,
model_persister_export,
model_version=None,
activate=True,
):
model = model_persister.read(model_version)
model_version_export = model_persister_export.write(model)
if activate:
model_persister_export.activate(model_version_export)
return model_version_export
def export_cmd(argv=sys.argv[1:]): # pragma: no cover
"""\
Export a model from one model persister to another.
The model persister to export to is supposed to be available in the
configuration file under the 'model_persister_export' key.
Usage:
pld-export [options]
Options:
--version=<v> Export a specific version rather than the active
one.
--no-activate Don't activate the exported model with the
'model_persister_export'.
-h --help Show this screen.
"""
arguments = docopt(export_cmd.__doc__, argv=argv)
model_version = export(
model_version=arguments['--version'],
activate=not arguments['--no-activate'],
)
logger.info("Exported model. New version number: {}".format(model_version))
class PluggableDecorator:
def __init__(self, decorator_config_name):
self.decorator_config_name = decorator_config_name
self.wrapped = None
def __call__(self, func):
self.func = func
def wrapper(*args, **kwargs):
# The motivation here is that we want to defer loading the
# configuration until the function is called for the first
# time.
if self.wrapped is None:
func = self.func
decorators = get_config().get(
self.decorator_config_name, [])
self.decorators = [
resolve_dotted_name(dec) if isinstance(dec, str) else dec
for dec in decorators
]
orig_func = func
for decorator in self.decorators:
func = decorator(func)
if self.decorators:
self.wrapped = wraps(orig_func)(func)
else:
self.wrapped = orig_func
return self.wrapped(*args, **kwargs)
return wraps(func)(wrapper)
@PluggableDecorator('get_metadata_decorators')
def get_metadata(error_code=0, error_message=None, status='OK'):
metadata = {
'status': status,
'error_code': error_code,
}
if error_message is not None:
metadata['error_message'] = error_message
metadata.update(get_config().get('service_metadata', {}))
return metadata
def Partial(func, **kwargs):
"""Allows the use of partially applied functions in the
configuration.
"""
if isinstance(func, str):
func = resolve_dotted_name(func)
partial_func = partial(func, **kwargs)
update_wrapper(partial_func, func)
return partial_func
def _run_job(func, job_id, params):
jobs = process_store['process_metadata'].setdefault('jobs', {})
job = jobs[job_id] = {
'func': repr(func),
'started': str(datetime.utcnow()),
'status': 'running',
'thread': threading.get_ident(),
}
try:
retval = func(**params)
except:
job['status'] = 'error'
job['info'] = traceback.format_exc()
else:
job['status'] = 'finished'
job['info'] = str(retval)
def run_job(func, **params):
job_id = str(uuid.uuid4())
thread = threading.Thread(
target=_run_job,
kwargs={'func': func, 'job_id': job_id, 'params': params},
)
thread.start()
return thread, job_id
|
ottogroup/palladium | palladium/util.py | args_from_config | python | def args_from_config(func):
func_args = signature(func).parameters
@wraps(func)
def wrapper(*args, **kwargs):
config = get_config()
for i, argname in enumerate(func_args):
if len(args) > i or argname in kwargs:
continue
elif argname in config:
kwargs[argname] = config[argname]
try:
getcallargs(func, *args, **kwargs)
except TypeError as exc:
msg = "{}\n{}".format(exc.args[0], PALLADIUM_CONFIG_ERROR)
exc.args = (msg,)
raise exc
return func(*args, **kwargs)
wrapper.__wrapped__ = func
return wrapper | Decorator that injects parameters from the configuration. | train | https://github.com/ottogroup/palladium/blob/f3a4372fba809efbd8da7c979a8c6faff04684dd/palladium/util.py#L62-L84 | null | """Assorted utilties.
"""
from collections import UserDict
from contextlib import contextmanager
from datetime import datetime
from functools import partial
from functools import update_wrapper
from functools import wraps
import logging
from importlib import import_module
from inspect import signature
from inspect import getcallargs
import os
import sys
import threading
from time import sleep
from time import time
import traceback
import uuid
import dateutil.parser
import dateutil.rrule
from docopt import docopt
import psutil
from . import __version__
from .config import get_config
from .config import initialize_config
from .config import PALLADIUM_CONFIG_ERROR
logger = logging.getLogger('palladium')
def resolve_dotted_name(dotted_name):
if ':' in dotted_name:
module, name = dotted_name.split(':')
elif '.' in dotted_name:
module, name = dotted_name.rsplit('.', 1)
else:
module, name = dotted_name, None
attr = import_module(module)
if name:
for name in name.split('.'):
attr = getattr(attr, name)
return attr
def apply_kwargs(func, **kwargs):
"""Call *func* with kwargs, but only those kwargs that it accepts.
"""
new_kwargs = {}
params = signature(func).parameters
for param_name in params.keys():
if param_name in kwargs:
new_kwargs[param_name] = kwargs[param_name]
return func(**new_kwargs)
@contextmanager
def timer(log=None, message=None):
if log is not None:
log("{}...".format(message))
info = {}
t0 = time()
yield info
info['elapsed'] = time() - t0
if log is not None:
log("{} done in {:.3f} sec.".format(message, info['elapsed']))
@contextmanager
def session_scope(session):
"""Provide a transactional scope around a series of operations."""
try:
yield session
session.commit()
except:
session.rollback()
raise
finally:
session.close()
class ProcessStore(UserDict):
def __init__(self, *args, **kwargs):
self.mtime = {}
super(ProcessStore, self).__init__(*args, **kwargs)
def __setitem__(self, key, item):
super(ProcessStore, self).__setitem__(key, item)
self.mtime[key] = datetime.now()
def __getitem__(self, key):
return super(ProcessStore, self).__getitem__(key)
def __delitem__(self, key):
super(ProcessStore, self).__delitem__(key)
del self.mtime[key]
process_store = ProcessStore(process_metadata={})
class RruleThread(threading.Thread):
"""Calls a given function in intervals defined by given recurrence
rules (from `datetuil.rrule`).
"""
def __init__(self, func, rrule, sleep_between_checks=60):
"""
:param callable func:
The function that I will call periodically.
:param rrule rrule:
The :class:`dateutil.rrule.rrule` recurrence rule that
defines when I will do the calls. See the `python-dateutil
docs <https://labix.org/python-dateutil>`_ for details on
how to define rrules.
For convenience, I will also accept a dict instead of a
`rrule` instance, in which case I will instantiate an rrule
using the dict contents as keyword parameters.
:param int sleep_between_checks:
Number of seconds to sleep before I check again if I should
run the function *func*.
"""
super(RruleThread, self).__init__(daemon=True)
if isinstance(rrule, dict):
rrule = self._rrule_from_dict(rrule)
self.func = func
self.rrule = rrule
self.sleep_between_checks = sleep_between_checks
self.last_execution = datetime.now()
self.alive = True
@classmethod
def _rrule_from_dict(cls, rrule):
kwargs = rrule.copy()
for key, value in rrule.items():
# Allow constants in datetutil.rrule to be passed as strings
if isinstance(value, str) and hasattr(dateutil.rrule, value):
kwargs[key] = getattr(dateutil.rrule, value)
dstart = kwargs.get('dtstart')
if isinstance(dstart, str):
kwargs['dtstart'] = dateutil.parser.parse(dstart)
return dateutil.rrule.rrule(**kwargs)
def run(self):
while self.alive:
now = datetime.now()
if not self.rrule.between(self.last_execution, now):
sleep(self.sleep_between_checks)
continue
self.last_execution = now
try:
self.func()
except:
logger.exception(
"Failed to call {}".format(self.func.__name__))
def memory_usage_psutil():
"""Return the current process memory usage in MB.
"""
process = psutil.Process(os.getpid())
mem = process.memory_info()[0] / float(2 ** 20)
mem_vms = process.memory_info()[1] / float(2 ** 20)
return mem, mem_vms
def version_cmd(argv=sys.argv[1:]): # pragma: no cover
"""\
Print the version number of Palladium.
Usage:
pld-version [options]
Options:
-h --help Show this screen.
"""
docopt(version_cmd.__doc__, argv=argv)
print(__version__)
@args_from_config
def upgrade(model_persister, from_version=None, to_version=None):
kwargs = {'from_version': from_version}
if to_version is not None:
kwargs['to_version'] = to_version
model_persister.upgrade(**kwargs)
def upgrade_cmd(argv=sys.argv[1:]): # pragma: no cover
"""\
Upgrade the database to the latest version.
Usage:
pld-ugprade [options]
Options:
--from=<v> Upgrade from a specific version, overriding
the version stored in the database.
--to=<v> Upgrade to a specific version instead of the
latest version.
-h --help Show this screen.
"""
arguments = docopt(upgrade_cmd.__doc__, argv=argv)
initialize_config(__mode__='fit')
upgrade(from_version=arguments['--from'], to_version=arguments['--to'])
@args_from_config
def export(
model_persister,
model_persister_export,
model_version=None,
activate=True,
):
model = model_persister.read(model_version)
model_version_export = model_persister_export.write(model)
if activate:
model_persister_export.activate(model_version_export)
return model_version_export
def export_cmd(argv=sys.argv[1:]): # pragma: no cover
"""\
Export a model from one model persister to another.
The model persister to export to is supposed to be available in the
configuration file under the 'model_persister_export' key.
Usage:
pld-export [options]
Options:
--version=<v> Export a specific version rather than the active
one.
--no-activate Don't activate the exported model with the
'model_persister_export'.
-h --help Show this screen.
"""
arguments = docopt(export_cmd.__doc__, argv=argv)
model_version = export(
model_version=arguments['--version'],
activate=not arguments['--no-activate'],
)
logger.info("Exported model. New version number: {}".format(model_version))
class PluggableDecorator:
def __init__(self, decorator_config_name):
self.decorator_config_name = decorator_config_name
self.wrapped = None
def __call__(self, func):
self.func = func
def wrapper(*args, **kwargs):
# The motivation here is that we want to defer loading the
# configuration until the function is called for the first
# time.
if self.wrapped is None:
func = self.func
decorators = get_config().get(
self.decorator_config_name, [])
self.decorators = [
resolve_dotted_name(dec) if isinstance(dec, str) else dec
for dec in decorators
]
orig_func = func
for decorator in self.decorators:
func = decorator(func)
if self.decorators:
self.wrapped = wraps(orig_func)(func)
else:
self.wrapped = orig_func
return self.wrapped(*args, **kwargs)
return wraps(func)(wrapper)
@PluggableDecorator('get_metadata_decorators')
def get_metadata(error_code=0, error_message=None, status='OK'):
metadata = {
'status': status,
'error_code': error_code,
}
if error_message is not None:
metadata['error_message'] = error_message
metadata.update(get_config().get('service_metadata', {}))
return metadata
def Partial(func, **kwargs):
"""Allows the use of partially applied functions in the
configuration.
"""
if isinstance(func, str):
func = resolve_dotted_name(func)
partial_func = partial(func, **kwargs)
update_wrapper(partial_func, func)
return partial_func
def _run_job(func, job_id, params):
jobs = process_store['process_metadata'].setdefault('jobs', {})
job = jobs[job_id] = {
'func': repr(func),
'started': str(datetime.utcnow()),
'status': 'running',
'thread': threading.get_ident(),
}
try:
retval = func(**params)
except:
job['status'] = 'error'
job['info'] = traceback.format_exc()
else:
job['status'] = 'finished'
job['info'] = str(retval)
def run_job(func, **params):
job_id = str(uuid.uuid4())
thread = threading.Thread(
target=_run_job,
kwargs={'func': func, 'job_id': job_id, 'params': params},
)
thread.start()
return thread, job_id
|
ottogroup/palladium | palladium/util.py | memory_usage_psutil | python | def memory_usage_psutil():
process = psutil.Process(os.getpid())
mem = process.memory_info()[0] / float(2 ** 20)
mem_vms = process.memory_info()[1] / float(2 ** 20)
return mem, mem_vms | Return the current process memory usage in MB. | train | https://github.com/ottogroup/palladium/blob/f3a4372fba809efbd8da7c979a8c6faff04684dd/palladium/util.py#L196-L202 | null | """Assorted utilties.
"""
from collections import UserDict
from contextlib import contextmanager
from datetime import datetime
from functools import partial
from functools import update_wrapper
from functools import wraps
import logging
from importlib import import_module
from inspect import signature
from inspect import getcallargs
import os
import sys
import threading
from time import sleep
from time import time
import traceback
import uuid
import dateutil.parser
import dateutil.rrule
from docopt import docopt
import psutil
from . import __version__
from .config import get_config
from .config import initialize_config
from .config import PALLADIUM_CONFIG_ERROR
logger = logging.getLogger('palladium')
def resolve_dotted_name(dotted_name):
if ':' in dotted_name:
module, name = dotted_name.split(':')
elif '.' in dotted_name:
module, name = dotted_name.rsplit('.', 1)
else:
module, name = dotted_name, None
attr = import_module(module)
if name:
for name in name.split('.'):
attr = getattr(attr, name)
return attr
def apply_kwargs(func, **kwargs):
"""Call *func* with kwargs, but only those kwargs that it accepts.
"""
new_kwargs = {}
params = signature(func).parameters
for param_name in params.keys():
if param_name in kwargs:
new_kwargs[param_name] = kwargs[param_name]
return func(**new_kwargs)
def args_from_config(func):
"""Decorator that injects parameters from the configuration.
"""
func_args = signature(func).parameters
@wraps(func)
def wrapper(*args, **kwargs):
config = get_config()
for i, argname in enumerate(func_args):
if len(args) > i or argname in kwargs:
continue
elif argname in config:
kwargs[argname] = config[argname]
try:
getcallargs(func, *args, **kwargs)
except TypeError as exc:
msg = "{}\n{}".format(exc.args[0], PALLADIUM_CONFIG_ERROR)
exc.args = (msg,)
raise exc
return func(*args, **kwargs)
wrapper.__wrapped__ = func
return wrapper
@contextmanager
def timer(log=None, message=None):
if log is not None:
log("{}...".format(message))
info = {}
t0 = time()
yield info
info['elapsed'] = time() - t0
if log is not None:
log("{} done in {:.3f} sec.".format(message, info['elapsed']))
@contextmanager
def session_scope(session):
"""Provide a transactional scope around a series of operations."""
try:
yield session
session.commit()
except:
session.rollback()
raise
finally:
session.close()
class ProcessStore(UserDict):
def __init__(self, *args, **kwargs):
self.mtime = {}
super(ProcessStore, self).__init__(*args, **kwargs)
def __setitem__(self, key, item):
super(ProcessStore, self).__setitem__(key, item)
self.mtime[key] = datetime.now()
def __getitem__(self, key):
return super(ProcessStore, self).__getitem__(key)
def __delitem__(self, key):
super(ProcessStore, self).__delitem__(key)
del self.mtime[key]
process_store = ProcessStore(process_metadata={})
class RruleThread(threading.Thread):
"""Calls a given function in intervals defined by given recurrence
rules (from `datetuil.rrule`).
"""
def __init__(self, func, rrule, sleep_between_checks=60):
"""
:param callable func:
The function that I will call periodically.
:param rrule rrule:
The :class:`dateutil.rrule.rrule` recurrence rule that
defines when I will do the calls. See the `python-dateutil
docs <https://labix.org/python-dateutil>`_ for details on
how to define rrules.
For convenience, I will also accept a dict instead of a
`rrule` instance, in which case I will instantiate an rrule
using the dict contents as keyword parameters.
:param int sleep_between_checks:
Number of seconds to sleep before I check again if I should
run the function *func*.
"""
super(RruleThread, self).__init__(daemon=True)
if isinstance(rrule, dict):
rrule = self._rrule_from_dict(rrule)
self.func = func
self.rrule = rrule
self.sleep_between_checks = sleep_between_checks
self.last_execution = datetime.now()
self.alive = True
@classmethod
def _rrule_from_dict(cls, rrule):
kwargs = rrule.copy()
for key, value in rrule.items():
# Allow constants in datetutil.rrule to be passed as strings
if isinstance(value, str) and hasattr(dateutil.rrule, value):
kwargs[key] = getattr(dateutil.rrule, value)
dstart = kwargs.get('dtstart')
if isinstance(dstart, str):
kwargs['dtstart'] = dateutil.parser.parse(dstart)
return dateutil.rrule.rrule(**kwargs)
def run(self):
while self.alive:
now = datetime.now()
if not self.rrule.between(self.last_execution, now):
sleep(self.sleep_between_checks)
continue
self.last_execution = now
try:
self.func()
except:
logger.exception(
"Failed to call {}".format(self.func.__name__))
def version_cmd(argv=sys.argv[1:]): # pragma: no cover
"""\
Print the version number of Palladium.
Usage:
pld-version [options]
Options:
-h --help Show this screen.
"""
docopt(version_cmd.__doc__, argv=argv)
print(__version__)
@args_from_config
def upgrade(model_persister, from_version=None, to_version=None):
kwargs = {'from_version': from_version}
if to_version is not None:
kwargs['to_version'] = to_version
model_persister.upgrade(**kwargs)
def upgrade_cmd(argv=sys.argv[1:]): # pragma: no cover
"""\
Upgrade the database to the latest version.
Usage:
pld-ugprade [options]
Options:
--from=<v> Upgrade from a specific version, overriding
the version stored in the database.
--to=<v> Upgrade to a specific version instead of the
latest version.
-h --help Show this screen.
"""
arguments = docopt(upgrade_cmd.__doc__, argv=argv)
initialize_config(__mode__='fit')
upgrade(from_version=arguments['--from'], to_version=arguments['--to'])
@args_from_config
def export(
model_persister,
model_persister_export,
model_version=None,
activate=True,
):
model = model_persister.read(model_version)
model_version_export = model_persister_export.write(model)
if activate:
model_persister_export.activate(model_version_export)
return model_version_export
def export_cmd(argv=sys.argv[1:]): # pragma: no cover
"""\
Export a model from one model persister to another.
The model persister to export to is supposed to be available in the
configuration file under the 'model_persister_export' key.
Usage:
pld-export [options]
Options:
--version=<v> Export a specific version rather than the active
one.
--no-activate Don't activate the exported model with the
'model_persister_export'.
-h --help Show this screen.
"""
arguments = docopt(export_cmd.__doc__, argv=argv)
model_version = export(
model_version=arguments['--version'],
activate=not arguments['--no-activate'],
)
logger.info("Exported model. New version number: {}".format(model_version))
class PluggableDecorator:
def __init__(self, decorator_config_name):
self.decorator_config_name = decorator_config_name
self.wrapped = None
def __call__(self, func):
self.func = func
def wrapper(*args, **kwargs):
# The motivation here is that we want to defer loading the
# configuration until the function is called for the first
# time.
if self.wrapped is None:
func = self.func
decorators = get_config().get(
self.decorator_config_name, [])
self.decorators = [
resolve_dotted_name(dec) if isinstance(dec, str) else dec
for dec in decorators
]
orig_func = func
for decorator in self.decorators:
func = decorator(func)
if self.decorators:
self.wrapped = wraps(orig_func)(func)
else:
self.wrapped = orig_func
return self.wrapped(*args, **kwargs)
return wraps(func)(wrapper)
@PluggableDecorator('get_metadata_decorators')
def get_metadata(error_code=0, error_message=None, status='OK'):
metadata = {
'status': status,
'error_code': error_code,
}
if error_message is not None:
metadata['error_message'] = error_message
metadata.update(get_config().get('service_metadata', {}))
return metadata
def Partial(func, **kwargs):
"""Allows the use of partially applied functions in the
configuration.
"""
if isinstance(func, str):
func = resolve_dotted_name(func)
partial_func = partial(func, **kwargs)
update_wrapper(partial_func, func)
return partial_func
def _run_job(func, job_id, params):
jobs = process_store['process_metadata'].setdefault('jobs', {})
job = jobs[job_id] = {
'func': repr(func),
'started': str(datetime.utcnow()),
'status': 'running',
'thread': threading.get_ident(),
}
try:
retval = func(**params)
except:
job['status'] = 'error'
job['info'] = traceback.format_exc()
else:
job['status'] = 'finished'
job['info'] = str(retval)
def run_job(func, **params):
job_id = str(uuid.uuid4())
thread = threading.Thread(
target=_run_job,
kwargs={'func': func, 'job_id': job_id, 'params': params},
)
thread.start()
return thread, job_id
|
ottogroup/palladium | palladium/util.py | version_cmd | python | def version_cmd(argv=sys.argv[1:]): # pragma: no cover
docopt(version_cmd.__doc__, argv=argv)
print(__version__) | \
Print the version number of Palladium.
Usage:
pld-version [options]
Options:
-h --help Show this screen. | train | https://github.com/ottogroup/palladium/blob/f3a4372fba809efbd8da7c979a8c6faff04684dd/palladium/util.py#L205-L216 | null | """Assorted utilties.
"""
from collections import UserDict
from contextlib import contextmanager
from datetime import datetime
from functools import partial
from functools import update_wrapper
from functools import wraps
import logging
from importlib import import_module
from inspect import signature
from inspect import getcallargs
import os
import sys
import threading
from time import sleep
from time import time
import traceback
import uuid
import dateutil.parser
import dateutil.rrule
from docopt import docopt
import psutil
from . import __version__
from .config import get_config
from .config import initialize_config
from .config import PALLADIUM_CONFIG_ERROR
logger = logging.getLogger('palladium')
def resolve_dotted_name(dotted_name):
if ':' in dotted_name:
module, name = dotted_name.split(':')
elif '.' in dotted_name:
module, name = dotted_name.rsplit('.', 1)
else:
module, name = dotted_name, None
attr = import_module(module)
if name:
for name in name.split('.'):
attr = getattr(attr, name)
return attr
def apply_kwargs(func, **kwargs):
"""Call *func* with kwargs, but only those kwargs that it accepts.
"""
new_kwargs = {}
params = signature(func).parameters
for param_name in params.keys():
if param_name in kwargs:
new_kwargs[param_name] = kwargs[param_name]
return func(**new_kwargs)
def args_from_config(func):
"""Decorator that injects parameters from the configuration.
"""
func_args = signature(func).parameters
@wraps(func)
def wrapper(*args, **kwargs):
config = get_config()
for i, argname in enumerate(func_args):
if len(args) > i or argname in kwargs:
continue
elif argname in config:
kwargs[argname] = config[argname]
try:
getcallargs(func, *args, **kwargs)
except TypeError as exc:
msg = "{}\n{}".format(exc.args[0], PALLADIUM_CONFIG_ERROR)
exc.args = (msg,)
raise exc
return func(*args, **kwargs)
wrapper.__wrapped__ = func
return wrapper
@contextmanager
def timer(log=None, message=None):
if log is not None:
log("{}...".format(message))
info = {}
t0 = time()
yield info
info['elapsed'] = time() - t0
if log is not None:
log("{} done in {:.3f} sec.".format(message, info['elapsed']))
@contextmanager
def session_scope(session):
"""Provide a transactional scope around a series of operations."""
try:
yield session
session.commit()
except:
session.rollback()
raise
finally:
session.close()
class ProcessStore(UserDict):
def __init__(self, *args, **kwargs):
self.mtime = {}
super(ProcessStore, self).__init__(*args, **kwargs)
def __setitem__(self, key, item):
super(ProcessStore, self).__setitem__(key, item)
self.mtime[key] = datetime.now()
def __getitem__(self, key):
return super(ProcessStore, self).__getitem__(key)
def __delitem__(self, key):
super(ProcessStore, self).__delitem__(key)
del self.mtime[key]
process_store = ProcessStore(process_metadata={})
class RruleThread(threading.Thread):
"""Calls a given function in intervals defined by given recurrence
rules (from `datetuil.rrule`).
"""
def __init__(self, func, rrule, sleep_between_checks=60):
"""
:param callable func:
The function that I will call periodically.
:param rrule rrule:
The :class:`dateutil.rrule.rrule` recurrence rule that
defines when I will do the calls. See the `python-dateutil
docs <https://labix.org/python-dateutil>`_ for details on
how to define rrules.
For convenience, I will also accept a dict instead of a
`rrule` instance, in which case I will instantiate an rrule
using the dict contents as keyword parameters.
:param int sleep_between_checks:
Number of seconds to sleep before I check again if I should
run the function *func*.
"""
super(RruleThread, self).__init__(daemon=True)
if isinstance(rrule, dict):
rrule = self._rrule_from_dict(rrule)
self.func = func
self.rrule = rrule
self.sleep_between_checks = sleep_between_checks
self.last_execution = datetime.now()
self.alive = True
@classmethod
def _rrule_from_dict(cls, rrule):
kwargs = rrule.copy()
for key, value in rrule.items():
# Allow constants in datetutil.rrule to be passed as strings
if isinstance(value, str) and hasattr(dateutil.rrule, value):
kwargs[key] = getattr(dateutil.rrule, value)
dstart = kwargs.get('dtstart')
if isinstance(dstart, str):
kwargs['dtstart'] = dateutil.parser.parse(dstart)
return dateutil.rrule.rrule(**kwargs)
def run(self):
while self.alive:
now = datetime.now()
if not self.rrule.between(self.last_execution, now):
sleep(self.sleep_between_checks)
continue
self.last_execution = now
try:
self.func()
except:
logger.exception(
"Failed to call {}".format(self.func.__name__))
def memory_usage_psutil():
"""Return the current process memory usage in MB.
"""
process = psutil.Process(os.getpid())
mem = process.memory_info()[0] / float(2 ** 20)
mem_vms = process.memory_info()[1] / float(2 ** 20)
return mem, mem_vms
@args_from_config
def upgrade(model_persister, from_version=None, to_version=None):
kwargs = {'from_version': from_version}
if to_version is not None:
kwargs['to_version'] = to_version
model_persister.upgrade(**kwargs)
def upgrade_cmd(argv=sys.argv[1:]): # pragma: no cover
"""\
Upgrade the database to the latest version.
Usage:
pld-ugprade [options]
Options:
--from=<v> Upgrade from a specific version, overriding
the version stored in the database.
--to=<v> Upgrade to a specific version instead of the
latest version.
-h --help Show this screen.
"""
arguments = docopt(upgrade_cmd.__doc__, argv=argv)
initialize_config(__mode__='fit')
upgrade(from_version=arguments['--from'], to_version=arguments['--to'])
@args_from_config
def export(
model_persister,
model_persister_export,
model_version=None,
activate=True,
):
model = model_persister.read(model_version)
model_version_export = model_persister_export.write(model)
if activate:
model_persister_export.activate(model_version_export)
return model_version_export
def export_cmd(argv=sys.argv[1:]): # pragma: no cover
"""\
Export a model from one model persister to another.
The model persister to export to is supposed to be available in the
configuration file under the 'model_persister_export' key.
Usage:
pld-export [options]
Options:
--version=<v> Export a specific version rather than the active
one.
--no-activate Don't activate the exported model with the
'model_persister_export'.
-h --help Show this screen.
"""
arguments = docopt(export_cmd.__doc__, argv=argv)
model_version = export(
model_version=arguments['--version'],
activate=not arguments['--no-activate'],
)
logger.info("Exported model. New version number: {}".format(model_version))
class PluggableDecorator:
    """A decorator whose effective behavior is pluggable via configuration.

    The list of decorators to apply to the wrapped function is looked
    up in the configuration under *decorator_config_name* — but only
    on the first call, so that importing a decorated function does not
    force the configuration to load.
    """
    def __init__(self, decorator_config_name):
        # Config key holding the list of decorators (dotted names or
        # callables) to apply.
        self.decorator_config_name = decorator_config_name
        # Lazily built, fully decorated function; None until first call.
        self.wrapped = None

    def __call__(self, func):
        self.func = func

        def wrapper(*args, **kwargs):
            # The motivation here is that we want to defer loading the
            # configuration until the function is called for the first
            # time.
            if self.wrapped is None:
                func = self.func
                decorators = get_config().get(
                    self.decorator_config_name, [])
                # Dotted names are resolved to callables; callables are
                # used as-is.
                self.decorators = [
                    resolve_dotted_name(dec) if isinstance(dec, str) else dec
                    for dec in decorators
                ]
                orig_func = func
                # Apply decorators in list order (first entry innermost).
                for decorator in self.decorators:
                    func = decorator(func)
                if self.decorators:
                    # Preserve the original function's metadata on the
                    # decorated result.
                    self.wrapped = wraps(orig_func)(func)
                else:
                    self.wrapped = orig_func
            return self.wrapped(*args, **kwargs)
        return wraps(func)(wrapper)
@PluggableDecorator('get_metadata_decorators')
def get_metadata(error_code=0, error_message=None, status='OK'):
    """Return the service metadata dict included in API responses.

    Contains *status* and *error_code*, plus *error_message* when
    given, merged with any static entries from the 'service_metadata'
    configuration key.  Decorators configured under
    'get_metadata_decorators' may further modify the result.
    """
    metadata = {
        'status': status,
        'error_code': error_code,
    }
    if error_message is not None:
        metadata['error_message'] = error_message
    # Deployment-specific entries from the configuration override the
    # keys set above.
    metadata.update(get_config().get('service_metadata', {}))
    return metadata
def Partial(func, **kwargs):
    """Create a partially applied version of *func*.

    *func* may be given either as a callable or as a dotted name,
    which makes partial application usable from the configuration
    file.  The returned callable carries over *func*'s metadata
    (name, docstring, ...).
    """
    target = resolve_dotted_name(func) if isinstance(func, str) else func
    bound = partial(target, **kwargs)
    update_wrapper(bound, target)
    return bound
def _run_job(func, job_id, params):
    """Execute *func* with *params*, recording its status under *job_id*.

    Runs as a thread target (see :func:`run_job`).  Progress and
    outcome are written to the 'jobs' dict inside ``process_store``'s
    process metadata, so they can be inspected later.
    """
    jobs = process_store['process_metadata'].setdefault('jobs', {})
    job = jobs[job_id] = {
        'func': repr(func),
        'started': str(datetime.utcnow()),
        'status': 'running',
        'thread': threading.get_ident(),
    }
    try:
        retval = func(**params)
    # Was a bare `except:` — same semantics, but the breadth is now
    # explicit: *any* failure is recorded on the job entry instead of
    # escaping the worker thread.
    except BaseException:
        job['status'] = 'error'
        job['info'] = traceback.format_exc()
    else:
        job['status'] = 'finished'
        job['info'] = str(retval)
def run_job(func, **params):
    """Start *func* with keyword *params* in a background thread.

    Job bookkeeping is handled by :func:`_run_job`.

    :return:
      A ``(thread, job_id)`` tuple; *job_id* is a fresh UUID string
      identifying the job.
    """
    job_id = str(uuid.uuid4())
    worker = threading.Thread(
        target=_run_job,
        kwargs=dict(func=func, job_id=job_id, params=params),
    )
    worker.start()
    return worker, job_id
|
ottogroup/palladium | palladium/util.py | upgrade_cmd | python | def upgrade_cmd(argv=sys.argv[1:]): # pragma: no cover
arguments = docopt(upgrade_cmd.__doc__, argv=argv)
initialize_config(__mode__='fit')
upgrade(from_version=arguments['--from'], to_version=arguments['--to']) | \
Upgrade the database to the latest version.
Usage:
pld-ugprade [options]
Options:
--from=<v> Upgrade from a specific version, overriding
the version stored in the database.
--to=<v> Upgrade to a specific version instead of the
latest version.
-h --help Show this screen. | train | https://github.com/ottogroup/palladium/blob/f3a4372fba809efbd8da7c979a8c6faff04684dd/palladium/util.py#L227-L245 | [
"def initialize_config(**extra):\n if _config.initialized:\n raise RuntimeError(\"Configuration was already initialized\")\n return get_config(**extra)\n"
] | """Assorted utilities.
"""
from collections import UserDict
from contextlib import contextmanager
from datetime import datetime
from functools import partial
from functools import update_wrapper
from functools import wraps
import logging
from importlib import import_module
from inspect import signature
from inspect import getcallargs
import os
import sys
import threading
from time import sleep
from time import time
import traceback
import uuid
import dateutil.parser
import dateutil.rrule
from docopt import docopt
import psutil
from . import __version__
from .config import get_config
from .config import initialize_config
from .config import PALLADIUM_CONFIG_ERROR
logger = logging.getLogger('palladium')
def resolve_dotted_name(dotted_name):
    """Import and return the object named by *dotted_name*.

    Accepts 'pkg.module', 'pkg.module.attr', and the setuptools-style
    'pkg.module:attr' notation.  A name with neither dot nor colon is
    imported as a module.
    """
    if ':' in dotted_name:
        module_name, attr_path = dotted_name.split(':')
    elif '.' in dotted_name:
        module_name, attr_path = dotted_name.rsplit('.', 1)
    else:
        module_name, attr_path = dotted_name, None

    obj = import_module(module_name)
    if attr_path:
        for part in attr_path.split('.'):
            obj = getattr(obj, part)
    return obj
def apply_kwargs(func, **kwargs):
    """Call *func* with those entries of *kwargs* that it accepts.

    Keyword arguments whose names do not appear in *func*'s signature
    are silently dropped.
    """
    accepted = signature(func).parameters
    filtered = {name: kwargs[name] for name in accepted if name in kwargs}
    return func(**filtered)
def args_from_config(func):
    """Decorator that injects parameters from the configuration.

    Arguments supplied by the caller (positionally or by keyword)
    always win; only parameters still missing are looked up in the
    configuration by name.
    """
    func_args = signature(func).parameters
    @wraps(func)
    def wrapper(*args, **kwargs):
        config = get_config()
        for i, argname in enumerate(func_args):
            if len(args) > i or argname in kwargs:
                continue  # explicitly provided by the caller
            elif argname in config:
                kwargs[argname] = config[argname]
        try:
            # Validate the completed call; getcallargs raises TypeError
            # for missing or unexpected arguments.
            getcallargs(func, *args, **kwargs)
        except TypeError as exc:
            # Append a configuration hint: a missing argument usually
            # means a missing config entry.
            msg = "{}\n{}".format(exc.args[0], PALLADIUM_CONFIG_ERROR)
            exc.args = (msg,)
            raise exc
        return func(*args, **kwargs)
    wrapper.__wrapped__ = func
    return wrapper
@contextmanager
def timer(log=None, message=None):
    """Context manager measuring elapsed wall-clock time.

    Yields a dict that receives an 'elapsed' key (seconds) when the
    block finishes normally.  If *log* is given, it is called with a
    start message on entry and a completion message on exit.
    """
    if log is not None:
        log("{}...".format(message))
    info = {}
    started = time()
    yield info
    elapsed = time() - started
    info['elapsed'] = elapsed
    if log is not None:
        log("{} done in {:.3f} sec.".format(message, elapsed))
@contextmanager
def session_scope(session):
    """Provide a transactional scope around a series of operations.

    Commits on success, rolls back on any exception (which is then
    re-raised), and always closes the session.  Note that a failing
    ``commit()`` also triggers the rollback.
    """
    try:
        yield session
        session.commit()
    except BaseException:
        session.rollback()
        raise
    finally:
        session.close()
class ProcessStore(UserDict):
    """A dict that tracks the modification time of each entry.

    Timestamps live in the ``mtime`` attribute, keyed like the store
    itself; they are set on every assignment and removed on deletion.
    """

    def __init__(self, *args, **kwargs):
        # Must exist before UserDict.__init__, which may call
        # __setitem__ for initial entries.
        self.mtime = {}
        super().__init__(*args, **kwargs)

    def __setitem__(self, key, item):
        super().__setitem__(key, item)
        self.mtime[key] = datetime.now()

    def __getitem__(self, key):
        return super().__getitem__(key)

    def __delitem__(self, key):
        super().__delitem__(key)
        del self.mtime[key]
process_store = ProcessStore(process_metadata={})
class RruleThread(threading.Thread):
    """Calls a given function in intervals defined by given recurrence
    rules (from `dateutil.rrule`).
    """
    def __init__(self, func, rrule, sleep_between_checks=60):
        """
        :param callable func:
          The function that I will call periodically.

        :param rrule rrule:
          The :class:`dateutil.rrule.rrule` recurrence rule that
          defines when I will do the calls.  See the `python-dateutil
          docs <https://labix.org/python-dateutil>`_ for details on
          how to define rrules.

          For convenience, I will also accept a dict instead of a
          `rrule` instance, in which case I will instantiate an rrule
          using the dict contents as keyword parameters.

        :param int sleep_between_checks:
          Number of seconds to sleep before I check again if I should
          run the function *func*.
        """
        # daemon=True: this thread must not keep the process alive.
        super(RruleThread, self).__init__(daemon=True)
        if isinstance(rrule, dict):
            rrule = self._rrule_from_dict(rrule)
        self.func = func
        self.rrule = rrule
        self.sleep_between_checks = sleep_between_checks
        self.last_execution = datetime.now()
        # Setting this to False stops the loop in run().
        self.alive = True

    @classmethod
    def _rrule_from_dict(cls, rrule):
        # Build a dateutil rrule from a plain dict of keyword args.
        kwargs = rrule.copy()
        for key, value in rrule.items():
            # Allow constants in dateutil.rrule to be passed as strings
            if isinstance(value, str) and hasattr(dateutil.rrule, value):
                kwargs[key] = getattr(dateutil.rrule, value)
        dstart = kwargs.get('dtstart')
        if isinstance(dstart, str):
            # 'dtstart' may be given as a date string; parse it.
            kwargs['dtstart'] = dateutil.parser.parse(dstart)
        return dateutil.rrule.rrule(**kwargs)

    def run(self):
        while self.alive:
            now = datetime.now()
            # Fire only if the rule has an occurrence between the last
            # execution and now; otherwise sleep and re-check.
            if not self.rrule.between(self.last_execution, now):
                sleep(self.sleep_between_checks)
                continue
            self.last_execution = now
            try:
                self.func()
            except:
                # Keep the scheduler thread alive even if func fails.
                logger.exception(
                    "Failed to call {}".format(self.func.__name__))
def memory_usage_psutil():
    """Return the current process memory usage in MB.
    """
    mb = float(2 ** 20)
    info = psutil.Process(os.getpid()).memory_info()
    return info[0] / mb, info[1] / mb
def version_cmd(argv=sys.argv[1:]):  # pragma: no cover
    """\
Print the version number of Palladium.

Usage:
  pld-version [options]

Options:
  -h --help                Show this screen.
"""
    # The docstring doubles as the docopt usage spec; docopt handles
    # '--help' and rejects unknown arguments for us.
    docopt(version_cmd.__doc__, argv=argv)
    print(__version__)
@args_from_config
def upgrade(model_persister, from_version=None, to_version=None):
    """Ask *model_persister* (injected from config) to upgrade its storage."""
    upgrade_kwargs = {'from_version': from_version}
    if to_version is not None:
        upgrade_kwargs['to_version'] = to_version
    model_persister.upgrade(**upgrade_kwargs)
@args_from_config
def export(
    model_persister,
    model_persister_export,
    model_version=None,
    activate=True,
):
    """Copy a model from *model_persister* to *model_persister_export*.

    Both persisters are normally injected from the configuration by
    :func:`args_from_config`.

    :param model_version:
      Version to read from the source persister; ``None`` reads the
      persister's default (presumably the active model — confirm
      against the persister implementation).
    :param activate:
      When true, activate the newly written model in the target
      persister.
    :return:
      The version number assigned by the target persister's ``write``.
    """
    model = model_persister.read(model_version)
    model_version_export = model_persister_export.write(model)
    if activate:
        model_persister_export.activate(model_version_export)
    return model_version_export
def export_cmd(argv=sys.argv[1:]):  # pragma: no cover
    """\
Export a model from one model persister to another.

The model persister to export to is supposed to be available in the
configuration file under the 'model_persister_export' key.

Usage:
  pld-export [options]

Options:
  --version=<v>            Export a specific version rather than the active
                           one.

  --no-activate            Don't activate the exported model with the
                           'model_persister_export'.

  -h --help                Show this screen.
"""
    # The docstring above is the docopt usage spec; parse argv with it.
    opts = docopt(export_cmd.__doc__, argv=argv)
    new_version = export(
        model_version=opts['--version'],
        activate=not opts['--no-activate'],
    )
    logger.info("Exported model. New version number: {}".format(new_version))
class PluggableDecorator:
    """A decorator whose effective behavior is pluggable via configuration.

    The list of decorators to apply to the wrapped function is looked
    up in the configuration under *decorator_config_name* — but only
    on the first call, so that importing a decorated function does not
    force the configuration to load.
    """
    def __init__(self, decorator_config_name):
        # Config key holding the list of decorators (dotted names or
        # callables) to apply.
        self.decorator_config_name = decorator_config_name
        # Lazily built, fully decorated function; None until first call.
        self.wrapped = None

    def __call__(self, func):
        self.func = func

        def wrapper(*args, **kwargs):
            # The motivation here is that we want to defer loading the
            # configuration until the function is called for the first
            # time.
            if self.wrapped is None:
                func = self.func
                decorators = get_config().get(
                    self.decorator_config_name, [])
                # Dotted names are resolved to callables; callables are
                # used as-is.
                self.decorators = [
                    resolve_dotted_name(dec) if isinstance(dec, str) else dec
                    for dec in decorators
                ]
                orig_func = func
                # Apply decorators in list order (first entry innermost).
                for decorator in self.decorators:
                    func = decorator(func)
                if self.decorators:
                    # Preserve the original function's metadata on the
                    # decorated result.
                    self.wrapped = wraps(orig_func)(func)
                else:
                    self.wrapped = orig_func
            return self.wrapped(*args, **kwargs)
        return wraps(func)(wrapper)
@PluggableDecorator('get_metadata_decorators')
def get_metadata(error_code=0, error_message=None, status='OK'):
    """Assemble the service metadata dict included in API responses.

    Merges *status*, *error_code*, and an optional *error_message*
    with static entries from the 'service_metadata' config key; config
    entries take precedence.
    """
    metadata = {'status': status, 'error_code': error_code}
    if error_message is not None:
        metadata['error_message'] = error_message
    extra = get_config().get('service_metadata', {})
    metadata.update(extra)
    return metadata
def Partial(func, **kwargs):
    """Allows the use of partially applied functions in the
    configuration.

    :param func:
      A callable, or a dotted name ('module.attr' / 'module:attr')
      that is resolved to a callable first.
    :param kwargs:
      Keyword arguments to bind.
    :return:
      The partial, with *func*'s metadata (``__name__``, ``__doc__``,
      ...) copied onto it for nicer logs and introspection.
    """
    if isinstance(func, str):
        func = resolve_dotted_name(func)
    partial_func = partial(func, **kwargs)
    update_wrapper(partial_func, func)
    return partial_func
def _run_job(func, job_id, params):
    """Thread target for :func:`run_job`: run *func* and record status.

    A job entry is created under ``process_store['process_metadata']``
    ``['jobs'][job_id]`` and updated with the outcome ('finished' or
    'error') once *func* returns or raises.
    """
    jobs = process_store['process_metadata'].setdefault('jobs', {})
    job = jobs[job_id] = {
        'func': repr(func),
        'started': str(datetime.utcnow()),
        'status': 'running',
        'thread': threading.get_ident(),
    }
    try:
        retval = func(**params)
    except:
        # Deliberately broad: any failure is recorded on the job entry
        # instead of escaping the worker thread.
        job['status'] = 'error'
        job['info'] = traceback.format_exc()
    else:
        job['status'] = 'finished'
        job['info'] = str(retval)
def run_job(func, **params):
    """Start *func* with keyword *params* in a background thread.

    Job status is tracked by :func:`_run_job` in the process metadata.

    :return:
      A ``(thread, job_id)`` tuple; *job_id* is a fresh UUID string
      identifying the job.
    """
    job_id = str(uuid.uuid4())
    thread = threading.Thread(
        target=_run_job,
        kwargs={'func': func, 'job_id': job_id, 'params': params},
    )
    thread.start()
    return thread, job_id
|
ottogroup/palladium | palladium/util.py | export_cmd | python | def export_cmd(argv=sys.argv[1:]): # pragma: no cover
arguments = docopt(export_cmd.__doc__, argv=argv)
model_version = export(
model_version=arguments['--version'],
activate=not arguments['--no-activate'],
)
logger.info("Exported model. New version number: {}".format(model_version)) | \
Export a model from one model persister to another.
The model persister to export to is supposed to be available in the
configuration file under the 'model_persister_export' key.
Usage:
pld-export [options]
Options:
--version=<v> Export a specific version rather than the active
one.
--no-activate Don't activate the exported model with the
'model_persister_export'.
-h --help Show this screen. | train | https://github.com/ottogroup/palladium/blob/f3a4372fba809efbd8da7c979a8c6faff04684dd/palladium/util.py#L262-L286 | null | """Assorted utilties.
"""
from collections import UserDict
from contextlib import contextmanager
from datetime import datetime
from functools import partial
from functools import update_wrapper
from functools import wraps
import logging
from importlib import import_module
from inspect import signature
from inspect import getcallargs
import os
import sys
import threading
from time import sleep
from time import time
import traceback
import uuid
import dateutil.parser
import dateutil.rrule
from docopt import docopt
import psutil
from . import __version__
from .config import get_config
from .config import initialize_config
from .config import PALLADIUM_CONFIG_ERROR
logger = logging.getLogger('palladium')
def resolve_dotted_name(dotted_name):
    """Import and return the object named by *dotted_name*.

    Accepts 'pkg.module', 'pkg.module.attr', and the setuptools-style
    'pkg.module:attr' notation.  A name with neither dot nor colon is
    imported as a module.
    """
    if ':' in dotted_name:
        module_name, attr_path = dotted_name.split(':')
    elif '.' in dotted_name:
        module_name, attr_path = dotted_name.rsplit('.', 1)
    else:
        module_name, attr_path = dotted_name, None

    obj = import_module(module_name)
    if attr_path:
        for part in attr_path.split('.'):
            obj = getattr(obj, part)
    return obj
def apply_kwargs(func, **kwargs):
"""Call *func* with kwargs, but only those kwargs that it accepts.
"""
new_kwargs = {}
params = signature(func).parameters
for param_name in params.keys():
if param_name in kwargs:
new_kwargs[param_name] = kwargs[param_name]
return func(**new_kwargs)
def args_from_config(func):
"""Decorator that injects parameters from the configuration.
"""
func_args = signature(func).parameters
@wraps(func)
def wrapper(*args, **kwargs):
config = get_config()
for i, argname in enumerate(func_args):
if len(args) > i or argname in kwargs:
continue
elif argname in config:
kwargs[argname] = config[argname]
try:
getcallargs(func, *args, **kwargs)
except TypeError as exc:
msg = "{}\n{}".format(exc.args[0], PALLADIUM_CONFIG_ERROR)
exc.args = (msg,)
raise exc
return func(*args, **kwargs)
wrapper.__wrapped__ = func
return wrapper
@contextmanager
def timer(log=None, message=None):
    """Context manager measuring elapsed wall-clock time.

    Yields a dict that receives an 'elapsed' key (seconds) when the
    block finishes normally.  If *log* is given, it is called with a
    start message on entry and a completion message on exit.
    """
    if log is not None:
        log("{}...".format(message))
    info = {}
    started = time()
    yield info
    elapsed = time() - started
    info['elapsed'] = elapsed
    if log is not None:
        log("{} done in {:.3f} sec.".format(message, elapsed))
@contextmanager
def session_scope(session):
    """Provide a transactional scope around a series of operations.

    Commits on success, rolls back on any exception (which is then
    re-raised), and always closes the session.  Note that a failing
    ``commit()`` also triggers the rollback.
    """
    try:
        yield session
        session.commit()
    except BaseException:
        session.rollback()
        raise
    finally:
        session.close()
class ProcessStore(UserDict):
    """A dict that tracks the modification time of each entry.

    Timestamps live in the ``mtime`` attribute, keyed like the store
    itself; they are set on every assignment and removed on deletion.
    """

    def __init__(self, *args, **kwargs):
        # Must exist before UserDict.__init__, which may call
        # __setitem__ for initial entries.
        self.mtime = {}
        super().__init__(*args, **kwargs)

    def __setitem__(self, key, item):
        super().__setitem__(key, item)
        self.mtime[key] = datetime.now()

    def __getitem__(self, key):
        return super().__getitem__(key)

    def __delitem__(self, key):
        super().__delitem__(key)
        del self.mtime[key]
process_store = ProcessStore(process_metadata={})
class RruleThread(threading.Thread):
"""Calls a given function in intervals defined by given recurrence
rules (from `datetuil.rrule`).
"""
def __init__(self, func, rrule, sleep_between_checks=60):
"""
:param callable func:
The function that I will call periodically.
:param rrule rrule:
The :class:`dateutil.rrule.rrule` recurrence rule that
defines when I will do the calls. See the `python-dateutil
docs <https://labix.org/python-dateutil>`_ for details on
how to define rrules.
For convenience, I will also accept a dict instead of a
`rrule` instance, in which case I will instantiate an rrule
using the dict contents as keyword parameters.
:param int sleep_between_checks:
Number of seconds to sleep before I check again if I should
run the function *func*.
"""
super(RruleThread, self).__init__(daemon=True)
if isinstance(rrule, dict):
rrule = self._rrule_from_dict(rrule)
self.func = func
self.rrule = rrule
self.sleep_between_checks = sleep_between_checks
self.last_execution = datetime.now()
self.alive = True
@classmethod
def _rrule_from_dict(cls, rrule):
kwargs = rrule.copy()
for key, value in rrule.items():
# Allow constants in datetutil.rrule to be passed as strings
if isinstance(value, str) and hasattr(dateutil.rrule, value):
kwargs[key] = getattr(dateutil.rrule, value)
dstart = kwargs.get('dtstart')
if isinstance(dstart, str):
kwargs['dtstart'] = dateutil.parser.parse(dstart)
return dateutil.rrule.rrule(**kwargs)
def run(self):
while self.alive:
now = datetime.now()
if not self.rrule.between(self.last_execution, now):
sleep(self.sleep_between_checks)
continue
self.last_execution = now
try:
self.func()
except:
logger.exception(
"Failed to call {}".format(self.func.__name__))
def memory_usage_psutil():
"""Return the current process memory usage in MB.
"""
process = psutil.Process(os.getpid())
mem = process.memory_info()[0] / float(2 ** 20)
mem_vms = process.memory_info()[1] / float(2 ** 20)
return mem, mem_vms
def version_cmd(argv=sys.argv[1:]): # pragma: no cover
"""\
Print the version number of Palladium.
Usage:
pld-version [options]
Options:
-h --help Show this screen.
"""
docopt(version_cmd.__doc__, argv=argv)
print(__version__)
@args_from_config
def upgrade(model_persister, from_version=None, to_version=None):
kwargs = {'from_version': from_version}
if to_version is not None:
kwargs['to_version'] = to_version
model_persister.upgrade(**kwargs)
def upgrade_cmd(argv=sys.argv[1:]): # pragma: no cover
"""\
Upgrade the database to the latest version.
Usage:
pld-upgrade [options]
Options:
--from=<v> Upgrade from a specific version, overriding
the version stored in the database.
--to=<v> Upgrade to a specific version instead of the
latest version.
-h --help Show this screen.
"""
arguments = docopt(upgrade_cmd.__doc__, argv=argv)
initialize_config(__mode__='fit')
upgrade(from_version=arguments['--from'], to_version=arguments['--to'])
@args_from_config
def export(
model_persister,
model_persister_export,
model_version=None,
activate=True,
):
model = model_persister.read(model_version)
model_version_export = model_persister_export.write(model)
if activate:
model_persister_export.activate(model_version_export)
return model_version_export
class PluggableDecorator:
def __init__(self, decorator_config_name):
self.decorator_config_name = decorator_config_name
self.wrapped = None
def __call__(self, func):
self.func = func
def wrapper(*args, **kwargs):
# The motivation here is that we want to defer loading the
# configuration until the function is called for the first
# time.
if self.wrapped is None:
func = self.func
decorators = get_config().get(
self.decorator_config_name, [])
self.decorators = [
resolve_dotted_name(dec) if isinstance(dec, str) else dec
for dec in decorators
]
orig_func = func
for decorator in self.decorators:
func = decorator(func)
if self.decorators:
self.wrapped = wraps(orig_func)(func)
else:
self.wrapped = orig_func
return self.wrapped(*args, **kwargs)
return wraps(func)(wrapper)
@PluggableDecorator('get_metadata_decorators')
def get_metadata(error_code=0, error_message=None, status='OK'):
metadata = {
'status': status,
'error_code': error_code,
}
if error_message is not None:
metadata['error_message'] = error_message
metadata.update(get_config().get('service_metadata', {}))
return metadata
def Partial(func, **kwargs):
"""Allows the use of partially applied functions in the
configuration.
"""
if isinstance(func, str):
func = resolve_dotted_name(func)
partial_func = partial(func, **kwargs)
update_wrapper(partial_func, func)
return partial_func
def _run_job(func, job_id, params):
jobs = process_store['process_metadata'].setdefault('jobs', {})
job = jobs[job_id] = {
'func': repr(func),
'started': str(datetime.utcnow()),
'status': 'running',
'thread': threading.get_ident(),
}
try:
retval = func(**params)
except:
job['status'] = 'error'
job['info'] = traceback.format_exc()
else:
job['status'] = 'finished'
job['info'] = str(retval)
def run_job(func, **params):
job_id = str(uuid.uuid4())
thread = threading.Thread(
target=_run_job,
kwargs={'func': func, 'job_id': job_id, 'params': params},
)
thread.start()
return thread, job_id
|
ottogroup/palladium | palladium/util.py | Partial | python | def Partial(func, **kwargs):
if isinstance(func, str):
func = resolve_dotted_name(func)
partial_func = partial(func, **kwargs)
update_wrapper(partial_func, func)
return partial_func | Allows the use of partially applied functions in the
configuration. | train | https://github.com/ottogroup/palladium/blob/f3a4372fba809efbd8da7c979a8c6faff04684dd/palladium/util.py#L333-L341 | [
"def resolve_dotted_name(dotted_name):\n if ':' in dotted_name:\n module, name = dotted_name.split(':')\n elif '.' in dotted_name:\n module, name = dotted_name.rsplit('.', 1)\n else:\n module, name = dotted_name, None\n\n attr = import_module(module)\n if name:\n for name in name.split('.'):\n attr = getattr(attr, name)\n\n return attr\n"
] | """Assorted utilties.
"""
from collections import UserDict
from contextlib import contextmanager
from datetime import datetime
from functools import partial
from functools import update_wrapper
from functools import wraps
import logging
from importlib import import_module
from inspect import signature
from inspect import getcallargs
import os
import sys
import threading
from time import sleep
from time import time
import traceback
import uuid
import dateutil.parser
import dateutil.rrule
from docopt import docopt
import psutil
from . import __version__
from .config import get_config
from .config import initialize_config
from .config import PALLADIUM_CONFIG_ERROR
logger = logging.getLogger('palladium')
def resolve_dotted_name(dotted_name):
    """Import and return the object named by *dotted_name*.

    Accepts 'pkg.module', 'pkg.module.attr', and the setuptools-style
    'pkg.module:attr' notation.  A name with neither dot nor colon is
    imported as a module.
    """
    if ':' in dotted_name:
        module_name, attr_path = dotted_name.split(':')
    elif '.' in dotted_name:
        module_name, attr_path = dotted_name.rsplit('.', 1)
    else:
        module_name, attr_path = dotted_name, None

    obj = import_module(module_name)
    if attr_path:
        for part in attr_path.split('.'):
            obj = getattr(obj, part)
    return obj
def apply_kwargs(func, **kwargs):
    """Call *func* with those entries of *kwargs* that it accepts.

    Keyword arguments whose names do not appear in *func*'s signature
    are silently dropped.
    """
    accepted = signature(func).parameters
    filtered = {name: kwargs[name] for name in accepted if name in kwargs}
    return func(**filtered)
def args_from_config(func):
"""Decorator that injects parameters from the configuration.
"""
func_args = signature(func).parameters
@wraps(func)
def wrapper(*args, **kwargs):
config = get_config()
for i, argname in enumerate(func_args):
if len(args) > i or argname in kwargs:
continue
elif argname in config:
kwargs[argname] = config[argname]
try:
getcallargs(func, *args, **kwargs)
except TypeError as exc:
msg = "{}\n{}".format(exc.args[0], PALLADIUM_CONFIG_ERROR)
exc.args = (msg,)
raise exc
return func(*args, **kwargs)
wrapper.__wrapped__ = func
return wrapper
@contextmanager
def timer(log=None, message=None):
if log is not None:
log("{}...".format(message))
info = {}
t0 = time()
yield info
info['elapsed'] = time() - t0
if log is not None:
log("{} done in {:.3f} sec.".format(message, info['elapsed']))
@contextmanager
def session_scope(session):
"""Provide a transactional scope around a series of operations."""
try:
yield session
session.commit()
except:
session.rollback()
raise
finally:
session.close()
class ProcessStore(UserDict):
def __init__(self, *args, **kwargs):
self.mtime = {}
super(ProcessStore, self).__init__(*args, **kwargs)
def __setitem__(self, key, item):
super(ProcessStore, self).__setitem__(key, item)
self.mtime[key] = datetime.now()
def __getitem__(self, key):
return super(ProcessStore, self).__getitem__(key)
def __delitem__(self, key):
super(ProcessStore, self).__delitem__(key)
del self.mtime[key]
process_store = ProcessStore(process_metadata={})
class RruleThread(threading.Thread):
"""Calls a given function in intervals defined by given recurrence
rules (from `datetuil.rrule`).
"""
def __init__(self, func, rrule, sleep_between_checks=60):
"""
:param callable func:
The function that I will call periodically.
:param rrule rrule:
The :class:`dateutil.rrule.rrule` recurrence rule that
defines when I will do the calls. See the `python-dateutil
docs <https://labix.org/python-dateutil>`_ for details on
how to define rrules.
For convenience, I will also accept a dict instead of a
`rrule` instance, in which case I will instantiate an rrule
using the dict contents as keyword parameters.
:param int sleep_between_checks:
Number of seconds to sleep before I check again if I should
run the function *func*.
"""
super(RruleThread, self).__init__(daemon=True)
if isinstance(rrule, dict):
rrule = self._rrule_from_dict(rrule)
self.func = func
self.rrule = rrule
self.sleep_between_checks = sleep_between_checks
self.last_execution = datetime.now()
self.alive = True
@classmethod
def _rrule_from_dict(cls, rrule):
kwargs = rrule.copy()
for key, value in rrule.items():
# Allow constants in datetutil.rrule to be passed as strings
if isinstance(value, str) and hasattr(dateutil.rrule, value):
kwargs[key] = getattr(dateutil.rrule, value)
dstart = kwargs.get('dtstart')
if isinstance(dstart, str):
kwargs['dtstart'] = dateutil.parser.parse(dstart)
return dateutil.rrule.rrule(**kwargs)
def run(self):
while self.alive:
now = datetime.now()
if not self.rrule.between(self.last_execution, now):
sleep(self.sleep_between_checks)
continue
self.last_execution = now
try:
self.func()
except:
logger.exception(
"Failed to call {}".format(self.func.__name__))
def memory_usage_psutil():
"""Return the current process memory usage in MB.
"""
process = psutil.Process(os.getpid())
mem = process.memory_info()[0] / float(2 ** 20)
mem_vms = process.memory_info()[1] / float(2 ** 20)
return mem, mem_vms
def version_cmd(argv=sys.argv[1:]): # pragma: no cover
"""\
Print the version number of Palladium.
Usage:
pld-version [options]
Options:
-h --help Show this screen.
"""
docopt(version_cmd.__doc__, argv=argv)
print(__version__)
@args_from_config
def upgrade(model_persister, from_version=None, to_version=None):
kwargs = {'from_version': from_version}
if to_version is not None:
kwargs['to_version'] = to_version
model_persister.upgrade(**kwargs)
def upgrade_cmd(argv=sys.argv[1:]): # pragma: no cover
"""\
Upgrade the database to the latest version.
Usage:
pld-upgrade [options]
Options:
--from=<v> Upgrade from a specific version, overriding
the version stored in the database.
--to=<v> Upgrade to a specific version instead of the
latest version.
-h --help Show this screen.
"""
arguments = docopt(upgrade_cmd.__doc__, argv=argv)
initialize_config(__mode__='fit')
upgrade(from_version=arguments['--from'], to_version=arguments['--to'])
@args_from_config
def export(
model_persister,
model_persister_export,
model_version=None,
activate=True,
):
model = model_persister.read(model_version)
model_version_export = model_persister_export.write(model)
if activate:
model_persister_export.activate(model_version_export)
return model_version_export
def export_cmd(argv=sys.argv[1:]): # pragma: no cover
"""\
Export a model from one model persister to another.
The model persister to export to is supposed to be available in the
configuration file under the 'model_persister_export' key.
Usage:
pld-export [options]
Options:
--version=<v> Export a specific version rather than the active
one.
--no-activate Don't activate the exported model with the
'model_persister_export'.
-h --help Show this screen.
"""
arguments = docopt(export_cmd.__doc__, argv=argv)
model_version = export(
model_version=arguments['--version'],
activate=not arguments['--no-activate'],
)
logger.info("Exported model. New version number: {}".format(model_version))
class PluggableDecorator:
def __init__(self, decorator_config_name):
self.decorator_config_name = decorator_config_name
self.wrapped = None
def __call__(self, func):
self.func = func
def wrapper(*args, **kwargs):
# The motivation here is that we want to defer loading the
# configuration until the function is called for the first
# time.
if self.wrapped is None:
func = self.func
decorators = get_config().get(
self.decorator_config_name, [])
self.decorators = [
resolve_dotted_name(dec) if isinstance(dec, str) else dec
for dec in decorators
]
orig_func = func
for decorator in self.decorators:
func = decorator(func)
if self.decorators:
self.wrapped = wraps(orig_func)(func)
else:
self.wrapped = orig_func
return self.wrapped(*args, **kwargs)
return wraps(func)(wrapper)
@PluggableDecorator('get_metadata_decorators')
def get_metadata(error_code=0, error_message=None, status='OK'):
metadata = {
'status': status,
'error_code': error_code,
}
if error_message is not None:
metadata['error_message'] = error_message
metadata.update(get_config().get('service_metadata', {}))
return metadata
def _run_job(func, job_id, params):
jobs = process_store['process_metadata'].setdefault('jobs', {})
job = jobs[job_id] = {
'func': repr(func),
'started': str(datetime.utcnow()),
'status': 'running',
'thread': threading.get_ident(),
}
try:
retval = func(**params)
except:
job['status'] = 'error'
job['info'] = traceback.format_exc()
else:
job['status'] = 'finished'
job['info'] = str(retval)
def run_job(func, **params):
job_id = str(uuid.uuid4())
thread = threading.Thread(
target=_run_job,
kwargs={'func': func, 'job_id': job_id, 'params': params},
)
thread.start()
return thread, job_id
|
ottogroup/palladium | palladium/server.py | make_ujson_response | python | def make_ujson_response(obj, status_code=200):
json_encoded = ujson.encode(obj, ensure_ascii=False, double_precision=-1)
resp = make_response(json_encoded)
resp.mimetype = 'application/json'
resp.content_type = 'application/json; charset=utf-8'
resp.status_code = status_code
return resp | Encodes the given *obj* to json and wraps it in a response.
:return:
A Flask response. | train | https://github.com/ottogroup/palladium/blob/f3a4372fba809efbd8da7c979a8c6faff04684dd/palladium/server.py#L32-L43 | null | """HTTP API implementation.
"""
import sys
from docopt import docopt
from flask import Flask
from flask import make_response
from flask import request
import numpy as np
import ujson
from werkzeug.exceptions import BadRequest
from . import __version__
from .fit import activate as activate_base
from .fit import fit as fit_base
from .interfaces import PredictError
from .util import args_from_config
from .util import get_config
from .util import get_metadata
from .util import initialize_config
from .util import logger
from .util import memory_usage_psutil
from .util import PluggableDecorator
from .util import process_store
from .util import run_job
from .util import resolve_dotted_name
app = Flask(__name__)
class PredictService:
    """A default :class:`palladium.interfaces.PredictService`
    implementation.

    Aims to work out of the box for the most standard use cases.
    Allows overriding of specific parts of its logic by using granular
    methods to compose the work.
    """
    # Converters for turning raw request strings into Python values; the
    # keys are the type names usable in *mapping* and *params* entries.
    types = {
        'float': float,
        'int': int,
        'str': str,
        'bool': lambda x: x.lower() == 'true',
    }

    def __init__(
        self,
        mapping,
        params=(),
        entry_point='/predict',
        decorator_list_name='predict_decorators',
        predict_proba=False,
        unwrap_sample=False,
        **kwargs
    ):
        """
        :param mapping:
            A list of query parameters and their type that should be
            included in the request.  These will be processed in the
            :meth:`sample_from_data` method to construct a sample
            that can be used for prediction.  An example that expects
            two request parameters called ``pos`` and ``neg`` that are
            both of type str::

                { ...
                    'mapping': [('pos', 'str'), ('neg', 'str')]
                ... }

        :param params:
            Similarly to *mapping*, this is a list of name and type of
            parameters that will be passed to the model's
            :meth:`~palladium.interfaces.Model.predict` method as keyword
            arguments.

        :param predict_proba:
            Instead of returning a single class (the default), when
            *predict_proba* is set to true, the result will instead
            contain a list of class probabilities.

        :param unwrap_sample:
            When working with text, scikit-learn and others will
            sometimes expect the input to be a 1d array of strings
            rather than a 2d array.  Setting *unwrap_sample* to true
            will use this representation.
        """
        self.mapping = mapping
        self.params = params
        self.entry_point = entry_point
        self.decorator_list_name = decorator_list_name
        self.predict_proba = predict_proba
        self.unwrap_sample = unwrap_sample
        # Any additional keyword arguments become instance attributes;
        # this is how arbitrary configuration keys reach the service.
        vars(self).update(kwargs)

    def initialize_component(self, config):
        # Hook called by the configuration machinery: registers this
        # service's Flask route so requests to *entry_point* reach it.
        create_predict_function(
            self.entry_point, self, self.decorator_list_name, config)

    def __call__(self, model, request):
        # Top-level entry point: any exception is converted into a JSON
        # error response instead of propagating to the WSGI layer.
        try:
            return self.do(model, request)
        except Exception as e:
            return self.response_from_exception(e)

    def do(self, model, request):
        """Extract samples from *request*, predict, and build a response."""
        if request.method == 'GET':
            # GET carries a single sample in the query string.
            single = True
            samples = np.array([self.sample_from_data(model, request.args)])
        else:
            # Other methods (POST) carry a JSON list of samples (batch).
            single = False
            samples = []
            for data in request.json:
                samples.append(self.sample_from_data(model, data))
            samples = np.array(samples)
        params = self.params_from_data(model, request.args)
        y_pred = self.predict(model, samples, **params)
        return self.response_from_prediction(y_pred, single=single)

    def sample_from_data(self, model, data):
        """Convert incoming sample *data* into a numpy array.

        :param model:
            The :class:`~Model` instance to use for making predictions.
        :param data:
            A dict-like with the sample's data, typically retrieved from
            ``request.args`` or similar.
        """
        values = []
        for key, type_name in self.mapping:
            value_type = self.types[type_name]
            values.append(value_type(data[key]))
        if self.unwrap_sample:
            # 1-d representation, e.g. for text vectorizers that expect
            # a plain sequence rather than a 2-d array.
            assert len(values) == 1
            return np.array(values[0])
        else:
            return np.array(values, dtype=object)

    def params_from_data(self, model, data):
        """Retrieve additional parameters (keyword arguments) for
        ``model.predict`` from request *data*.

        :param model:
            The :class:`~Model` instance to use for making predictions.
        :param data:
            A dict-like with the parameter data, typically retrieved
            from ``request.args`` or similar.
        """
        params = {}
        for key, type_name in self.params:
            value_type = self.types[type_name]
            if key in data:
                params[key] = value_type(data[key])
            elif hasattr(model, key):
                # Fall back to a default stored on the model itself.
                params[key] = getattr(model, key)
        return params

    def predict(self, model, sample, **kwargs):
        # Dispatch to class probabilities or plain prediction, depending
        # on how the service was configured.
        if self.predict_proba:
            return model.predict_proba(sample, **kwargs)
        else:
            return model.predict(sample, **kwargs)

    def response_from_prediction(self, y_pred, single=True):
        """Turns a model's prediction in *y_pred* into a JSON
        response.
        """
        result = y_pred.tolist()
        if single:
            # A GET request predicts one sample; unwrap the batch of one.
            result = result[0]
        response = {
            'metadata': get_metadata(),
            'result': result,
        }
        return make_ujson_response(response, status_code=200)

    def response_from_exception(self, exc):
        # Map exception types onto the wire format: PredictError keeps
        # its own error code/message (500), BadRequest maps to 400, and
        # anything else is logged and reported generically (500).
        if isinstance(exc, PredictError):
            return make_ujson_response({
                'metadata': get_metadata(
                    error_code=exc.error_code,
                    error_message=exc.error_message,
                    status="ERROR"
                )
            }, status_code=500)
        elif isinstance(exc, BadRequest):
            return make_ujson_response({
                'metadata': get_metadata(
                    error_code=-1,
                    error_message="BadRequest: {}".format(exc.args),
                    status="ERROR"
                )
            }, status_code=400)
        else:
            logger.exception("Unexpected error")
            return make_ujson_response({
                'metadata': get_metadata(
                    error_code=-1,
                    error_message="{}: {}".format(
                        exc.__class__.__name__, str(exc)),
                    status="ERROR"
                )
            }, status_code=500)
def predict(model_persister, predict_service):
    """Read the active model and hand the current request to the service.

    Any exception escaping the persister or the service is logged and
    converted into a generic JSON error response with status 500.
    """
    try:
        active_model = model_persister.read()
        return predict_service(active_model, request)
    except Exception as exc:
        logger.exception("Unexpected error")
        return make_ujson_response({
            "status": "ERROR",
            "error_code": -1,
            "error_message": "{}: {}".format(exc.__class__.__name__, str(exc)),
        }, status_code=500)
@app.route('/alive')
@PluggableDecorator('alive_decorators')
@args_from_config
def alive(alive=None):
    """Health-check endpoint reporting memory usage and store contents.

    Responds with 200 when every required process-store entry is
    present, and with 503 as soon as one of them is missing.
    """
    if alive is None:
        alive = {}
    rss, vms = memory_usage_psutil()
    info = {
        'memory_usage': rss,  # resident set size
        'memory_usage_vms': vms,  # virtual memory size
        'palladium_version': __version__,
        'service_metadata': get_config().get('service_metadata', {}),
    }
    status_code = 200
    for attr in alive.get('process_store_required', ()):
        obj = process_store.get(attr)
        if obj is None:
            # Required entry missing: flag the service as unavailable.
            info[attr] = "N/A"
            status_code = 503
            continue
        entry = {'updated': process_store.mtime[attr].isoformat()}
        if hasattr(obj, '__metadata__'):
            entry['metadata'] = obj.__metadata__
        info[attr] = entry
    info['process_metadata'] = process_store['process_metadata']
    return make_ujson_response(info, status_code=status_code)
def create_predict_function(
        route, predict_service, decorator_list_name, config):
    """Creates a predict function and registers it to
    the Flask app using the route decorator.

    :param str route:
        Path of the entry point.
    :param palladium.interfaces.PredictService predict_service:
        The predict service to be registered to this entry point.
    :param str decorator_list_name:
        The decorator list to be used for this predict service.  It is
        OK if there is no such entry in the active Palladium config.
    :return:
        A predict service function that will be used to process
        predict requests.
    """
    # Resolved once at registration time; the closure below reuses the
    # same persister object for every request.
    model_persister = config.get('model_persister')

    # The Flask endpoint name is set to the route path itself, so each
    # entry point registered this way gets its own distinct endpoint.
    @app.route(route, methods=['GET', 'POST'], endpoint=route)
    @PluggableDecorator(decorator_list_name)
    def predict_func():
        return predict(model_persister, predict_service)
    return predict_func
def devserver_cmd(argv=sys.argv[1:]):  # pragma: no cover
    # NOTE: the docstring below is a docopt specification parsed at
    # runtime via ``docopt(devserver_cmd.__doc__)``; its wording and
    # layout are part of the program's behavior -- do not reformat it.
    # Also note the default ``argv`` is captured once at import time.
    """\
    Serve the web API for development.

    Usage:
      pld-devserver [options]

    Options:
      -h --help               Show this screen.

      --host=<host>           The host to use [default: 0.0.0.0].

      --port=<port>           The port to use [default: 5000].

      --debug=<debug>         Whether or not to use debug mode [default: 0].
    """
    arguments = docopt(devserver_cmd.__doc__, argv=argv)
    initialize_config()
    app.run(
        host=arguments['--host'],
        port=int(arguments['--port']),
        debug=int(arguments['--debug']),
    )
class PredictStream:
    """A class that helps make predictions through stdin and stdout.
    """

    def __init__(self):
        # Model and service are resolved once at startup from the active
        # configuration; the model is loaded eagerly.
        self.model = get_config()['model_persister'].read()
        self.predict_service = get_config()['predict_service']

    def process_line(self, line):
        """Parse one JSON line (a list of sample dicts) and predict.

        Returns the raw prediction result for the whole batch.
        """
        predict_service = self.predict_service
        datas = ujson.loads(line)
        samples = [predict_service.sample_from_data(self.model, data)
                   for data in datas]
        samples = np.array(samples)
        # NOTE: prediction parameters are taken from the first row only
        # and applied to the entire batch.
        params = predict_service.params_from_data(self.model, datas[0])
        return predict_service.predict(self.model, samples, **params)

    def listen(self, io_in, io_out, io_err):
        """Listens to provided io stream and writes predictions
        to output. In case of errors, the error stream will be used.
        """
        for line in io_in:
            # The literal input 'exit' (any case) terminates the loop.
            if line.strip().lower() == 'exit':
                break
            try:
                y_pred = self.process_line(line)
            except Exception as e:
                # Keep stdout line-aligned with the input by emitting an
                # empty result list; details go to the error stream.
                io_out.write('[]\n')
                io_err.write(
                    "Error while processing input row: {}"
                    "{}: {}\n".format(line, type(e), e))
                io_err.flush()
            else:
                io_out.write(ujson.dumps(y_pred.tolist()))
                io_out.write('\n')
                io_out.flush()
def stream_cmd(argv=sys.argv[1:]):  # pragma: no cover
    # NOTE: the docstring below is a docopt specification parsed at
    # runtime via ``docopt(stream_cmd.__doc__)``; its wording and layout
    # are part of the program's behavior -- do not reformat it.
    """\
    Start the streaming server, which listens to stdin, processes line
    by line, and returns predictions.

    The input should consist of a list of json objects, where each object
    will result in a prediction.  Each line is processed in a batch.

    Example input (must be on a single line):

      [{"sepal length": 1.0, "sepal width": 1.1, "petal length": 0.7,
      "petal width": 5}, {"sepal length": 1.0, "sepal width": 8.0,
      "petal length": 1.4, "petal width": 5}]

    Example output:

      ["Iris-virginica","Iris-setosa"]

    An input line with the word 'exit' will quit the streaming server.

    Usage:
      pld-stream [options]

    Options:
      -h --help                  Show this screen.
    """
    # Result is unused: docopt only validates argv / handles --help here.
    docopt(stream_cmd.__doc__, argv=argv)
    initialize_config()
    stream = PredictStream()
    stream.listen(sys.stdin, sys.stdout, sys.stderr)
@app.route('/list')
@PluggableDecorator('list_decorators')
@args_from_config
def list(model_persister):
    """Return all known models and persister properties as JSON.

    NOTE: the name shadows the builtin ``list``; it is kept because the
    ``activate`` view calls this function by name.
    """
    models = model_persister.list_models()
    properties = model_persister.list_properties()
    return make_ujson_response({
        'models': models,
        'properties': properties,
    })
@PluggableDecorator('server_fit_decorators')
@args_from_config
def fit():
    """Kick off an asynchronous model fit and return its job id.

    Recognized form fields are converted to their Python types and
    forwarded to :func:`palladium.fit.fit` as keyword arguments.
    """
    def as_bool(value):
        # Same truthy spellings as the original lambda converters.
        return value.lower() in ('1', 't', 'true')

    param_converters = {
        'persist': as_bool,
        'activate': as_bool,
        'evaluate': as_bool,
        'persist_if_better_than': float,
    }
    params = {}
    for name, convert in param_converters.items():
        if name in request.form:
            params[name] = convert(request.form[name])
    _thread, job_id = run_job(fit_base, **params)
    return make_ujson_response({'job_id': job_id}, status_code=200)
@PluggableDecorator('update_model_cache_decorators')
@args_from_config
def update_model_cache(model_persister):
    """Schedule a cache refresh on the persister, if it supports one.

    Responds with 503 when the persister has no ``update_cache`` method.
    """
    update = getattr(model_persister, 'update_cache', None)
    if update is None:
        return make_ujson_response({}, status_code=503)
    _thread, job_id = run_job(update)
    return make_ujson_response({'job_id': job_id}, status_code=200)
@PluggableDecorator('activate_decorators')
def activate():
    """Activate the model version given in the POSTed form data.

    On success the response is the same listing the ``/list`` view
    produces; an unknown version yields an empty 503 response.
    """
    version = int(request.form['model_version'])
    try:
        activate_base(model_version=version)
    except LookupError:
        return make_ujson_response({}, status_code=503)
    # ``list`` here is the sibling view function, not the builtin.
    return list()
def add_url_rule(rule, endpoint=None, view_func=None, app=app, **options):
    """Register *view_func* for *rule*, resolving dotted-name strings.

    A ``view_func`` given as a string such as ``'mypkg.views.index'``
    is imported via :func:`resolve_dotted_name` before registration.
    """
    resolved = (
        resolve_dotted_name(view_func)
        if isinstance(view_func, str)
        else view_func
    )
    app.add_url_rule(rule, endpoint=endpoint, view_func=resolved, **options)
|
ottogroup/palladium | palladium/server.py | create_predict_function | python | def create_predict_function(
route, predict_service, decorator_list_name, config):
model_persister = config.get('model_persister')
@app.route(route, methods=['GET', 'POST'], endpoint=route)
@PluggableDecorator(decorator_list_name)
def predict_func():
return predict(model_persister, predict_service)
return predict_func | Creates a predict function and registers it to
the Flask app using the route decorator.
:param str route:
Path of the entry point.
:param palladium.interfaces.PredictService predict_service:
The predict service to be registered to this entry point.
:param str decorator_list_name:
The decorator list to be used for this predict service. It is
OK if there is no such entry in the active Palladium config.
:return:
A predict service function that will be used to process
predict requests. | train | https://github.com/ottogroup/palladium/blob/f3a4372fba809efbd8da7c979a8c6faff04684dd/palladium/server.py#L270-L296 | null | """HTTP API implementation.
"""
import sys
from docopt import docopt
from flask import Flask
from flask import make_response
from flask import request
import numpy as np
import ujson
from werkzeug.exceptions import BadRequest
from . import __version__
from .fit import activate as activate_base
from .fit import fit as fit_base
from .interfaces import PredictError
from .util import args_from_config
from .util import get_config
from .util import get_metadata
from .util import initialize_config
from .util import logger
from .util import memory_usage_psutil
from .util import PluggableDecorator
from .util import process_store
from .util import run_job
from .util import resolve_dotted_name
app = Flask(__name__)
def make_ujson_response(obj, status_code=200):
    """Encode *obj* as JSON and wrap it in a Flask response.

    :param obj: Any ujson-serializable object.
    :param int status_code: HTTP status code for the response.
    :return: A Flask response with a UTF-8 JSON body.
    """
    body = ujson.encode(obj, ensure_ascii=False, double_precision=-1)
    response = make_response(body)
    response.mimetype = 'application/json'
    response.content_type = 'application/json; charset=utf-8'
    response.status_code = status_code
    return response
class PredictService:
    """A default :class:`palladium.interfaces.PredictService`
    implementation.

    Aims to work out of the box for the most standard use cases.
    Allows overriding of specific parts of its logic by using granular
    methods to compose the work.
    """
    # Converters for turning raw request strings into Python values; the
    # keys are the type names usable in *mapping* and *params* entries.
    types = {
        'float': float,
        'int': int,
        'str': str,
        'bool': lambda x: x.lower() == 'true',
    }

    def __init__(
        self,
        mapping,
        params=(),
        entry_point='/predict',
        decorator_list_name='predict_decorators',
        predict_proba=False,
        unwrap_sample=False,
        **kwargs
    ):
        """
        :param mapping:
            A list of query parameters and their type that should be
            included in the request.  These will be processed in the
            :meth:`sample_from_data` method to construct a sample
            that can be used for prediction.  An example that expects
            two request parameters called ``pos`` and ``neg`` that are
            both of type str::

                { ...
                    'mapping': [('pos', 'str'), ('neg', 'str')]
                ... }

        :param params:
            Similarly to *mapping*, this is a list of name and type of
            parameters that will be passed to the model's
            :meth:`~palladium.interfaces.Model.predict` method as keyword
            arguments.

        :param predict_proba:
            Instead of returning a single class (the default), when
            *predict_proba* is set to true, the result will instead
            contain a list of class probabilities.

        :param unwrap_sample:
            When working with text, scikit-learn and others will
            sometimes expect the input to be a 1d array of strings
            rather than a 2d array.  Setting *unwrap_sample* to true
            will use this representation.
        """
        self.mapping = mapping
        self.params = params
        self.entry_point = entry_point
        self.decorator_list_name = decorator_list_name
        self.predict_proba = predict_proba
        self.unwrap_sample = unwrap_sample
        # Any additional keyword arguments become instance attributes;
        # this is how arbitrary configuration keys reach the service.
        vars(self).update(kwargs)

    def initialize_component(self, config):
        # Hook called by the configuration machinery: registers this
        # service's Flask route so requests to *entry_point* reach it.
        create_predict_function(
            self.entry_point, self, self.decorator_list_name, config)

    def __call__(self, model, request):
        # Top-level entry point: any exception is converted into a JSON
        # error response instead of propagating to the WSGI layer.
        try:
            return self.do(model, request)
        except Exception as e:
            return self.response_from_exception(e)

    def do(self, model, request):
        """Extract samples from *request*, predict, and build a response."""
        if request.method == 'GET':
            # GET carries a single sample in the query string.
            single = True
            samples = np.array([self.sample_from_data(model, request.args)])
        else:
            # Other methods (POST) carry a JSON list of samples (batch).
            single = False
            samples = []
            for data in request.json:
                samples.append(self.sample_from_data(model, data))
            samples = np.array(samples)
        params = self.params_from_data(model, request.args)
        y_pred = self.predict(model, samples, **params)
        return self.response_from_prediction(y_pred, single=single)

    def sample_from_data(self, model, data):
        """Convert incoming sample *data* into a numpy array.

        :param model:
            The :class:`~Model` instance to use for making predictions.
        :param data:
            A dict-like with the sample's data, typically retrieved from
            ``request.args`` or similar.
        """
        values = []
        for key, type_name in self.mapping:
            value_type = self.types[type_name]
            values.append(value_type(data[key]))
        if self.unwrap_sample:
            # 1-d representation, e.g. for text vectorizers that expect
            # a plain sequence rather than a 2-d array.
            assert len(values) == 1
            return np.array(values[0])
        else:
            return np.array(values, dtype=object)

    def params_from_data(self, model, data):
        """Retrieve additional parameters (keyword arguments) for
        ``model.predict`` from request *data*.

        :param model:
            The :class:`~Model` instance to use for making predictions.
        :param data:
            A dict-like with the parameter data, typically retrieved
            from ``request.args`` or similar.
        """
        params = {}
        for key, type_name in self.params:
            value_type = self.types[type_name]
            if key in data:
                params[key] = value_type(data[key])
            elif hasattr(model, key):
                # Fall back to a default stored on the model itself.
                params[key] = getattr(model, key)
        return params

    def predict(self, model, sample, **kwargs):
        # Dispatch to class probabilities or plain prediction, depending
        # on how the service was configured.
        if self.predict_proba:
            return model.predict_proba(sample, **kwargs)
        else:
            return model.predict(sample, **kwargs)

    def response_from_prediction(self, y_pred, single=True):
        """Turns a model's prediction in *y_pred* into a JSON
        response.
        """
        result = y_pred.tolist()
        if single:
            # A GET request predicts one sample; unwrap the batch of one.
            result = result[0]
        response = {
            'metadata': get_metadata(),
            'result': result,
        }
        return make_ujson_response(response, status_code=200)

    def response_from_exception(self, exc):
        # Map exception types onto the wire format: PredictError keeps
        # its own error code/message (500), BadRequest maps to 400, and
        # anything else is logged and reported generically (500).
        if isinstance(exc, PredictError):
            return make_ujson_response({
                'metadata': get_metadata(
                    error_code=exc.error_code,
                    error_message=exc.error_message,
                    status="ERROR"
                )
            }, status_code=500)
        elif isinstance(exc, BadRequest):
            return make_ujson_response({
                'metadata': get_metadata(
                    error_code=-1,
                    error_message="BadRequest: {}".format(exc.args),
                    status="ERROR"
                )
            }, status_code=400)
        else:
            logger.exception("Unexpected error")
            return make_ujson_response({
                'metadata': get_metadata(
                    error_code=-1,
                    error_message="{}: {}".format(
                        exc.__class__.__name__, str(exc)),
                    status="ERROR"
                )
            }, status_code=500)
def predict(model_persister, predict_service):
    """Read the active model and hand the current request to the service.

    Any exception escaping the persister or the service is logged and
    converted into a generic JSON error response with status 500.
    """
    try:
        active_model = model_persister.read()
        return predict_service(active_model, request)
    except Exception as exc:
        logger.exception("Unexpected error")
        return make_ujson_response({
            "status": "ERROR",
            "error_code": -1,
            "error_message": "{}: {}".format(exc.__class__.__name__, str(exc)),
        }, status_code=500)
@app.route('/alive')
@PluggableDecorator('alive_decorators')
@args_from_config
def alive(alive=None):
    """Health-check endpoint reporting memory usage and store contents.

    Responds with 200 when every required process-store entry is
    present, and with 503 as soon as one of them is missing.
    """
    if alive is None:
        alive = {}
    rss, vms = memory_usage_psutil()
    info = {
        'memory_usage': rss,  # resident set size
        'memory_usage_vms': vms,  # virtual memory size
        'palladium_version': __version__,
        'service_metadata': get_config().get('service_metadata', {}),
    }
    status_code = 200
    for attr in alive.get('process_store_required', ()):
        obj = process_store.get(attr)
        if obj is None:
            # Required entry missing: flag the service as unavailable.
            info[attr] = "N/A"
            status_code = 503
            continue
        entry = {'updated': process_store.mtime[attr].isoformat()}
        if hasattr(obj, '__metadata__'):
            entry['metadata'] = obj.__metadata__
        info[attr] = entry
    info['process_metadata'] = process_store['process_metadata']
    return make_ujson_response(info, status_code=status_code)
def devserver_cmd(argv=sys.argv[1:]):  # pragma: no cover
    # NOTE: the docstring below is a docopt specification parsed at
    # runtime via ``docopt(devserver_cmd.__doc__)``; its wording and
    # layout are part of the program's behavior -- do not reformat it.
    # Also note the default ``argv`` is captured once at import time.
    """\
    Serve the web API for development.

    Usage:
      pld-devserver [options]

    Options:
      -h --help               Show this screen.

      --host=<host>           The host to use [default: 0.0.0.0].

      --port=<port>           The port to use [default: 5000].

      --debug=<debug>         Whether or not to use debug mode [default: 0].
    """
    arguments = docopt(devserver_cmd.__doc__, argv=argv)
    initialize_config()
    app.run(
        host=arguments['--host'],
        port=int(arguments['--port']),
        debug=int(arguments['--debug']),
    )
class PredictStream:
    """A class that helps make predictions through stdin and stdout.
    """

    def __init__(self):
        # Model and service are resolved once at startup from the active
        # configuration; the model is loaded eagerly.
        self.model = get_config()['model_persister'].read()
        self.predict_service = get_config()['predict_service']

    def process_line(self, line):
        """Parse one JSON line (a list of sample dicts) and predict.

        Returns the raw prediction result for the whole batch.
        """
        predict_service = self.predict_service
        datas = ujson.loads(line)
        samples = [predict_service.sample_from_data(self.model, data)
                   for data in datas]
        samples = np.array(samples)
        # NOTE: prediction parameters are taken from the first row only
        # and applied to the entire batch.
        params = predict_service.params_from_data(self.model, datas[0])
        return predict_service.predict(self.model, samples, **params)

    def listen(self, io_in, io_out, io_err):
        """Listens to provided io stream and writes predictions
        to output. In case of errors, the error stream will be used.
        """
        for line in io_in:
            # The literal input 'exit' (any case) terminates the loop.
            if line.strip().lower() == 'exit':
                break
            try:
                y_pred = self.process_line(line)
            except Exception as e:
                # Keep stdout line-aligned with the input by emitting an
                # empty result list; details go to the error stream.
                io_out.write('[]\n')
                io_err.write(
                    "Error while processing input row: {}"
                    "{}: {}\n".format(line, type(e), e))
                io_err.flush()
            else:
                io_out.write(ujson.dumps(y_pred.tolist()))
                io_out.write('\n')
                io_out.flush()
def stream_cmd(argv=sys.argv[1:]):  # pragma: no cover
    # NOTE: the docstring below is a docopt specification parsed at
    # runtime via ``docopt(stream_cmd.__doc__)``; its wording and layout
    # are part of the program's behavior -- do not reformat it.
    """\
    Start the streaming server, which listens to stdin, processes line
    by line, and returns predictions.

    The input should consist of a list of json objects, where each object
    will result in a prediction.  Each line is processed in a batch.

    Example input (must be on a single line):

      [{"sepal length": 1.0, "sepal width": 1.1, "petal length": 0.7,
      "petal width": 5}, {"sepal length": 1.0, "sepal width": 8.0,
      "petal length": 1.4, "petal width": 5}]

    Example output:

      ["Iris-virginica","Iris-setosa"]

    An input line with the word 'exit' will quit the streaming server.

    Usage:
      pld-stream [options]

    Options:
      -h --help                  Show this screen.
    """
    # Result is unused: docopt only validates argv / handles --help here.
    docopt(stream_cmd.__doc__, argv=argv)
    initialize_config()
    stream = PredictStream()
    stream.listen(sys.stdin, sys.stdout, sys.stderr)
@app.route('/list')
@PluggableDecorator('list_decorators')
@args_from_config
def list(model_persister):
    """Return all known models and persister properties as JSON.

    NOTE: the name shadows the builtin ``list``; it is kept because the
    ``activate`` view calls this function by name.
    """
    models = model_persister.list_models()
    properties = model_persister.list_properties()
    return make_ujson_response({
        'models': models,
        'properties': properties,
    })
@PluggableDecorator('server_fit_decorators')
@args_from_config
def fit():
    """Kick off an asynchronous model fit and return its job id.

    Recognized form fields are converted to their Python types and
    forwarded to :func:`palladium.fit.fit` as keyword arguments.
    """
    def as_bool(value):
        # Same truthy spellings as the original lambda converters.
        return value.lower() in ('1', 't', 'true')

    param_converters = {
        'persist': as_bool,
        'activate': as_bool,
        'evaluate': as_bool,
        'persist_if_better_than': float,
    }
    params = {}
    for name, convert in param_converters.items():
        if name in request.form:
            params[name] = convert(request.form[name])
    _thread, job_id = run_job(fit_base, **params)
    return make_ujson_response({'job_id': job_id}, status_code=200)
@PluggableDecorator('update_model_cache_decorators')
@args_from_config
def update_model_cache(model_persister):
    """Schedule a cache refresh on the persister, if it supports one.

    Responds with 503 when the persister has no ``update_cache`` method.
    """
    update = getattr(model_persister, 'update_cache', None)
    if update is None:
        return make_ujson_response({}, status_code=503)
    _thread, job_id = run_job(update)
    return make_ujson_response({'job_id': job_id}, status_code=200)
@PluggableDecorator('activate_decorators')
def activate():
    """Activate the model version given in the POSTed form data.

    On success the response is the same listing the ``/list`` view
    produces; an unknown version yields an empty 503 response.
    """
    version = int(request.form['model_version'])
    try:
        activate_base(model_version=version)
    except LookupError:
        return make_ujson_response({}, status_code=503)
    # ``list`` here is the sibling view function, not the builtin.
    return list()
def add_url_rule(rule, endpoint=None, view_func=None, app=app, **options):
    """Register *view_func* for *rule*, resolving dotted-name strings.

    A ``view_func`` given as a string such as ``'mypkg.views.index'``
    is imported via :func:`resolve_dotted_name` before registration.
    """
    resolved = (
        resolve_dotted_name(view_func)
        if isinstance(view_func, str)
        else view_func
    )
    app.add_url_rule(rule, endpoint=endpoint, view_func=resolved, **options)
|
ottogroup/palladium | palladium/server.py | devserver_cmd | python | def devserver_cmd(argv=sys.argv[1:]): # pragma: no cover
arguments = docopt(devserver_cmd.__doc__, argv=argv)
initialize_config()
app.run(
host=arguments['--host'],
port=int(arguments['--port']),
debug=int(arguments['--debug']),
) | \
Serve the web API for development.
Usage:
pld-devserver [options]
Options:
-h --help Show this screen.
--host=<host> The host to use [default: 0.0.0.0].
--port=<port> The port to use [default: 5000].
--debug=<debug> Whether or not to use debug mode [default: 0]. | train | https://github.com/ottogroup/palladium/blob/f3a4372fba809efbd8da7c979a8c6faff04684dd/palladium/server.py#L299-L321 | [
"def initialize_config(**extra):\n if _config.initialized:\n raise RuntimeError(\"Configuration was already initialized\")\n return get_config(**extra)\n"
] | """HTTP API implementation.
"""
import sys
from docopt import docopt
from flask import Flask
from flask import make_response
from flask import request
import numpy as np
import ujson
from werkzeug.exceptions import BadRequest
from . import __version__
from .fit import activate as activate_base
from .fit import fit as fit_base
from .interfaces import PredictError
from .util import args_from_config
from .util import get_config
from .util import get_metadata
from .util import initialize_config
from .util import logger
from .util import memory_usage_psutil
from .util import PluggableDecorator
from .util import process_store
from .util import run_job
from .util import resolve_dotted_name
app = Flask(__name__)
def make_ujson_response(obj, status_code=200):
    """Encode *obj* as JSON and wrap it in a Flask response.

    :param obj: Any ujson-serializable object.
    :param int status_code: HTTP status code for the response.
    :return: A Flask response with a UTF-8 JSON body.
    """
    body = ujson.encode(obj, ensure_ascii=False, double_precision=-1)
    response = make_response(body)
    response.mimetype = 'application/json'
    response.content_type = 'application/json; charset=utf-8'
    response.status_code = status_code
    return response
class PredictService:
    """A default :class:`palladium.interfaces.PredictService`
    implementation.

    Aims to work out of the box for the most standard use cases.
    Allows overriding of specific parts of its logic by using granular
    methods to compose the work.
    """
    # Converters for turning raw request strings into Python values; the
    # keys are the type names usable in *mapping* and *params* entries.
    types = {
        'float': float,
        'int': int,
        'str': str,
        'bool': lambda x: x.lower() == 'true',
    }

    def __init__(
        self,
        mapping,
        params=(),
        entry_point='/predict',
        decorator_list_name='predict_decorators',
        predict_proba=False,
        unwrap_sample=False,
        **kwargs
    ):
        """
        :param mapping:
            A list of query parameters and their type that should be
            included in the request.  These will be processed in the
            :meth:`sample_from_data` method to construct a sample
            that can be used for prediction.  An example that expects
            two request parameters called ``pos`` and ``neg`` that are
            both of type str::

                { ...
                    'mapping': [('pos', 'str'), ('neg', 'str')]
                ... }

        :param params:
            Similarly to *mapping*, this is a list of name and type of
            parameters that will be passed to the model's
            :meth:`~palladium.interfaces.Model.predict` method as keyword
            arguments.

        :param predict_proba:
            Instead of returning a single class (the default), when
            *predict_proba* is set to true, the result will instead
            contain a list of class probabilities.

        :param unwrap_sample:
            When working with text, scikit-learn and others will
            sometimes expect the input to be a 1d array of strings
            rather than a 2d array.  Setting *unwrap_sample* to true
            will use this representation.
        """
        self.mapping = mapping
        self.params = params
        self.entry_point = entry_point
        self.decorator_list_name = decorator_list_name
        self.predict_proba = predict_proba
        self.unwrap_sample = unwrap_sample
        # Any additional keyword arguments become instance attributes;
        # this is how arbitrary configuration keys reach the service.
        vars(self).update(kwargs)

    def initialize_component(self, config):
        # Hook called by the configuration machinery: registers this
        # service's Flask route so requests to *entry_point* reach it.
        create_predict_function(
            self.entry_point, self, self.decorator_list_name, config)

    def __call__(self, model, request):
        # Top-level entry point: any exception is converted into a JSON
        # error response instead of propagating to the WSGI layer.
        try:
            return self.do(model, request)
        except Exception as e:
            return self.response_from_exception(e)

    def do(self, model, request):
        """Extract samples from *request*, predict, and build a response."""
        if request.method == 'GET':
            # GET carries a single sample in the query string.
            single = True
            samples = np.array([self.sample_from_data(model, request.args)])
        else:
            # Other methods (POST) carry a JSON list of samples (batch).
            single = False
            samples = []
            for data in request.json:
                samples.append(self.sample_from_data(model, data))
            samples = np.array(samples)
        params = self.params_from_data(model, request.args)
        y_pred = self.predict(model, samples, **params)
        return self.response_from_prediction(y_pred, single=single)

    def sample_from_data(self, model, data):
        """Convert incoming sample *data* into a numpy array.

        :param model:
            The :class:`~Model` instance to use for making predictions.
        :param data:
            A dict-like with the sample's data, typically retrieved from
            ``request.args`` or similar.
        """
        values = []
        for key, type_name in self.mapping:
            value_type = self.types[type_name]
            values.append(value_type(data[key]))
        if self.unwrap_sample:
            # 1-d representation, e.g. for text vectorizers that expect
            # a plain sequence rather than a 2-d array.
            assert len(values) == 1
            return np.array(values[0])
        else:
            return np.array(values, dtype=object)

    def params_from_data(self, model, data):
        """Retrieve additional parameters (keyword arguments) for
        ``model.predict`` from request *data*.

        :param model:
            The :class:`~Model` instance to use for making predictions.
        :param data:
            A dict-like with the parameter data, typically retrieved
            from ``request.args`` or similar.
        """
        params = {}
        for key, type_name in self.params:
            value_type = self.types[type_name]
            if key in data:
                params[key] = value_type(data[key])
            elif hasattr(model, key):
                # Fall back to a default stored on the model itself.
                params[key] = getattr(model, key)
        return params

    def predict(self, model, sample, **kwargs):
        # Dispatch to class probabilities or plain prediction, depending
        # on how the service was configured.
        if self.predict_proba:
            return model.predict_proba(sample, **kwargs)
        else:
            return model.predict(sample, **kwargs)

    def response_from_prediction(self, y_pred, single=True):
        """Turns a model's prediction in *y_pred* into a JSON
        response.
        """
        result = y_pred.tolist()
        if single:
            # A GET request predicts one sample; unwrap the batch of one.
            result = result[0]
        response = {
            'metadata': get_metadata(),
            'result': result,
        }
        return make_ujson_response(response, status_code=200)

    def response_from_exception(self, exc):
        # Map exception types onto the wire format: PredictError keeps
        # its own error code/message (500), BadRequest maps to 400, and
        # anything else is logged and reported generically (500).
        if isinstance(exc, PredictError):
            return make_ujson_response({
                'metadata': get_metadata(
                    error_code=exc.error_code,
                    error_message=exc.error_message,
                    status="ERROR"
                )
            }, status_code=500)
        elif isinstance(exc, BadRequest):
            return make_ujson_response({
                'metadata': get_metadata(
                    error_code=-1,
                    error_message="BadRequest: {}".format(exc.args),
                    status="ERROR"
                )
            }, status_code=400)
        else:
            logger.exception("Unexpected error")
            return make_ujson_response({
                'metadata': get_metadata(
                    error_code=-1,
                    error_message="{}: {}".format(
                        exc.__class__.__name__, str(exc)),
                    status="ERROR"
                )
            }, status_code=500)
def predict(model_persister, predict_service):
    """Read the active model and hand the current request to the service.

    Any exception escaping the persister or the service is logged and
    converted into a generic JSON error response with status 500.
    """
    try:
        active_model = model_persister.read()
        return predict_service(active_model, request)
    except Exception as exc:
        logger.exception("Unexpected error")
        return make_ujson_response({
            "status": "ERROR",
            "error_code": -1,
            "error_message": "{}: {}".format(exc.__class__.__name__, str(exc)),
        }, status_code=500)
@app.route('/alive')
@PluggableDecorator('alive_decorators')
@args_from_config
def alive(alive=None):
    """Health-check endpoint reporting memory usage and store contents.

    Responds with 200 when every required process-store entry is
    present, and with 503 as soon as one of them is missing.
    """
    if alive is None:
        alive = {}
    rss, vms = memory_usage_psutil()
    info = {
        'memory_usage': rss,  # resident set size
        'memory_usage_vms': vms,  # virtual memory size
        'palladium_version': __version__,
        'service_metadata': get_config().get('service_metadata', {}),
    }
    status_code = 200
    for attr in alive.get('process_store_required', ()):
        obj = process_store.get(attr)
        if obj is None:
            # Required entry missing: flag the service as unavailable.
            info[attr] = "N/A"
            status_code = 503
            continue
        entry = {'updated': process_store.mtime[attr].isoformat()}
        if hasattr(obj, '__metadata__'):
            entry['metadata'] = obj.__metadata__
        info[attr] = entry
    info['process_metadata'] = process_store['process_metadata']
    return make_ujson_response(info, status_code=status_code)
def create_predict_function(
        route, predict_service, decorator_list_name, config):
    """Creates a predict function and registers it to
    the Flask app using the route decorator.

    :param str route:
        Path of the entry point.
    :param palladium.interfaces.PredictService predict_service:
        The predict service to be registered to this entry point.
    :param str decorator_list_name:
        The decorator list to be used for this predict service.  It is
        OK if there is no such entry in the active Palladium config.
    :return:
        A predict service function that will be used to process
        predict requests.
    """
    # Resolved once at registration time; the closure below reuses the
    # same persister object for every request.
    model_persister = config.get('model_persister')

    # The Flask endpoint name is set to the route path itself, so each
    # entry point registered this way gets its own distinct endpoint.
    @app.route(route, methods=['GET', 'POST'], endpoint=route)
    @PluggableDecorator(decorator_list_name)
    def predict_func():
        return predict(model_persister, predict_service)
    return predict_func
class PredictStream:
    """A class that helps make predictions through stdin and stdout.
    """

    def __init__(self):
        # Model and service are resolved once at startup from the active
        # configuration; the model is loaded eagerly.
        self.model = get_config()['model_persister'].read()
        self.predict_service = get_config()['predict_service']

    def process_line(self, line):
        """Parse one JSON line (a list of sample dicts) and predict.

        Returns the raw prediction result for the whole batch.
        """
        predict_service = self.predict_service
        datas = ujson.loads(line)
        samples = [predict_service.sample_from_data(self.model, data)
                   for data in datas]
        samples = np.array(samples)
        # NOTE: prediction parameters are taken from the first row only
        # and applied to the entire batch.
        params = predict_service.params_from_data(self.model, datas[0])
        return predict_service.predict(self.model, samples, **params)

    def listen(self, io_in, io_out, io_err):
        """Listens to provided io stream and writes predictions
        to output. In case of errors, the error stream will be used.
        """
        for line in io_in:
            # The literal input 'exit' (any case) terminates the loop.
            if line.strip().lower() == 'exit':
                break
            try:
                y_pred = self.process_line(line)
            except Exception as e:
                # Keep stdout line-aligned with the input by emitting an
                # empty result list; details go to the error stream.
                io_out.write('[]\n')
                io_err.write(
                    "Error while processing input row: {}"
                    "{}: {}\n".format(line, type(e), e))
                io_err.flush()
            else:
                io_out.write(ujson.dumps(y_pred.tolist()))
                io_out.write('\n')
                io_out.flush()
def stream_cmd(argv=sys.argv[1:]):  # pragma: no cover
    # NOTE: the docstring below is a docopt specification parsed at
    # runtime via ``docopt(stream_cmd.__doc__)``; its wording and layout
    # are part of the program's behavior -- do not reformat it.
    """\
    Start the streaming server, which listens to stdin, processes line
    by line, and returns predictions.

    The input should consist of a list of json objects, where each object
    will result in a prediction.  Each line is processed in a batch.

    Example input (must be on a single line):

      [{"sepal length": 1.0, "sepal width": 1.1, "petal length": 0.7,
      "petal width": 5}, {"sepal length": 1.0, "sepal width": 8.0,
      "petal length": 1.4, "petal width": 5}]

    Example output:

      ["Iris-virginica","Iris-setosa"]

    An input line with the word 'exit' will quit the streaming server.

    Usage:
      pld-stream [options]

    Options:
      -h --help                  Show this screen.
    """
    # Result is unused: docopt only validates argv / handles --help here.
    docopt(stream_cmd.__doc__, argv=argv)
    initialize_config()
    stream = PredictStream()
    stream.listen(sys.stdin, sys.stdout, sys.stderr)
@app.route('/list')
@PluggableDecorator('list_decorators')
@args_from_config
def list(model_persister):
    """Return all known models and persister properties as JSON.

    NOTE: the name shadows the builtin ``list``; it is kept because the
    ``activate`` view calls this function by name.
    """
    models = model_persister.list_models()
    properties = model_persister.list_properties()
    return make_ujson_response({
        'models': models,
        'properties': properties,
    })
@PluggableDecorator('server_fit_decorators')
@args_from_config
def fit():
    """Kick off an asynchronous model fit and return its job id.

    Recognized form fields are converted to their Python types and
    forwarded to :func:`palladium.fit.fit` as keyword arguments.
    """
    def as_bool(value):
        # Same truthy spellings as the original lambda converters.
        return value.lower() in ('1', 't', 'true')

    param_converters = {
        'persist': as_bool,
        'activate': as_bool,
        'evaluate': as_bool,
        'persist_if_better_than': float,
    }
    params = {}
    for name, convert in param_converters.items():
        if name in request.form:
            params[name] = convert(request.form[name])
    _thread, job_id = run_job(fit_base, **params)
    return make_ujson_response({'job_id': job_id}, status_code=200)
@PluggableDecorator('update_model_cache_decorators')
@args_from_config
def update_model_cache(model_persister):
    """Schedule a cache refresh on the persister, if it supports one.

    Responds with 503 when the persister has no ``update_cache`` method.
    """
    update = getattr(model_persister, 'update_cache', None)
    if update is None:
        return make_ujson_response({}, status_code=503)
    _thread, job_id = run_job(update)
    return make_ujson_response({'job_id': job_id}, status_code=200)
@PluggableDecorator('activate_decorators')
def activate():
    """Activate the model version given in the POSTed form data.

    On success the response is the same listing the ``/list`` view
    produces; an unknown version yields an empty 503 response.
    """
    version = int(request.form['model_version'])
    try:
        activate_base(model_version=version)
    except LookupError:
        return make_ujson_response({}, status_code=503)
    # ``list`` here is the sibling view function, not the builtin.
    return list()
def add_url_rule(rule, endpoint=None, view_func=None, app=app, **options):
    """Register *view_func* for *rule*, resolving dotted-name strings.

    A ``view_func`` given as a string such as ``'mypkg.views.index'``
    is imported via :func:`resolve_dotted_name` before registration.
    """
    resolved = (
        resolve_dotted_name(view_func)
        if isinstance(view_func, str)
        else view_func
    )
    app.add_url_rule(rule, endpoint=endpoint, view_func=resolved, **options)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.