diff --git a/.gitattributes b/.gitattributes index e0e7e5ab247681b4141fc246a577c526e3dcdcdb..c3ef1539cd6e5610ecb173fd0448181b2512c39a 100644 --- a/.gitattributes +++ b/.gitattributes @@ -1694,3 +1694,4 @@ vllm/lib/python3.10/site-packages/sympy/solvers/__pycache__/solvers.cpython-310. vllm/lib/python3.10/site-packages/sympy/solvers/diophantine/__pycache__/diophantine.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text parrot/lib/python3.10/site-packages/scipy/linalg/_solve_toeplitz.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text vllm/lib/python3.10/site-packages/sympy/solvers/tests/__pycache__/test_solveset.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text +vllm/lib/python3.10/site-packages/sympy/solvers/ode/__pycache__/ode.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text diff --git a/vllm/lib/python3.10/site-packages/et_xmlfile/__init__.py b/vllm/lib/python3.10/site-packages/et_xmlfile/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..776a146c7ba8812ff7b37b1a028707af91e92d07 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/et_xmlfile/__init__.py @@ -0,0 +1,8 @@ +from .xmlfile import xmlfile + +# constants +__version__ = '2.0.0' +__author__ = 'See AUTHORS.txt' +__license__ = 'MIT' +__author_email__ = 'charlie.clark@clark-consulting.eu' +__url__ = 'https://foss.heptapod.net/openpyxl/et_xmlfile' diff --git a/vllm/lib/python3.10/site-packages/et_xmlfile/__pycache__/__init__.cpython-310.pyc b/vllm/lib/python3.10/site-packages/et_xmlfile/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6e564cd4d74fff9b8a613f867470b5c49f8340b1 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/et_xmlfile/__pycache__/__init__.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/et_xmlfile/__pycache__/incremental_tree.cpython-310.pyc b/vllm/lib/python3.10/site-packages/et_xmlfile/__pycache__/incremental_tree.cpython-310.pyc new file mode 100644 
index 0000000000000000000000000000000000000000..68c4032d066b09329c085bef686a9a45f10e0994 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/et_xmlfile/__pycache__/incremental_tree.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/et_xmlfile/__pycache__/xmlfile.cpython-310.pyc b/vllm/lib/python3.10/site-packages/et_xmlfile/__pycache__/xmlfile.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c7d41e631652dffe2bd29b279c5b347940b42847 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/et_xmlfile/__pycache__/xmlfile.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/et_xmlfile/incremental_tree.py b/vllm/lib/python3.10/site-packages/et_xmlfile/incremental_tree.py new file mode 100644 index 0000000000000000000000000000000000000000..b735c1b5c1e59e710af2161f85fd425a53d4117e --- /dev/null +++ b/vllm/lib/python3.10/site-packages/et_xmlfile/incremental_tree.py @@ -0,0 +1,917 @@ +# Code modified from cPython's Lib/xml/etree/ElementTree.py +# The write() code is modified to allow specifying a particular namespace +# uri -> prefix mapping. +# +# --------------------------------------------------------------------- +# Licensed to PSF under a Contributor Agreement. +# See https://www.python.org/psf/license for licensing details. +# +# ElementTree +# Copyright (c) 1999-2008 by Fredrik Lundh. All rights reserved. 
+# +# fredrik@pythonware.com +# http://www.pythonware.com +# -------------------------------------------------------------------- +# The ElementTree toolkit is +# +# Copyright (c) 1999-2008 by Fredrik Lundh +# +# By obtaining, using, and/or copying this software and/or its +# associated documentation, you agree that you have read, understood, +# and will comply with the following terms and conditions: +# +# Permission to use, copy, modify, and distribute this software and +# its associated documentation for any purpose and without fee is +# hereby granted, provided that the above copyright notice appears in +# all copies, and that both that copyright notice and this permission +# notice appear in supporting documentation, and that the name of +# Secret Labs AB or the author not be used in advertising or publicity +# pertaining to distribution of the software without specific, written +# prior permission. +# +# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD +# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT- +# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR +# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY +# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS +# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE +# OF THIS SOFTWARE. +# -------------------------------------------------------------------- +import contextlib +import io + +import xml.etree.ElementTree as ET + + +def current_global_nsmap(): + return { + prefix: uri for uri, prefix in ET._namespace_map.items() + } + + +class IncrementalTree(ET.ElementTree): + + def write( + self, + file_or_filename, + encoding=None, + xml_declaration=None, + default_namespace=None, + method=None, + *, + short_empty_elements=True, + nsmap=None, + root_ns_only=False, + minimal_ns_only=False, + ): + """Write element tree to a file as XML. 
+ + Arguments: + *file_or_filename* -- file name or a file object opened for writing + + *encoding* -- the output encoding (default: US-ASCII) + + *xml_declaration* -- bool indicating if an XML declaration should be + added to the output. If None, an XML declaration + is added if encoding IS NOT either of: + US-ASCII, UTF-8, or Unicode + + *default_namespace* -- sets the default XML namespace (for "xmlns"). + Takes precedence over any default namespace + provided in nsmap or + xml.etree.ElementTree.register_namespace(). + + *method* -- either "xml" (default), "html, "text", or "c14n" + + *short_empty_elements* -- controls the formatting of elements + that contain no content. If True (default) + they are emitted as a single self-closed + tag, otherwise they are emitted as a pair + of start/end tags + + *nsmap* -- a mapping of namespace prefixes to URIs. These take + precedence over any mappings registered using + xml.etree.ElementTree.register_namespace(). The + default_namespace argument, if supplied, takes precedence + over any default namespace supplied in nsmap. All supplied + namespaces will be declared on the root element, even if + unused in the document. + + *root_ns_only* -- bool indicating namespace declrations should only + be written on the root element. This requires two + passes of the xml tree adding additional time to + the writing process. This is primarily meant to + mimic xml.etree.ElementTree's behaviour. + + *minimal_ns_only* -- bool indicating only namespaces that were used + to qualify elements or attributes should be + declared. All namespace declarations will be + written on the root element regardless of the + value of the root_ns_only arg. Requires two + passes of the xml tree adding additional time to + the writing process. 
+ + """ + if not method: + method = "xml" + elif method not in ("text", "xml", "html"): + raise ValueError("unknown method %r" % method) + if not encoding: + encoding = "us-ascii" + + with _get_writer(file_or_filename, encoding) as (write, declared_encoding): + if method == "xml" and ( + xml_declaration + or ( + xml_declaration is None + and encoding.lower() != "unicode" + and declared_encoding.lower() not in ("utf-8", "us-ascii") + ) + ): + write("\n" % (declared_encoding,)) + if method == "text": + ET._serialize_text(write, self._root) + else: + if method == "xml": + is_html = False + else: + is_html = True + if nsmap: + if None in nsmap: + raise ValueError( + 'Found None as default nsmap prefix in nsmap. ' + 'Use "" as the default namespace prefix.' + ) + new_nsmap = nsmap.copy() + else: + new_nsmap = {} + if default_namespace: + new_nsmap[""] = default_namespace + if root_ns_only or minimal_ns_only: + # _namespaces returns a mapping of only the namespaces that + # were used. + new_nsmap = _namespaces( + self._root, + default_namespace, + new_nsmap, + ) + if not minimal_ns_only: + if nsmap: + # We want all namespaces defined in the provided + # nsmap to be declared regardless of whether + # they've been used. + new_nsmap.update(nsmap) + if default_namespace: + new_nsmap[""] = default_namespace + global_nsmap = { + prefix: uri for uri, prefix in ET._namespace_map.items() + } + if None in global_nsmap: + raise ValueError( + 'Found None as default nsmap prefix in nsmap registered with ' + 'register_namespace. Use "" for the default namespace prefix.' 
+ ) + nsmap_scope = {} + _serialize_ns_xml( + write, + self._root, + nsmap_scope, + global_nsmap, + is_html=is_html, + is_root=True, + short_empty_elements=short_empty_elements, + new_nsmap=new_nsmap, + ) + + +def _make_new_ns_prefix( + nsmap_scope, + global_prefixes, + local_nsmap=None, + default_namespace=None, +): + i = len(nsmap_scope) + if default_namespace is not None and "" not in nsmap_scope: + # Keep the same numbering scheme as python which assumes the default + # namespace is present if supplied. + i += 1 + + while True: + prefix = f"ns{i}" + if ( + prefix not in nsmap_scope + and prefix not in global_prefixes + and ( + not local_nsmap or prefix not in local_nsmap + ) + ): + return prefix + i += 1 + + +def _get_or_create_prefix( + uri, + nsmap_scope, + global_nsmap, + new_namespace_prefixes, + uri_to_prefix, + for_default_namespace_attr_prefix=False, +): + """Find a prefix that doesn't conflict with the ns scope or create a new prefix + + This function mutates nsmap_scope, global_nsmap, new_namespace_prefixes and + uri_to_prefix. It is intended to keep state in _serialize_ns_xml consistent + while deduplicating the house keeping code or updating these dictionaries. + """ + # Check if we can reuse an existing (global) prefix within the current + # namespace scope. There maybe many prefixes pointing to a single URI by + # this point and we need to select a prefix that is not in use in the + # current scope. + for global_prefix, global_uri in global_nsmap.items(): + if uri == global_uri and global_prefix not in nsmap_scope: + prefix = global_prefix + break + else: # no break + # We couldn't find a suitable existing prefix for this namespace scope, + # let's create a new one. 
+ prefix = _make_new_ns_prefix(nsmap_scope, global_prefixes=global_nsmap) + global_nsmap[prefix] = uri + nsmap_scope[prefix] = uri + if not for_default_namespace_attr_prefix: + # Don't override the actual default namespace prefix + uri_to_prefix[uri] = prefix + if prefix != "xml": + new_namespace_prefixes.add(prefix) + return prefix + + +def _find_default_namespace_attr_prefix( + default_namespace, + nsmap, + local_nsmap, + global_prefixes, + provided_default_namespace=None, +): + # Search the provided nsmap for any prefixes for this uri that aren't the + # default namespace "" + for prefix, uri in nsmap.items(): + if uri == default_namespace and prefix != "": + return prefix + + for prefix, uri in local_nsmap.items(): + if uri == default_namespace and prefix != "": + return prefix + + # _namespace_map is a 1:1 mapping of uri -> prefix + prefix = ET._namespace_map.get(default_namespace) + if prefix and prefix not in nsmap: + return prefix + + return _make_new_ns_prefix( + nsmap, + global_prefixes, + local_nsmap, + provided_default_namespace, + ) + + +def process_attribs( + elem, + is_nsmap_scope_changed, + default_ns_attr_prefix, + nsmap_scope, + global_nsmap, + new_namespace_prefixes, + uri_to_prefix, +): + item_parts = [] + for k, v in elem.items(): + if isinstance(k, ET.QName): + k = k.text + try: + if k[:1] == "{": + uri_and_name = k[1:].rsplit("}", 1) + try: + prefix = uri_to_prefix[uri_and_name[0]] + except KeyError: + if not is_nsmap_scope_changed: + # We're about to mutate the these dicts so + # let's copy them first. 
We don't have to + # recompute other mappings as we're looking up + # or creating a new prefix + nsmap_scope = nsmap_scope.copy() + uri_to_prefix = uri_to_prefix.copy() + is_nsmap_scope_changed = True + prefix = _get_or_create_prefix( + uri_and_name[0], + nsmap_scope, + global_nsmap, + new_namespace_prefixes, + uri_to_prefix, + ) + + if not prefix: + if default_ns_attr_prefix: + prefix = default_ns_attr_prefix + else: + for prefix, known_uri in nsmap_scope.items(): + if known_uri == uri_and_name[0] and prefix != "": + default_ns_attr_prefix = prefix + break + else: # no break + if not is_nsmap_scope_changed: + # We're about to mutate the these dicts so + # let's copy them first. We don't have to + # recompute other mappings as we're looking up + # or creating a new prefix + nsmap_scope = nsmap_scope.copy() + uri_to_prefix = uri_to_prefix.copy() + is_nsmap_scope_changed = True + prefix = _get_or_create_prefix( + uri_and_name[0], + nsmap_scope, + global_nsmap, + new_namespace_prefixes, + uri_to_prefix, + for_default_namespace_attr_prefix=True, + ) + default_ns_attr_prefix = prefix + k = f"{prefix}:{uri_and_name[1]}" + except TypeError: + ET._raise_serialization_error(k) + + if isinstance(v, ET.QName): + if v.text[:1] != "{": + v = v.text + else: + uri_and_name = v.text[1:].rsplit("}", 1) + try: + prefix = uri_to_prefix[uri_and_name[0]] + except KeyError: + if not is_nsmap_scope_changed: + # We're about to mutate the these dicts so + # let's copy them first. 
We don't have to + # recompute other mappings as we're looking up + # or creating a new prefix + nsmap_scope = nsmap_scope.copy() + uri_to_prefix = uri_to_prefix.copy() + is_nsmap_scope_changed = True + prefix = _get_or_create_prefix( + uri_and_name[0], + nsmap_scope, + global_nsmap, + new_namespace_prefixes, + uri_to_prefix, + ) + v = f"{prefix}:{uri_and_name[1]}" + item_parts.append((k, v)) + return item_parts, default_ns_attr_prefix, nsmap_scope + + +def write_elem_start( + write, + elem, + nsmap_scope, + global_nsmap, + short_empty_elements, + is_html, + is_root=False, + uri_to_prefix=None, + default_ns_attr_prefix=None, + new_nsmap=None, + **kwargs, +): + """Write the opening tag (including self closing) and element text. + + Refer to _serialize_ns_xml for description of arguments. + + nsmap_scope should be an empty dictionary on first call. All nsmap prefixes + must be strings with the default namespace prefix represented by "". + + eg. + - (returns tag = 'foo') + - text (returns tag = 'foo') + - (returns tag = None) + + Returns: + tag: + The tag name to be closed or None if no closing required. + nsmap_scope: + The current nsmap after any prefix to uri additions from this + element. This is the input dict if unmodified or an updated copy. + default_ns_attr_prefix: + The prefix for the default namespace to use with attrs. + uri_to_prefix: + The current uri to prefix map after any uri to prefix additions + from this element. This is the input dict if unmodified or an + updated copy. + next_remains_root: + A bool indicating if the child element(s) should be treated as + their own roots. 
+ """ + tag = elem.tag + text = elem.text + + if tag is ET.Comment: + write("" % text) + tag = None + next_remains_root = False + elif tag is ET.ProcessingInstruction: + write("" % text) + tag = None + next_remains_root = False + else: + if new_nsmap: + is_nsmap_scope_changed = True + nsmap_scope = nsmap_scope.copy() + nsmap_scope.update(new_nsmap) + new_namespace_prefixes = set(new_nsmap.keys()) + new_namespace_prefixes.discard("xml") + # We need to recompute the uri to prefixes + uri_to_prefix = None + default_ns_attr_prefix = None + else: + is_nsmap_scope_changed = False + new_namespace_prefixes = set() + + if uri_to_prefix is None: + if None in nsmap_scope: + raise ValueError( + 'Found None as a namespace prefix. Use "" as the default namespace prefix.' + ) + uri_to_prefix = {uri: prefix for prefix, uri in nsmap_scope.items()} + if "" in nsmap_scope: + # There may be multiple prefixes for the default namespace but + # we want to make sure we preferentially use "" (for elements) + uri_to_prefix[nsmap_scope[""]] = "" + + if tag is None: + # tag supression where tag is set to None + # Don't change is_root so namespaces can be passed down + next_remains_root = is_root + if text: + write(ET._escape_cdata(text)) + else: + next_remains_root = False + if isinstance(tag, ET.QName): + tag = tag.text + try: + # These splits / fully qualified tag creationg are the + # bottleneck in this implementation vs the python + # implementation. + # The following split takes ~42ns with no uri and ~85ns if a + # prefix is present. If the uri was present, we then need to + # look up a prefix (~14ns) and create the fully qualified + # string (~41ns). This gives a total of ~140ns where a uri is + # present. 
+ # Python's implementation needs to preprocess the tree to + # create a dict of qname -> tag by traversing the tree which + # takes a bit of extra time but it quickly makes that back by + # only having to do a dictionary look up (~14ns) for each tag / + # attrname vs our splitting (~140ns). + # So here we have the flexibility of being able to redefine the + # uri a prefix points to midway through serialisation at the + # expense of performance (~10% slower for a 1mb file on my + # machine). + if tag[:1] == "{": + uri_and_name = tag[1:].rsplit("}", 1) + try: + prefix = uri_to_prefix[uri_and_name[0]] + except KeyError: + if not is_nsmap_scope_changed: + # We're about to mutate the these dicts so let's + # copy them first. We don't have to recompute other + # mappings as we're looking up or creating a new + # prefix + nsmap_scope = nsmap_scope.copy() + uri_to_prefix = uri_to_prefix.copy() + is_nsmap_scope_changed = True + prefix = _get_or_create_prefix( + uri_and_name[0], + nsmap_scope, + global_nsmap, + new_namespace_prefixes, + uri_to_prefix, + ) + if prefix: + tag = f"{prefix}:{uri_and_name[1]}" + else: + tag = uri_and_name[1] + elif "" in nsmap_scope: + raise ValueError( + "cannot use non-qualified names with default_namespace option" + ) + except TypeError: + ET._raise_serialization_error(tag) + + write("<" + tag) + + if elem.attrib: + item_parts, default_ns_attr_prefix, nsmap_scope = process_attribs( + elem, + is_nsmap_scope_changed, + default_ns_attr_prefix, + nsmap_scope, + global_nsmap, + new_namespace_prefixes, + uri_to_prefix, + ) + else: + item_parts = [] + if new_namespace_prefixes: + ns_attrs = [] + for k in sorted(new_namespace_prefixes): + v = nsmap_scope[k] + if k: + k = "xmlns:" + k + else: + k = "xmlns" + ns_attrs.append((k, v)) + if is_html: + write("".join([f' {k}="{ET._escape_attrib_html(v)}"' for k, v in ns_attrs])) + else: + write("".join([f' {k}="{ET._escape_attrib(v)}"' for k, v in ns_attrs])) + if item_parts: + if is_html: + 
write("".join([f' {k}="{ET._escape_attrib_html(v)}"' for k, v in item_parts])) + else: + write("".join([f' {k}="{ET._escape_attrib(v)}"' for k, v in item_parts])) + if is_html: + write(">") + ltag = tag.lower() + if text: + if ltag == "script" or ltag == "style": + write(text) + else: + write(ET._escape_cdata(text)) + if ltag in ET.HTML_EMPTY: + tag = None + elif text or len(elem) or not short_empty_elements: + write(">") + if text: + write(ET._escape_cdata(text)) + else: + tag = None + write(" />") + return ( + tag, + nsmap_scope, + default_ns_attr_prefix, + uri_to_prefix, + next_remains_root, + ) + + +def _serialize_ns_xml( + write, + elem, + nsmap_scope, + global_nsmap, + short_empty_elements, + is_html, + is_root=False, + uri_to_prefix=None, + default_ns_attr_prefix=None, + new_nsmap=None, + **kwargs, +): + """Serialize an element or tree using 'write' for output. + + Args: + write: + A function to write the xml to its destination. + elem: + The element to serialize. + nsmap_scope: + The current prefix to uri mapping for this element. This should be + an empty dictionary for the root element. Additional namespaces are + progressively added using the new_nsmap arg. + global_nsmap: + A dict copy of the globally registered _namespace_map in uri to + prefix form + short_empty_elements: + Controls the formatting of elements that contain no content. If True + (default) they are emitted as a single self-closed tag, otherwise + they are emitted as a pair of start/end tags. + is_html: + Set to True to serialize as HTML otherwise XML. + is_root: + Boolean indicating if this is a root element. + uri_to_prefix: + Current state of the mapping of uri to prefix. + default_ns_attr_prefix: + new_nsmap: + New prefix -> uri mapping to be applied to this element. 
+ """ + ( + tag, + nsmap_scope, + default_ns_attr_prefix, + uri_to_prefix, + next_remains_root, + ) = write_elem_start( + write, + elem, + nsmap_scope, + global_nsmap, + short_empty_elements, + is_html, + is_root, + uri_to_prefix, + default_ns_attr_prefix, + new_nsmap=new_nsmap, + ) + for e in elem: + _serialize_ns_xml( + write, + e, + nsmap_scope, + global_nsmap, + short_empty_elements, + is_html, + next_remains_root, + uri_to_prefix, + default_ns_attr_prefix, + new_nsmap=None, + ) + if tag: + write(f"") + if elem.tail: + write(ET._escape_cdata(elem.tail)) + + +def _qnames_iter(elem): + """Iterate through all the qualified names in elem""" + seen_el_qnames = set() + seen_other_qnames = set() + for this_elem in elem.iter(): + tag = this_elem.tag + if isinstance(tag, str): + if tag not in seen_el_qnames: + seen_el_qnames.add(tag) + yield tag, True + elif isinstance(tag, ET.QName): + tag = tag.text + if tag not in seen_el_qnames: + seen_el_qnames.add(tag) + yield tag, True + elif ( + tag is not None + and tag is not ET.ProcessingInstruction + and tag is not ET.Comment + ): + ET._raise_serialization_error(tag) + + for key, value in this_elem.items(): + if isinstance(key, ET.QName): + key = key.text + if key not in seen_other_qnames: + seen_other_qnames.add(key) + yield key, False + + if isinstance(value, ET.QName): + if value.text not in seen_other_qnames: + seen_other_qnames.add(value.text) + yield value.text, False + + text = this_elem.text + if isinstance(text, ET.QName): + if text.text not in seen_other_qnames: + seen_other_qnames.add(text.text) + yield text.text, False + + +def _namespaces( + elem, + default_namespace=None, + nsmap=None, +): + """Find all namespaces used in the document and return a prefix to uri map""" + if nsmap is None: + nsmap = {} + + out_nsmap = {} + + seen_uri_to_prefix = {} + # Multiple prefixes may be present for a single uri. This will select the + # last prefix found in nsmap for a given uri. 
+ local_prefix_map = {uri: prefix for prefix, uri in nsmap.items()} + if default_namespace is not None: + local_prefix_map[default_namespace] = "" + elif "" in nsmap: + # but we make sure the default prefix always take precedence + local_prefix_map[nsmap[""]] = "" + + global_prefixes = set(ET._namespace_map.values()) + has_unqual_el = False + default_namespace_attr_prefix = None + for qname, is_el in _qnames_iter(elem): + try: + if qname[:1] == "{": + uri_and_name = qname[1:].rsplit("}", 1) + + prefix = seen_uri_to_prefix.get(uri_and_name[0]) + if prefix is None: + prefix = local_prefix_map.get(uri_and_name[0]) + if prefix is None or prefix in out_nsmap: + prefix = ET._namespace_map.get(uri_and_name[0]) + if prefix is None or prefix in out_nsmap: + prefix = _make_new_ns_prefix( + out_nsmap, + global_prefixes, + nsmap, + default_namespace, + ) + if prefix or is_el: + out_nsmap[prefix] = uri_and_name[0] + seen_uri_to_prefix[uri_and_name[0]] = prefix + + if not is_el and not prefix and not default_namespace_attr_prefix: + # Find the alternative prefix to use with non-element + # names + default_namespace_attr_prefix = _find_default_namespace_attr_prefix( + uri_and_name[0], + out_nsmap, + nsmap, + global_prefixes, + default_namespace, + ) + out_nsmap[default_namespace_attr_prefix] = uri_and_name[0] + # Don't add this uri to prefix mapping as it might override + # the uri -> "" default mapping. We'll fix this up at the + # end of the fn. + # local_prefix_map[uri_and_name[0]] = default_namespace_attr_prefix + else: + if is_el: + has_unqual_el = True + except TypeError: + ET._raise_serialization_error(qname) + + if "" in out_nsmap and has_unqual_el: + # FIXME: can this be handled in XML 1.0? + raise ValueError( + "cannot use non-qualified names with default_namespace option" + ) + + # The xml prefix doesn't need to be declared but may have been used to + # prefix names. 
Let's remove it if it has been used + out_nsmap.pop("xml", None) + return out_nsmap + + +def tostring( + element, + encoding=None, + method=None, + *, + xml_declaration=None, + default_namespace=None, + short_empty_elements=True, + nsmap=None, + root_ns_only=False, + minimal_ns_only=False, + tree_cls=IncrementalTree, +): + """Generate string representation of XML element. + + All subelements are included. If encoding is "unicode", a string + is returned. Otherwise a bytestring is returned. + + *element* is an Element instance, *encoding* is an optional output + encoding defaulting to US-ASCII, *method* is an optional output which can + be one of "xml" (default), "html", "text" or "c14n", *default_namespace* + sets the default XML namespace (for "xmlns"). + + Returns an (optionally) encoded string containing the XML data. + + """ + stream = io.StringIO() if encoding == "unicode" else io.BytesIO() + tree_cls(element).write( + stream, + encoding, + xml_declaration=xml_declaration, + default_namespace=default_namespace, + method=method, + short_empty_elements=short_empty_elements, + nsmap=nsmap, + root_ns_only=root_ns_only, + minimal_ns_only=minimal_ns_only, + ) + return stream.getvalue() + + +def tostringlist( + element, + encoding=None, + method=None, + *, + xml_declaration=None, + default_namespace=None, + short_empty_elements=True, + nsmap=None, + root_ns_only=False, + minimal_ns_only=False, + tree_cls=IncrementalTree, +): + lst = [] + stream = ET._ListDataStream(lst) + tree_cls(element).write( + stream, + encoding, + xml_declaration=xml_declaration, + default_namespace=default_namespace, + method=method, + short_empty_elements=short_empty_elements, + nsmap=nsmap, + root_ns_only=root_ns_only, + minimal_ns_only=minimal_ns_only, + ) + return lst + + +def compat_tostring( + element, + encoding=None, + method=None, + *, + xml_declaration=None, + default_namespace=None, + short_empty_elements=True, + nsmap=None, + root_ns_only=True, + minimal_ns_only=False, + 
tree_cls=IncrementalTree, +): + """tostring with options that produce the same results as xml.etree.ElementTree.tostring + + root_ns_only=True is a bit slower than False as it needs to traverse the + tree one more time to collect all the namespaces. + """ + return tostring( + element, + encoding=encoding, + method=method, + xml_declaration=xml_declaration, + default_namespace=default_namespace, + short_empty_elements=short_empty_elements, + nsmap=nsmap, + root_ns_only=root_ns_only, + minimal_ns_only=minimal_ns_only, + tree_cls=tree_cls, + ) + + +# -------------------------------------------------------------------- +# serialization support + +@contextlib.contextmanager +def _get_writer(file_or_filename, encoding): + # Copied from Python 3.12 + # returns text write method and release all resources after using + try: + write = file_or_filename.write + except AttributeError: + # file_or_filename is a file name + if encoding.lower() == "unicode": + encoding = "utf-8" + with open(file_or_filename, "w", encoding=encoding, + errors="xmlcharrefreplace") as file: + yield file.write, encoding + else: + # file_or_filename is a file-like object + # encoding determines if it is a text or binary writer + if encoding.lower() == "unicode": + # use a text writer as is + yield write, getattr(file_or_filename, "encoding", None) or "utf-8" + else: + # wrap a binary writer with TextIOWrapper + with contextlib.ExitStack() as stack: + if isinstance(file_or_filename, io.BufferedIOBase): + file = file_or_filename + elif isinstance(file_or_filename, io.RawIOBase): + file = io.BufferedWriter(file_or_filename) + # Keep the original file open when the BufferedWriter is + # destroyed + stack.callback(file.detach) + else: + # This is to handle passed objects that aren't in the + # IOBase hierarchy, but just have a write method + file = io.BufferedIOBase() + file.writable = lambda: True + file.write = write + try: + # TextIOWrapper uses this methods to determine + # if BOM (for UTF-16, etc) 
should be added + file.seekable = file_or_filename.seekable + file.tell = file_or_filename.tell + except AttributeError: + pass + file = io.TextIOWrapper(file, + encoding=encoding, + errors="xmlcharrefreplace", + newline="\n") + # Keep the original file open when the TextIOWrapper is + # destroyed + stack.callback(file.detach) + yield file.write, encoding diff --git a/vllm/lib/python3.10/site-packages/et_xmlfile/xmlfile.py b/vllm/lib/python3.10/site-packages/et_xmlfile/xmlfile.py new file mode 100644 index 0000000000000000000000000000000000000000..9b8ce82fe5eb98902379360205c03d0067782b61 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/et_xmlfile/xmlfile.py @@ -0,0 +1,158 @@ +from __future__ import absolute_import +# Copyright (c) 2010-2015 openpyxl + +"""Implements the lxml.etree.xmlfile API using the standard library xml.etree""" + + +from contextlib import contextmanager + +from xml.etree.ElementTree import ( + Element, + _escape_cdata, +) + +from . import incremental_tree + + +class LxmlSyntaxError(Exception): + pass + + +class _IncrementalFileWriter(object): + """Replacement for _IncrementalFileWriter of lxml""" + def __init__(self, output_file): + self._element_stack = [] + self._file = output_file + self._have_root = False + self.global_nsmap = incremental_tree.current_global_nsmap() + self.is_html = False + + @contextmanager + def element(self, tag, attrib=None, nsmap=None, **_extra): + """Create a new xml element using a context manager.""" + if nsmap and None in nsmap: + # Normalise None prefix (lxml's default namespace prefix) -> "", as + # required for incremental_tree + if "" in nsmap and nsmap[""] != nsmap[None]: + raise ValueError( + 'Found None and "" as default nsmap prefixes with different URIs' + ) + nsmap = nsmap.copy() + nsmap[""] = nsmap.pop(None) + + # __enter__ part + self._have_root = True + if attrib is None: + attrib = {} + elem = Element(tag, attrib=attrib, **_extra) + elem.text = '' + elem.tail = '' + if self._element_stack: + 
is_root = False + ( + nsmap_scope, + default_ns_attr_prefix, + uri_to_prefix, + ) = self._element_stack[-1] + else: + is_root = True + nsmap_scope = {} + default_ns_attr_prefix = None + uri_to_prefix = {} + ( + tag, + nsmap_scope, + default_ns_attr_prefix, + uri_to_prefix, + next_remains_root, + ) = incremental_tree.write_elem_start( + self._file, + elem, + nsmap_scope=nsmap_scope, + global_nsmap=self.global_nsmap, + short_empty_elements=False, + is_html=self.is_html, + is_root=is_root, + uri_to_prefix=uri_to_prefix, + default_ns_attr_prefix=default_ns_attr_prefix, + new_nsmap=nsmap, + ) + self._element_stack.append( + ( + nsmap_scope, + default_ns_attr_prefix, + uri_to_prefix, + ) + ) + yield + + # __exit__ part + self._element_stack.pop() + self._file(f"") + if elem.tail: + self._file(_escape_cdata(elem.tail)) + + def write(self, arg): + """Write a string or subelement.""" + + if isinstance(arg, str): + # it is not allowed to write a string outside of an element + if not self._element_stack: + raise LxmlSyntaxError() + self._file(_escape_cdata(arg)) + + else: + if not self._element_stack and self._have_root: + raise LxmlSyntaxError() + + if self._element_stack: + is_root = False + ( + nsmap_scope, + default_ns_attr_prefix, + uri_to_prefix, + ) = self._element_stack[-1] + else: + is_root = True + nsmap_scope = {} + default_ns_attr_prefix = None + uri_to_prefix = {} + incremental_tree._serialize_ns_xml( + self._file, + arg, + nsmap_scope=nsmap_scope, + global_nsmap=self.global_nsmap, + short_empty_elements=True, + is_html=self.is_html, + is_root=is_root, + uri_to_prefix=uri_to_prefix, + default_ns_attr_prefix=default_ns_attr_prefix, + ) + + def __enter__(self): + pass + + def __exit__(self, type, value, traceback): + # without root the xml document is incomplete + if not self._have_root: + raise LxmlSyntaxError() + + +class xmlfile(object): + """Context manager that can replace lxml.etree.xmlfile.""" + def __init__(self, output_file, buffered=False, 
encoding="utf-8", close=False): + self._file = output_file + self._close = close + self.encoding = encoding + self.writer_cm = None + + def __enter__(self): + self.writer_cm = incremental_tree._get_writer(self._file, encoding=self.encoding) + writer, declared_encoding = self.writer_cm.__enter__() + return _IncrementalFileWriter(writer) + + def __exit__(self, type, value, traceback): + if self.writer_cm: + self.writer_cm.__exit__(type, value, traceback) + if self._close: + self._file.close() diff --git a/vllm/lib/python3.10/site-packages/fastrlock-0.8.3.dist-info/INSTALLER b/vllm/lib/python3.10/site-packages/fastrlock-0.8.3.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/fastrlock-0.8.3.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/vllm/lib/python3.10/site-packages/fastrlock-0.8.3.dist-info/LICENSE b/vllm/lib/python3.10/site-packages/fastrlock-0.8.3.dist-info/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..5448239abe68efd89d2cd3fcbe358fb138f460bc --- /dev/null +++ b/vllm/lib/python3.10/site-packages/fastrlock-0.8.3.dist-info/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2017 scoder + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vllm/lib/python3.10/site-packages/fastrlock-0.8.3.dist-info/METADATA b/vllm/lib/python3.10/site-packages/fastrlock-0.8.3.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..7298da7e2c493cab79fe083b720a994d78aa8f10 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/fastrlock-0.8.3.dist-info/METADATA @@ -0,0 +1,226 @@ +Metadata-Version: 2.1 +Name: fastrlock +Version: 0.8.3 +Summary: Fast, re-entrant optimistic lock implemented in Cython +Home-page: https://github.com/scoder/fastrlock +Author: Stefan Behnel +Author-email: stefan_ml@behnel.de +License: MIT style +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: Intended Audience :: Information Technology +Classifier: License :: OSI Approved :: MIT License +Classifier: Programming Language :: Cython +Classifier: Programming Language :: Python :: 3 +Classifier: Operating System :: OS Independent +Classifier: Topic :: Software Development +License-File: LICENSE + +FastRLock +--------- + +This is a C-level implementation of a fast, re-entrant, optimistic lock for CPython. +It is a drop-in replacement for +`threading.RLock `_. +FastRLock is implemented in `Cython `_ and also provides a C-API +for direct use from Cython code via ``from fastrlock cimport rlock`` or +``from cython.cimports.fastrlock import rlock``. 
+ +Under normal conditions, it is about 10x faster than ``threading.RLock`` in Python 2.7 +because it avoids all locking unless two or more threads try to acquire it at the +same time. Under congestion, it is still about 10% faster than RLock due to being +implemented in Cython. + +This is mostly equivalent to the revised RLock implementation in Python 3.2, +but still faster due to being implemented in Cython. However, in Python 3.4 and +later, the ``threading.RLock`` implementation in the stdlib tends to be as fast +or even faster than the lock provided by this package, when called through the +Python API. ``FastRLock`` is still faster also on these systems when called +through its Cython API from other Cython modules. + +It was initially published as a code recipe here: +https://code.activestate.com/recipes/577336-fast-re-entrant-optimistic-lock-implemented-in-cyt/ + +FastRLock has been used and tested in `Lupa `_ for several years. + + +How does it work? +----------------- + +The FastRLock implementation optimises for the non-congested case. It works by +exploiting the availability of the GIL. Since it knows that it holds the GIL when +the acquire()/release() methods are called, it can safely check the lock for being +held by other threads and just count any re-entries as long as it is always the +same thread that acquires it. This is a lot faster than actually acquiring the +underlying lock. + +When a second thread wants to acquire the lock as well, it first checks the lock +count and finds out that the lock is already owned. If the underlying lock is also +held by another thread already, it then just frees the GIL and asks for acquiring +the lock, just like RLock does. If the underlying lock is not held, however, it +acquires it immediately and basically hands over the ownership by telling the +current owner to free it when it's done. 
Then, it falls back to the normal +non-owner behaviour that asks for the lock and will eventually acquire it when it +gets released. This makes sure that the real lock is only acquired when at least +two threads want it. + +All of these operations are basically atomic because any thread that modifies the +lock state always holds the GIL. Note that the implementation must not call any +Python code while handling the lock, as calling into Python may lead to a context +switch which hands over the GIL to another thread and thus breaks atomicity. +Therefore, the code misuses Cython's 'nogil' annotation to make sure that no Python +code slips in accidentally. + + +How fast is it? +--------------- + +Here are some timings for the following scenarios: + +1) five acquire-release cycles ('lock_unlock') +2) five acquire calls followed by five release calls (nested locking, 'reentrant_lock_unlock') +3) a mixed and partly nested sequence of acquire and release calls ('mixed_lock_unlock') +4) five acquire-release cycles that do not block ('lock_unlock_nonblocking') + +All four are benchmarked for the single threaded case and the multi threaded case +with 10 threads. I also tested it with 20 threads only to see that it then takes +about twice the time for both versions. Note also that the congested case is +substantially slower for both locks and the benchmark includes the thread +creation time, so I only looped 1000x here to get useful +timings instead of 100000x for the single threaded case. + +The results here are mixed. Depending on the optimisation of the CPython +installation, it can be faster, about the same speed, or somewhat slower. +In any case, the direct Cython interface is always faster than going through +the Python API, because it avoids the Python call overhead and executes +a C call instead. 
+ +:: + + Testing RLock (3.10.1) + + sequential (x100000): + lock_unlock : 138.36 msec + reentrant_lock_unlock : 95.35 msec + mixed_lock_unlock : 102.05 msec + lock_unlock_nonblocking : 131.44 msec + context_manager : 616.83 msec + + threaded 10T (x1000): + lock_unlock : 1386.60 msec + reentrant_lock_unlock : 1207.75 msec + mixed_lock_unlock : 1319.62 msec + lock_unlock_nonblocking : 1325.07 msec + context_manager : 1357.93 msec + + Testing FastRLock (0.8.1) + + sequential (x100000): + lock_unlock : 77.47 msec + reentrant_lock_unlock : 64.14 msec + mixed_lock_unlock : 73.51 msec + lock_unlock_nonblocking : 70.31 msec + context_manager : 393.34 msec + + threaded 10T (x1000): + lock_unlock : 1214.13 msec + reentrant_lock_unlock : 1171.75 msec + mixed_lock_unlock : 1184.33 msec + lock_unlock_nonblocking : 1207.42 msec + context_manager : 1232.20 msec + + Testing Cython interface of FastRLock (0.8.1) + + sequential (x100000): + lock_unlock : 18.70 msec + reentrant_lock_unlock : 15.88 msec + mixed_lock_unlock : 14.96 msec + lock_unlock_nonblocking : 13.47 msec + + threaded 10T (x1000): + lock_unlock : 1236.21 msec + reentrant_lock_unlock : 1245.77 msec + mixed_lock_unlock : 1194.25 msec + lock_unlock_nonblocking : 1206.96 msec + + +=================== +fastrlock changelog +=================== + +0.8.3 (2024-12-17) +================== + +* Rebuilt with Cython 3.0.11 to add Python 3.13 support. + + +0.8.2 (2023-08-27) +================== + +* Rebuilt with Cython 3.0.2 to add Python 3.12 support. + + +0.8.1 (2022-11-02) +================== + +* Rebuilt with Cython 3.0.0a11 to add Python 3.11 support. + + +0.8 (2021-10-22) +================ + +* Rebuilt with Cython 3.0.0a9 to improve the performance in recent + Python 3.x versions. + + +0.7 (2021-10-21) +================ + +* Adapted for unsigned thread IDs, as used by Py3.7+. + (original patch by Guilherme Dantas) + +* Build with Cython 0.29.24 to support Py3.10 and later. 
+ + +0.6 (2021-03-21) +================ + +* Rebuild with Cython 0.29.22 to support Py3.9 and later. + + +0.5 (2020-06-05) +================ + +* Rebuild with Cython 0.29.20 to support Py3.8 and later. + + +0.4 (2018-08-24) +================ + +* Rebuild with Cython 0.28.5. + +* Linux wheels are faster through profile guided optimisation. + +* Add missing file to sdist. + (patch by Mark Harfouche, Github issue #5) + + +0.3 (2017-08-10) +================ + +* improve cimport support of C-API + (patch by Naotoshi Seo, Github issue #3) + +* provide ``fastrlock.__version__`` + + +0.2 (2017-08-09) +================ + +* add missing readme file to sdist + + +0.1 (2017-06-04) +================ + +* initial release diff --git a/vllm/lib/python3.10/site-packages/fastrlock-0.8.3.dist-info/RECORD b/vllm/lib/python3.10/site-packages/fastrlock-0.8.3.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..c939f7ab96dd087ab130188adc2013d9700f7062 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/fastrlock-0.8.3.dist-info/RECORD @@ -0,0 +1,14 @@ +fastrlock-0.8.3.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +fastrlock-0.8.3.dist-info/LICENSE,sha256=edWWCQqdGaUaEXXL0SQGCy8j1Pa-vqeYIkHSMRdRljA,1063 +fastrlock-0.8.3.dist-info/METADATA,sha256=CSkdXG1Tg_Nn1ar1AXfaqMPqOzGI3Er9xl1ed3brFQo,7664 +fastrlock-0.8.3.dist-info/RECORD,, +fastrlock-0.8.3.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +fastrlock-0.8.3.dist-info/WHEEL,sha256=Rk4grMJLqb8wSCjEpvXBqchuGLCY2i_LHup4dkE_8Eo,186 +fastrlock-0.8.3.dist-info/top_level.txt,sha256=QMLNNCjoisR1NTxtzPxl2Zyih9n6sFxd8VCUQzIJHOA,10 +fastrlock/__init__.pxd,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +fastrlock/__init__.py,sha256=lYDBBV0R1dtMBmWKorNXKhEma8Fo0OswJJW6zCSGmtU,169 +fastrlock/__pycache__/__init__.cpython-310.pyc,, +fastrlock/_lock.pxi,sha256=tPIg2qyMZbCZDEXQsp_tb_Em2J0podo3iU3-XEBdnTQ,2608 
+fastrlock/rlock.cpython-310-x86_64-linux-gnu.so,sha256=r9XOGqP28vc9-BbAWmWQwb8S04o9hVHuJ9LIgRdymvA,120120 +fastrlock/rlock.pxd,sha256=slrtTC9yStpzsL9FUgoyU69D_YsJAe036GEfH6Z9a0c,313 +fastrlock/rlock.pyx,sha256=YZfaVup-Tkqb42IcNlunf4Vtt2vXVQfZPG4l9BmQlAY,3599 diff --git a/vllm/lib/python3.10/site-packages/fastrlock-0.8.3.dist-info/REQUESTED b/vllm/lib/python3.10/site-packages/fastrlock-0.8.3.dist-info/REQUESTED new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/vllm/lib/python3.10/site-packages/fastrlock-0.8.3.dist-info/WHEEL b/vllm/lib/python3.10/site-packages/fastrlock-0.8.3.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..c92b8e2a7f6f02820d9b126a44128ae0dc593040 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/fastrlock-0.8.3.dist-info/WHEEL @@ -0,0 +1,7 @@ +Wheel-Version: 1.0 +Generator: setuptools (75.6.0) +Root-Is-Purelib: false +Tag: cp310-cp310-manylinux_2_5_x86_64 +Tag: cp310-cp310-manylinux1_x86_64 +Tag: cp310-cp310-manylinux_2_28_x86_64 + diff --git a/vllm/lib/python3.10/site-packages/fastrlock-0.8.3.dist-info/top_level.txt b/vllm/lib/python3.10/site-packages/fastrlock-0.8.3.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..81f32fff52cca11f37b0b8117967bf567954a64c --- /dev/null +++ b/vllm/lib/python3.10/site-packages/fastrlock-0.8.3.dist-info/top_level.txt @@ -0,0 +1 @@ +fastrlock diff --git a/vllm/lib/python3.10/site-packages/llvmlite/__pycache__/__init__.cpython-310.pyc b/vllm/lib/python3.10/site-packages/llvmlite/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..933574613a988fc7c059edbbe3e97dfcf931937e Binary files /dev/null and b/vllm/lib/python3.10/site-packages/llvmlite/__pycache__/__init__.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/llvmlite/__pycache__/_version.cpython-310.pyc 
b/vllm/lib/python3.10/site-packages/llvmlite/__pycache__/_version.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..530c19bc874244234d0f4a39179594eee7b8d0cf Binary files /dev/null and b/vllm/lib/python3.10/site-packages/llvmlite/__pycache__/_version.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/llvmlite/__pycache__/utils.cpython-310.pyc b/vllm/lib/python3.10/site-packages/llvmlite/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..07992a8926a420ef4b7971ddfe6da45a7a97904e Binary files /dev/null and b/vllm/lib/python3.10/site-packages/llvmlite/__pycache__/utils.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/llvmlite/ir/__init__.py b/vllm/lib/python3.10/site-packages/llvmlite/ir/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..b7a0737b2d5325d5c40a8953520a260c013ed48f --- /dev/null +++ b/vllm/lib/python3.10/site-packages/llvmlite/ir/__init__.py @@ -0,0 +1,11 @@ +""" +This subpackage implements the LLVM IR classes in pure python +""" + +from .types import * +from .values import * +from .module import * +from .builder import * +from .instructions import * +from .transforms import * +from .context import Context, global_context diff --git a/vllm/lib/python3.10/site-packages/llvmlite/ir/__pycache__/__init__.cpython-310.pyc b/vllm/lib/python3.10/site-packages/llvmlite/ir/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dc2283ab28874b66416b00ef0f48bb4ebcf032cb Binary files /dev/null and b/vllm/lib/python3.10/site-packages/llvmlite/ir/__pycache__/__init__.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/llvmlite/ir/__pycache__/_utils.cpython-310.pyc b/vllm/lib/python3.10/site-packages/llvmlite/ir/__pycache__/_utils.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..f43dc31f9d72084ad99c0f10ac8ab56a2efa216d Binary files /dev/null and b/vllm/lib/python3.10/site-packages/llvmlite/ir/__pycache__/_utils.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/llvmlite/ir/__pycache__/builder.cpython-310.pyc b/vllm/lib/python3.10/site-packages/llvmlite/ir/__pycache__/builder.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..514e39ed6944a6daa3e334f3097f4ffae4ca8ffa Binary files /dev/null and b/vllm/lib/python3.10/site-packages/llvmlite/ir/__pycache__/builder.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/llvmlite/ir/__pycache__/context.cpython-310.pyc b/vllm/lib/python3.10/site-packages/llvmlite/ir/__pycache__/context.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7dd23888e49a452ddbc7e3b7ca30324b0adf244d Binary files /dev/null and b/vllm/lib/python3.10/site-packages/llvmlite/ir/__pycache__/context.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/llvmlite/ir/__pycache__/instructions.cpython-310.pyc b/vllm/lib/python3.10/site-packages/llvmlite/ir/__pycache__/instructions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f1d23967b8a4319de21c83fc614481e5320d4516 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/llvmlite/ir/__pycache__/instructions.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/llvmlite/ir/__pycache__/module.cpython-310.pyc b/vllm/lib/python3.10/site-packages/llvmlite/ir/__pycache__/module.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a852068390a38847084c4e6a571a2f78586fc283 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/llvmlite/ir/__pycache__/module.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/llvmlite/ir/__pycache__/transforms.cpython-310.pyc 
b/vllm/lib/python3.10/site-packages/llvmlite/ir/__pycache__/transforms.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..017a13ccf02c5a873d809f58c1ba02c3407bf789 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/llvmlite/ir/__pycache__/transforms.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/llvmlite/ir/__pycache__/types.cpython-310.pyc b/vllm/lib/python3.10/site-packages/llvmlite/ir/__pycache__/types.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e643f1de06c0fdb3b0cb2fc762211705e886b124 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/llvmlite/ir/__pycache__/types.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/llvmlite/ir/__pycache__/values.cpython-310.pyc b/vllm/lib/python3.10/site-packages/llvmlite/ir/__pycache__/values.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9cf908217a68948a6ec01920359c148a79c539dd Binary files /dev/null and b/vllm/lib/python3.10/site-packages/llvmlite/ir/__pycache__/values.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/llvmlite/ir/_utils.py b/vllm/lib/python3.10/site-packages/llvmlite/ir/_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..8287d77afb84f750be00ef0b001a175507c59da4 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/llvmlite/ir/_utils.py @@ -0,0 +1,80 @@ +from collections import defaultdict + + +class DuplicatedNameError(NameError): + pass + + +class NameScope(object): + def __init__(self): + self._useset = set(['']) + self._basenamemap = defaultdict(int) + + def is_used(self, name): + return name in self._useset + + def register(self, name, deduplicate=False): + if deduplicate: + name = self.deduplicate(name) + elif self.is_used(name): + raise DuplicatedNameError(name) + self._useset.add(name) + return name + + def deduplicate(self, name): + basename = name + while self.is_used(name): + 
ident = self._basenamemap[basename] + 1 + self._basenamemap[basename] = ident + name = "{0}.{1}".format(basename, ident) + return name + + def get_child(self): + return type(self)(parent=self) + + +class _StrCaching(object): + + def _clear_string_cache(self): + try: + del self.__cached_str + except AttributeError: + pass + + def __str__(self): + try: + return self.__cached_str + except AttributeError: + s = self.__cached_str = self._to_string() + return s + + +class _StringReferenceCaching(object): + + def get_reference(self): + try: + return self.__cached_refstr + except AttributeError: + s = self.__cached_refstr = self._get_reference() + return s + + +class _HasMetadata(object): + + def set_metadata(self, name, node): + """ + Attach unnamed metadata *node* to the metadata slot *name* of this + value. + """ + self.metadata[name] = node + + def _stringify_metadata(self, leading_comma=False): + if self.metadata: + buf = [] + if leading_comma: + buf.append("") + buf += ["!{0} {1}".format(k, v.get_reference()) + for k, v in self.metadata.items()] + return ', '.join(buf) + else: + return '' diff --git a/vllm/lib/python3.10/site-packages/llvmlite/ir/builder.py b/vllm/lib/python3.10/site-packages/llvmlite/ir/builder.py new file mode 100644 index 0000000000000000000000000000000000000000..f62476ca8a4c470d7256bf9d20cf90b2bfe4682b --- /dev/null +++ b/vllm/lib/python3.10/site-packages/llvmlite/ir/builder.py @@ -0,0 +1,1119 @@ +import contextlib +import functools + +from llvmlite.ir import instructions, types, values + +_CMP_MAP = { + '>': 'gt', + '<': 'lt', + '==': 'eq', + '!=': 'ne', + '>=': 'ge', + '<=': 'le', +} + + +def _unop(opname, cls=instructions.Instruction): + def wrap(fn): + @functools.wraps(fn) + def wrapped(self, arg, name='', flags=()): + instr = cls(self.block, arg.type, opname, [arg], name, flags) + self._insert(instr) + return instr + + return wrapped + + return wrap + + +def _binop(opname, cls=instructions.Instruction): + def wrap(fn): + @functools.wraps(fn) 
+ def wrapped(self, lhs, rhs, name='', flags=()): + if lhs.type != rhs.type: + raise ValueError("Operands must be the same type, got (%s, %s)" + % (lhs.type, rhs.type)) + instr = cls(self.block, lhs.type, opname, (lhs, rhs), name, flags) + self._insert(instr) + return instr + + return wrapped + + return wrap + + +def _binop_with_overflow(opname, cls=instructions.Instruction): + def wrap(fn): + @functools.wraps(fn) + def wrapped(self, lhs, rhs, name=''): + if lhs.type != rhs.type: + raise ValueError("Operands must be the same type, got (%s, %s)" + % (lhs.type, rhs.type)) + ty = lhs.type + if not isinstance(ty, types.IntType): + raise TypeError("expected an integer type, got %s" % (ty,)) + bool_ty = types.IntType(1) + + mod = self.module + fnty = types.FunctionType(types.LiteralStructType([ty, bool_ty]), + [ty, ty]) + fn = mod.declare_intrinsic("llvm.%s.with.overflow" % (opname,), + [ty], fnty) + ret = self.call(fn, [lhs, rhs], name=name) + return ret + + return wrapped + + return wrap + + +def _uniop(opname, cls=instructions.Instruction): + def wrap(fn): + @functools.wraps(fn) + def wrapped(self, operand, name=''): + instr = cls(self.block, operand.type, opname, [operand], name) + self._insert(instr) + return instr + + return wrapped + + return wrap + + +def _uniop_intrinsic_int(opname): + def wrap(fn): + @functools.wraps(fn) + def wrapped(self, operand, name=''): + if not isinstance(operand.type, types.IntType): + raise TypeError( + "expected an integer type, got %s" % + operand.type) + fn = self.module.declare_intrinsic(opname, [operand.type]) + return self.call(fn, [operand], name) + + return wrapped + + return wrap + + +def _uniop_intrinsic_float(opname): + def wrap(fn): + @functools.wraps(fn) + def wrapped(self, operand, name=''): + if not isinstance( + operand.type, (types.FloatType, types.DoubleType)): + raise TypeError("expected a float type, got %s" % operand.type) + fn = self.module.declare_intrinsic(opname, [operand.type]) + return self.call(fn, 
[operand], name) + + return wrapped + + return wrap + + +def _uniop_intrinsic_with_flag(opname): + def wrap(fn): + @functools.wraps(fn) + def wrapped(self, operand, flag, name=''): + if not isinstance(operand.type, types.IntType): + raise TypeError( + "expected an integer type, got %s" % + operand.type) + if not (isinstance(flag.type, types.IntType) and + flag.type.width == 1): + raise TypeError("expected an i1 type, got %s" % flag.type) + fn = self.module.declare_intrinsic( + opname, [operand.type, flag.type]) + return self.call(fn, [operand, flag], name) + + return wrapped + + return wrap + + +def _triop_intrinsic(opname): + def wrap(fn): + @functools.wraps(fn) + def wrapped(self, a, b, c, name=''): + if a.type != b.type or b.type != c.type: + raise TypeError( + "expected types to be the same, got %s, %s, %s" % ( + a.type, + b.type, + c.type)) + elif not isinstance( + a.type, + (types.HalfType, types.FloatType, types.DoubleType)): + raise TypeError( + "expected an floating point type, got %s" % + a.type) + fn = self.module.declare_intrinsic(opname, [a.type, b.type, c.type]) + return self.call(fn, [a, b, c], name) + + return wrapped + + return wrap + + +def _castop(opname, cls=instructions.CastInstr): + def wrap(fn): + @functools.wraps(fn) + def wrapped(self, val, typ, name=''): + if val.type == typ: + return val + instr = cls(self.block, opname, val, typ, name) + self._insert(instr) + return instr + + return wrapped + + return wrap + + +def _label_suffix(label, suffix): + """Returns (label + suffix) or a truncated version if it's too long. 
+ Parameters + ---------- + label : str + Label name + suffix : str + Label suffix + """ + if len(label) > 50: + nhead = 25 + return ''.join([label[:nhead], '..', suffix]) + else: + return label + suffix + + +class IRBuilder(object): + def __init__(self, block=None): + self._block = block + self._anchor = len(block.instructions) if block else 0 + self.debug_metadata = None + + @property + def block(self): + """ + The current basic block. + """ + return self._block + + basic_block = block + + @property + def function(self): + """ + The current function. + """ + return self.block.parent + + @property + def module(self): + """ + The current module. + """ + return self.block.parent.module + + def position_before(self, instr): + """ + Position immediately before the given instruction. The current block + is also changed to the instruction's basic block. + """ + self._block = instr.parent + self._anchor = self._block.instructions.index(instr) + + def position_after(self, instr): + """ + Position immediately after the given instruction. The current block + is also changed to the instruction's basic block. + """ + self._block = instr.parent + self._anchor = self._block.instructions.index(instr) + 1 + + def position_at_start(self, block): + """ + Position at the start of the basic *block*. + """ + self._block = block + self._anchor = 0 + + def position_at_end(self, block): + """ + Position at the end of the basic *block*. + """ + self._block = block + self._anchor = len(block.instructions) + + def append_basic_block(self, name=''): + """ + Append a basic block, with the given optional *name*, to the current + function. The current block is not changed. The new block is returned. 
+ """ + return self.function.append_basic_block(name) + + def remove(self, instr): + """Remove the given instruction.""" + idx = self._block.instructions.index(instr) + del self._block.instructions[idx] + if self._block.terminator == instr: + self._block.terminator = None + if self._anchor > idx: + self._anchor -= 1 + + @contextlib.contextmanager + def goto_block(self, block): + """ + A context manager which temporarily positions the builder at the end + of basic block *bb* (but before any terminator). + """ + old_block = self.basic_block + term = block.terminator + if term is not None: + self.position_before(term) + else: + self.position_at_end(block) + try: + yield + finally: + self.position_at_end(old_block) + + @contextlib.contextmanager + def goto_entry_block(self): + """ + A context manager which temporarily positions the builder at the + end of the function's entry block. + """ + with self.goto_block(self.function.entry_basic_block): + yield + + @contextlib.contextmanager + def _branch_helper(self, bbenter, bbexit): + self.position_at_end(bbenter) + yield bbexit + if self.basic_block.terminator is None: + self.branch(bbexit) + + @contextlib.contextmanager + def if_then(self, pred, likely=None): + """ + A context manager which sets up a conditional basic block based + on the given predicate (a i1 value). If the conditional block + is not explicitly terminated, a branch will be added to the next + block. + If *likely* is given, its boolean value indicates whether the + predicate is likely to be true or not, and metadata is issued + for LLVM's optimizers to account for that. 
+ """ + bb = self.basic_block + bbif = self.append_basic_block(name=_label_suffix(bb.name, '.if')) + bbend = self.append_basic_block(name=_label_suffix(bb.name, '.endif')) + br = self.cbranch(pred, bbif, bbend) + if likely is not None: + br.set_weights([99, 1] if likely else [1, 99]) + + with self._branch_helper(bbif, bbend): + yield bbend + + self.position_at_end(bbend) + + @contextlib.contextmanager + def if_else(self, pred, likely=None): + """ + A context manager which sets up two conditional basic blocks based + on the given predicate (a i1 value). + A tuple of context managers is yield'ed. Each context manager + acts as a if_then() block. + *likely* has the same meaning as in if_then(). + + Typical use:: + with builder.if_else(pred) as (then, otherwise): + with then: + # emit instructions for when the predicate is true + with otherwise: + # emit instructions for when the predicate is false + """ + bb = self.basic_block + bbif = self.append_basic_block(name=_label_suffix(bb.name, '.if')) + bbelse = self.append_basic_block(name=_label_suffix(bb.name, '.else')) + bbend = self.append_basic_block(name=_label_suffix(bb.name, '.endif')) + br = self.cbranch(pred, bbif, bbelse) + if likely is not None: + br.set_weights([99, 1] if likely else [1, 99]) + + then = self._branch_helper(bbif, bbend) + otherwise = self._branch_helper(bbelse, bbend) + + yield then, otherwise + + self.position_at_end(bbend) + + def _insert(self, instr): + if self.debug_metadata is not None and 'dbg' not in instr.metadata: + instr.metadata['dbg'] = self.debug_metadata + self._block.instructions.insert(self._anchor, instr) + self._anchor += 1 + + def _set_terminator(self, term): + assert not self.block.is_terminated + self._insert(term) + self.block.terminator = term + return term + + # + # Arithmetic APIs + # + + @_binop('shl') + def shl(self, lhs, rhs, name=''): + """ + Left integer shift: + name = lhs << rhs + """ + + @_binop('lshr') + def lshr(self, lhs, rhs, name=''): + """ + Logical 
(unsigned) right integer shift: + name = lhs >> rhs + """ + + @_binop('ashr') + def ashr(self, lhs, rhs, name=''): + """ + Arithmetic (signed) right integer shift: + name = lhs >> rhs + """ + + @_binop('add') + def add(self, lhs, rhs, name=''): + """ + Integer addition: + name = lhs + rhs + """ + + @_binop('fadd') + def fadd(self, lhs, rhs, name=''): + """ + Floating-point addition: + name = lhs + rhs + """ + + @_binop('sub') + def sub(self, lhs, rhs, name=''): + """ + Integer subtraction: + name = lhs - rhs + """ + + @_binop('fsub') + def fsub(self, lhs, rhs, name=''): + """ + Floating-point subtraction: + name = lhs - rhs + """ + + @_binop('mul') + def mul(self, lhs, rhs, name=''): + """ + Integer multiplication: + name = lhs * rhs + """ + + @_binop('fmul') + def fmul(self, lhs, rhs, name=''): + """ + Floating-point multiplication: + name = lhs * rhs + """ + + @_binop('udiv') + def udiv(self, lhs, rhs, name=''): + """ + Unsigned integer division: + name = lhs / rhs + """ + + @_binop('sdiv') + def sdiv(self, lhs, rhs, name=''): + """ + Signed integer division: + name = lhs / rhs + """ + + @_binop('fdiv') + def fdiv(self, lhs, rhs, name=''): + """ + Floating-point division: + name = lhs / rhs + """ + + @_binop('urem') + def urem(self, lhs, rhs, name=''): + """ + Unsigned integer remainder: + name = lhs % rhs + """ + + @_binop('srem') + def srem(self, lhs, rhs, name=''): + """ + Signed integer remainder: + name = lhs % rhs + """ + + @_binop('frem') + def frem(self, lhs, rhs, name=''): + """ + Floating-point remainder: + name = lhs % rhs + """ + + @_binop('or') + def or_(self, lhs, rhs, name=''): + """ + Bitwise integer OR: + name = lhs | rhs + """ + + @_binop('and') + def and_(self, lhs, rhs, name=''): + """ + Bitwise integer AND: + name = lhs & rhs + """ + + @_binop('xor') + def xor(self, lhs, rhs, name=''): + """ + Bitwise integer XOR: + name = lhs ^ rhs + """ + + @_binop_with_overflow('sadd') + def sadd_with_overflow(self, lhs, rhs, name=''): + """ + Signed 
integer addition with overflow: + name = {result, overflow bit} = lhs + rhs + """ + + @_binop_with_overflow('smul') + def smul_with_overflow(self, lhs, rhs, name=''): + """ + Signed integer multiplication with overflow: + name = {result, overflow bit} = lhs * rhs + """ + + @_binop_with_overflow('ssub') + def ssub_with_overflow(self, lhs, rhs, name=''): + """ + Signed integer subtraction with overflow: + name = {result, overflow bit} = lhs - rhs + """ + + @_binop_with_overflow('uadd') + def uadd_with_overflow(self, lhs, rhs, name=''): + """ + Unsigned integer addition with overflow: + name = {result, overflow bit} = lhs + rhs + """ + + @_binop_with_overflow('umul') + def umul_with_overflow(self, lhs, rhs, name=''): + """ + Unsigned integer multiplication with overflow: + name = {result, overflow bit} = lhs * rhs + """ + + @_binop_with_overflow('usub') + def usub_with_overflow(self, lhs, rhs, name=''): + """ + Unsigned integer subtraction with overflow: + name = {result, overflow bit} = lhs - rhs + """ + + # + # Unary APIs + # + + def not_(self, value, name=''): + """ + Bitwise integer complement: + name = ~value + """ + if isinstance(value.type, types.VectorType): + rhs = values.Constant(value.type, (-1,) * value.type.count) + else: + rhs = values.Constant(value.type, -1) + return self.xor(value, rhs, name=name) + + def neg(self, value, name=''): + """ + Integer negative: + name = -value + """ + return self.sub(values.Constant(value.type, 0), value, name=name) + + @_unop('fneg') + def fneg(self, arg, name='', flags=()): + """ + Floating-point negative: + name = -arg + """ + + # + # Comparison APIs + # + + def _icmp(self, prefix, cmpop, lhs, rhs, name): + try: + op = _CMP_MAP[cmpop] + except KeyError: + raise ValueError("invalid comparison %r for icmp" % (cmpop,)) + if cmpop not in ('==', '!='): + op = prefix + op + instr = instructions.ICMPInstr(self.block, op, lhs, rhs, name=name) + self._insert(instr) + return instr + + def icmp_signed(self, cmpop, lhs, rhs, 
name=''): + """ + Signed integer comparison: + name = lhs rhs + + where cmpop can be '==', '!=', '<', '<=', '>', '>=' + """ + return self._icmp('s', cmpop, lhs, rhs, name) + + def icmp_unsigned(self, cmpop, lhs, rhs, name=''): + """ + Unsigned integer (or pointer) comparison: + name = lhs rhs + + where cmpop can be '==', '!=', '<', '<=', '>', '>=' + """ + return self._icmp('u', cmpop, lhs, rhs, name) + + def fcmp_ordered(self, cmpop, lhs, rhs, name='', flags=()): + """ + Floating-point ordered comparison: + name = lhs rhs + + where cmpop can be '==', '!=', '<', '<=', '>', '>=', 'ord', 'uno' + """ + if cmpop in _CMP_MAP: + op = 'o' + _CMP_MAP[cmpop] + else: + op = cmpop + instr = instructions.FCMPInstr( + self.block, op, lhs, rhs, name=name, flags=flags) + self._insert(instr) + return instr + + def fcmp_unordered(self, cmpop, lhs, rhs, name='', flags=()): + """ + Floating-point unordered comparison: + name = lhs rhs + + where cmpop can be '==', '!=', '<', '<=', '>', '>=', 'ord', 'uno' + """ + if cmpop in _CMP_MAP: + op = 'u' + _CMP_MAP[cmpop] + else: + op = cmpop + instr = instructions.FCMPInstr( + self.block, op, lhs, rhs, name=name, flags=flags) + self._insert(instr) + return instr + + def select(self, cond, lhs, rhs, name='', flags=()): + """ + Ternary select operator: + name = cond ? 
lhs : rhs + """ + instr = instructions.SelectInstr(self.block, cond, lhs, rhs, name=name, + flags=flags) + self._insert(instr) + return instr + + # + # Cast APIs + # + + @_castop('trunc') + def trunc(self, value, typ, name=''): + """ + Truncating integer downcast to a smaller type: + name = (typ) value + """ + + @_castop('zext') + def zext(self, value, typ, name=''): + """ + Zero-extending integer upcast to a larger type: + name = (typ) value + """ + + @_castop('sext') + def sext(self, value, typ, name=''): + """ + Sign-extending integer upcast to a larger type: + name = (typ) value + """ + + @_castop('fptrunc') + def fptrunc(self, value, typ, name=''): + """ + Floating-point downcast to a less precise type: + name = (typ) value + """ + + @_castop('fpext') + def fpext(self, value, typ, name=''): + """ + Floating-point upcast to a more precise type: + name = (typ) value + """ + + @_castop('bitcast') + def bitcast(self, value, typ, name=''): + """ + Pointer cast to a different pointer type: + name = (typ) value + """ + + @_castop('addrspacecast') + def addrspacecast(self, value, typ, name=''): + """ + Pointer cast to a different address space: + name = (typ) value + """ + + @_castop('fptoui') + def fptoui(self, value, typ, name=''): + """ + Convert floating-point to unsigned integer: + name = (typ) value + """ + + @_castop('uitofp') + def uitofp(self, value, typ, name=''): + """ + Convert unsigned integer to floating-point: + name = (typ) value + """ + + @_castop('fptosi') + def fptosi(self, value, typ, name=''): + """ + Convert floating-point to signed integer: + name = (typ) value + """ + + @_castop('sitofp') + def sitofp(self, value, typ, name=''): + """ + Convert signed integer to floating-point: + name = (typ) value + """ + + @_castop('ptrtoint') + def ptrtoint(self, value, typ, name=''): + """ + Cast pointer to integer: + name = (typ) value + """ + + @_castop('inttoptr') + def inttoptr(self, value, typ, name=''): + """ + Cast integer to pointer: + name = (typ) 
value + """ + + # + # Memory APIs + # + + def alloca(self, typ, size=None, name=''): + """ + Stack-allocate a slot for *size* elements of the given type. + (default one element) + """ + if size is None: + pass + elif isinstance(size, (values.Value, values.Constant)): + assert isinstance(size.type, types.IntType) + else: + # If it is not a Value instance, + # assume to be a Python integer. + size = values.Constant(types.IntType(32), size) + + al = instructions.AllocaInstr(self.block, typ, size, name) + self._insert(al) + return al + + def load(self, ptr, name='', align=None): + """ + Load value from pointer, with optional guaranteed alignment: + name = *ptr + """ + if not isinstance(ptr.type, types.PointerType): + msg = "cannot load from value of type %s (%r): not a pointer" + raise TypeError(msg % (ptr.type, str(ptr))) + ld = instructions.LoadInstr(self.block, ptr, name) + ld.align = align + self._insert(ld) + return ld + + def store(self, value, ptr, align=None): + """ + Store value to pointer, with optional guaranteed alignment: + *ptr = name + """ + if not isinstance(ptr.type, types.PointerType): + msg = "cannot store to value of type %s (%r): not a pointer" + raise TypeError(msg % (ptr.type, str(ptr))) + if ptr.type.pointee != value.type: + raise TypeError("cannot store %s to %s: mismatching types" + % (value.type, ptr.type)) + st = instructions.StoreInstr(self.block, value, ptr) + st.align = align + self._insert(st) + return st + + def load_atomic(self, ptr, ordering, align, name=''): + """ + Load value from pointer, with optional guaranteed alignment: + name = *ptr + """ + if not isinstance(ptr.type, types.PointerType): + msg = "cannot load from value of type %s (%r): not a pointer" + raise TypeError(msg % (ptr.type, str(ptr))) + ld = instructions.LoadAtomicInstr( + self.block, ptr, ordering, align, name) + self._insert(ld) + return ld + + def store_atomic(self, value, ptr, ordering, align): + """ + Store value to pointer, with optional guaranteed alignment: 
+ *ptr = name + """ + if not isinstance(ptr.type, types.PointerType): + msg = "cannot store to value of type %s (%r): not a pointer" + raise TypeError(msg % (ptr.type, str(ptr))) + if ptr.type.pointee != value.type: + raise TypeError("cannot store %s to %s: mismatching types" + % (value.type, ptr.type)) + st = instructions.StoreAtomicInstr( + self.block, value, ptr, ordering, align) + self._insert(st) + return st + + # + # Terminators APIs + # + + def switch(self, value, default): + """ + Create a switch-case with a single *default* target. + """ + swt = instructions.SwitchInstr(self.block, 'switch', value, default) + self._set_terminator(swt) + return swt + + def branch(self, target): + """ + Unconditional branch to *target*. + """ + br = instructions.Branch(self.block, "br", [target]) + self._set_terminator(br) + return br + + def cbranch(self, cond, truebr, falsebr): + """ + Conditional branch to *truebr* if *cond* is true, else to *falsebr*. + """ + br = instructions.ConditionalBranch(self.block, "br", + [cond, truebr, falsebr]) + self._set_terminator(br) + return br + + def branch_indirect(self, addr): + """ + Indirect branch to target *addr*. + """ + br = instructions.IndirectBranch(self.block, "indirectbr", addr) + self._set_terminator(br) + return br + + def ret_void(self): + """ + Return from function without a value. + """ + return self._set_terminator( + instructions.Ret(self.block, "ret void")) + + def ret(self, value): + """ + Return from function with the given *value*. + """ + return self._set_terminator( + instructions.Ret(self.block, "ret", value)) + + def resume(self, landingpad): + """ + Resume an in-flight exception. + """ + br = instructions.Branch(self.block, "resume", [landingpad]) + self._set_terminator(br) + return br + + # Call APIs + + def call(self, fn, args, name='', cconv=None, tail=False, fastmath=(), + attrs=(), arg_attrs=None): + """ + Call function *fn* with *args*: + name = fn(args...) 
+ """ + inst = instructions.CallInstr(self.block, fn, args, name=name, + cconv=cconv, tail=tail, fastmath=fastmath, + attrs=attrs, arg_attrs=arg_attrs) + self._insert(inst) + return inst + + def asm(self, ftype, asm, constraint, args, side_effect, name=''): + """ + Inline assembler. + """ + asm = instructions.InlineAsm(ftype, asm, constraint, side_effect) + return self.call(asm, args, name) + + def load_reg(self, reg_type, reg_name, name=''): + """ + Load a register value into an LLVM value. + Example: v = load_reg(IntType(32), "eax") + """ + ftype = types.FunctionType(reg_type, []) + return self.asm(ftype, "", "={%s}" % reg_name, [], False, name) + + def store_reg(self, value, reg_type, reg_name, name=''): + """ + Store an LLVM value inside a register + Example: + store_reg(Constant(IntType(32), 0xAAAAAAAA), IntType(32), "eax") + """ + ftype = types.FunctionType(types.VoidType(), [reg_type]) + return self.asm(ftype, "", "{%s}" % reg_name, [value], True, name) + + def invoke(self, fn, args, normal_to, unwind_to, + name='', cconv=None, fastmath=(), attrs=(), arg_attrs=None): + inst = instructions.InvokeInstr(self.block, fn, args, normal_to, + unwind_to, name=name, cconv=cconv, + fastmath=fastmath, attrs=attrs, + arg_attrs=arg_attrs) + self._set_terminator(inst) + return inst + + # GEP APIs + + def gep(self, ptr, indices, inbounds=False, name=''): + """ + Compute effective address (getelementptr): + name = getelementptr ptr, + """ + instr = instructions.GEPInstr(self.block, ptr, indices, + inbounds=inbounds, name=name) + self._insert(instr) + return instr + + # Vector Operations APIs + + def extract_element(self, vector, idx, name=''): + """ + Returns the value at position idx. + """ + instr = instructions.ExtractElement(self.block, vector, idx, name=name) + self._insert(instr) + return instr + + def insert_element(self, vector, value, idx, name=''): + """ + Returns vector with vector[idx] replaced by value. 
+ The result is undefined if the idx is larger or equal the vector length. + """ + instr = instructions.InsertElement(self.block, vector, value, idx, + name=name) + self._insert(instr) + return instr + + def shuffle_vector(self, vector1, vector2, mask, name=''): + """ + Constructs a permutation of elements from *vector1* and *vector2*. + Returns a new vector in the same length of *mask*. + + * *vector1* and *vector2* must have the same element type. + * *mask* must be a constant vector of integer types. + """ + instr = instructions.ShuffleVector(self.block, vector1, vector2, mask, + name=name) + self._insert(instr) + return instr + + # Aggregate APIs + + def extract_value(self, agg, idx, name=''): + """ + Extract member number *idx* from aggregate. + """ + if not isinstance(idx, (tuple, list)): + idx = [idx] + instr = instructions.ExtractValue(self.block, agg, idx, name=name) + self._insert(instr) + return instr + + def insert_value(self, agg, value, idx, name=''): + """ + Insert *value* into member number *idx* from aggregate. + """ + if not isinstance(idx, (tuple, list)): + idx = [idx] + instr = instructions.InsertValue(self.block, agg, value, idx, name=name) + self._insert(instr) + return instr + + # PHI APIs + + def phi(self, typ, name='', flags=()): + inst = instructions.PhiInstr(self.block, typ, name=name, flags=flags) + self._insert(inst) + return inst + + # Special API + + def unreachable(self): + inst = instructions.Unreachable(self.block) + self._set_terminator(inst) + return inst + + def atomic_rmw(self, op, ptr, val, ordering, name=''): + inst = instructions.AtomicRMW( + self.block, op, ptr, val, ordering, name=name) + self._insert(inst) + return inst + + def cmpxchg(self, ptr, cmp, val, ordering, failordering=None, name=''): + """ + Atomic compared-and-set: + atomic { + old = *ptr + success = (old == cmp) + if (success) + *ptr = val + } + name = { old, success } + + If failordering is `None`, the value of `ordering` is used. 
+ """ + failordering = ordering if failordering is None else failordering + inst = instructions.CmpXchg(self.block, ptr, cmp, val, ordering, + failordering, name=name) + self._insert(inst) + return inst + + def landingpad(self, typ, name='', cleanup=False): + inst = instructions.LandingPadInstr(self.block, typ, name, cleanup) + self._insert(inst) + return inst + + def assume(self, cond): + """ + Optimizer hint: assume *cond* is always true. + """ + fn = self.module.declare_intrinsic("llvm.assume") + return self.call(fn, [cond]) + + def fence(self, ordering, targetscope=None, name=''): + """ + Add a memory barrier, preventing certain reorderings of load and/or + store accesses with + respect to other processors and devices. + """ + inst = instructions.Fence(self.block, ordering, targetscope, name=name) + self._insert(inst) + return inst + + def comment(self, text): + """ + Puts a single-line comment into the generated IR. This will be ignored + by LLVM, but can be useful for debugging the output of a compiler. Adds + a comment to the source file. + + * *text* is a string that does not contain new line characters. + """ + inst = instructions.Comment(self.block, text) + self._insert(inst) + return inst + + @_uniop_intrinsic_int("llvm.bswap") + def bswap(self, cond): + """ + Used to byte swap integer values with an even number of bytes (positive + multiple of 16 bits) + """ + + @_uniop_intrinsic_int("llvm.bitreverse") + def bitreverse(self, cond): + """ + Reverse the bitpattern of an integer value; for example 0b10110110 + becomes 0b01101101. + """ + + @_uniop_intrinsic_int("llvm.ctpop") + def ctpop(self, cond): + """ + Counts the number of bits set in a value. + """ + + @_uniop_intrinsic_with_flag("llvm.ctlz") + def ctlz(self, cond, flag): + """ + Counts leading zero bits in *value*. Boolean *flag* indicates whether + the result is defined for ``0``. 
+ """ + + @_uniop_intrinsic_with_flag("llvm.cttz") + def cttz(self, cond, flag): + """ + Counts trailing zero bits in *value*. Boolean *flag* indicates whether + the result is defined for ``0``. + """ + + @_triop_intrinsic("llvm.fma") + def fma(self, a, b, c): + """ + Perform the fused multiply-add operation. + """ + + def convert_from_fp16(self, a, to=None, name=''): + """ + Convert from an i16 to the given FP type + """ + if not to: + raise TypeError("expected a float return type") + if not isinstance(to, (types.FloatType, types.DoubleType)): + raise TypeError("expected a float type, got %s" % to) + if not (isinstance(a.type, types.IntType) and a.type.width == 16): + raise TypeError("expected an i16 type, got %s" % a.type) + + opname = 'llvm.convert.from.fp16' + fn = self.module.declare_intrinsic(opname, [to]) + return self.call(fn, [a], name) + + @_uniop_intrinsic_float("llvm.convert.to.fp16") + def convert_to_fp16(self, a): + """ + Convert the given FP number to an i16 + """ diff --git a/vllm/lib/python3.10/site-packages/llvmlite/ir/context.py b/vllm/lib/python3.10/site-packages/llvmlite/ir/context.py new file mode 100644 index 0000000000000000000000000000000000000000..47d1ebbe2953d160f4dbb38b846b6d1d46582861 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/llvmlite/ir/context.py @@ -0,0 +1,20 @@ +from llvmlite.ir import _utils +from llvmlite.ir import types + + +class Context(object): + def __init__(self): + self.scope = _utils.NameScope() + self.identified_types = {} + + def get_identified_type(self, name): + if name not in self.identified_types: + self.scope.register(name) + ty = types.IdentifiedStructType(self, name) + self.identified_types[name] = ty + else: + ty = self.identified_types[name] + return ty + + +global_context = Context() diff --git a/vllm/lib/python3.10/site-packages/llvmlite/ir/instructions.py b/vllm/lib/python3.10/site-packages/llvmlite/ir/instructions.py new file mode 100644 index 
0000000000000000000000000000000000000000..c6d488aae092b9a4f172e20eb0ac826881c60ef4 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/llvmlite/ir/instructions.py @@ -0,0 +1,893 @@ +""" +Implementation of LLVM IR instructions. +""" + +from llvmlite.ir import types +from llvmlite.ir.values import (Block, Function, Value, NamedValue, Constant, + MetaDataArgument, MetaDataString, AttributeSet, + Undefined, ArgumentAttributes) +from llvmlite.ir._utils import _HasMetadata + + +class Instruction(NamedValue, _HasMetadata): + def __init__(self, parent, typ, opname, operands, name='', flags=()): + super(Instruction, self).__init__(parent, typ, name=name) + assert isinstance(parent, Block) + assert isinstance(flags, (tuple, list)) + self.opname = opname + self.operands = operands + self.flags = list(flags) + self.metadata = {} + + @property + def function(self): + return self.parent.function + + @property + def module(self): + return self.parent.function.module + + def descr(self, buf): + opname = self.opname + if self.flags: + opname = ' '.join([opname] + self.flags) + operands = ', '.join([op.get_reference() for op in self.operands]) + typ = self.type + metadata = self._stringify_metadata(leading_comma=True) + buf.append("{0} {1} {2}{3}\n" + .format(opname, typ, operands, metadata)) + + def replace_usage(self, old, new): + if old in self.operands: + ops = [] + for op in self.operands: + ops.append(new if op is old else op) + self.operands = tuple(ops) + self._clear_string_cache() + + def __repr__(self): + return "" % ( + self.__class__.__name__, self.name, self.type, + self.opname, self.operands) + + +class CallInstrAttributes(AttributeSet): + _known = frozenset(['convergent', 'noreturn', 'nounwind', 'readonly', + 'readnone', 'noinline', 'alwaysinline']) + + +TailMarkerOptions = frozenset(['tail', 'musttail', 'notail']) + + +class FastMathFlags(AttributeSet): + _known = frozenset(['fast', 'nnan', 'ninf', 'nsz', 'arcp', 'contract', + 'afn', 'reassoc']) + + +class 
CallInstr(Instruction): + def __init__(self, parent, func, args, name='', cconv=None, tail=None, + fastmath=(), attrs=(), arg_attrs=None): + self.cconv = (func.calling_convention + if cconv is None and isinstance(func, Function) + else cconv) + + # For backwards compatibility with previous API of accepting a "truthy" + # value for a hint to the optimizer to potentially tail optimize. + if isinstance(tail, str) and tail in TailMarkerOptions: + pass + elif tail: + tail = "tail" + else: + tail = "" + + self.tail = tail + self.fastmath = FastMathFlags(fastmath) + self.attributes = CallInstrAttributes(attrs) + self.arg_attributes = {} + if arg_attrs: + for idx, attrs in arg_attrs.items(): + if not (0 <= idx < len(args)): + raise ValueError("Invalid argument index {}" + .format(idx)) + self.arg_attributes[idx] = ArgumentAttributes(attrs) + + # Fix and validate arguments + args = list(args) + for i in range(len(func.function_type.args)): + arg = args[i] + expected_type = func.function_type.args[i] + if (isinstance(expected_type, types.MetaDataType) and + arg.type != expected_type): + arg = MetaDataArgument(arg) + if arg.type != expected_type: + msg = ("Type of #{0} arg mismatch: {1} != {2}" + .format(1 + i, expected_type, arg.type)) + raise TypeError(msg) + args[i] = arg + + super(CallInstr, self).__init__(parent, func.function_type.return_type, + "call", [func] + list(args), name=name) + + @property + def callee(self): + return self.operands[0] + + @callee.setter + def callee(self, newcallee): + self.operands[0] = newcallee + + @property + def args(self): + return self.operands[1:] + + def replace_callee(self, newfunc): + if newfunc.function_type != self.callee.function_type: + raise TypeError("New function has incompatible type") + self.callee = newfunc + + @property + def called_function(self): + """The callee function""" + return self.callee + + def _descr(self, buf, add_metadata): + def descr_arg(i, a): + if i in self.arg_attributes: + attrs = ' 
'.join(self.arg_attributes[i]._to_list(a.type)) + ' ' + else: + attrs = '' + return '{0} {1}{2}'.format(a.type, attrs, a.get_reference()) + args = ', '.join([descr_arg(i, a) for i, a in enumerate(self.args)]) + + fnty = self.callee.function_type + # Only print function type if variable-argument + if fnty.var_arg: + ty = fnty + # Otherwise, just print the return type. + else: + # Fastmath flag work only in this case + ty = fnty.return_type + callee_ref = "{0} {1}".format(ty, self.callee.get_reference()) + if self.cconv: + callee_ref = "{0} {1}".format(self.cconv, callee_ref) + + tail_marker = "" + if self.tail: + tail_marker = "{0} ".format(self.tail) + + fn_attrs = ' ' + ' '.join(self.attributes._to_list(fnty.return_type))\ + if self.attributes else '' + + fm_attrs = ' ' + ' '.join(self.fastmath._to_list(fnty.return_type))\ + if self.fastmath else '' + + buf.append("{tail}{op}{fastmath} {callee}({args}){attr}{meta}\n".format( + tail=tail_marker, + op=self.opname, + callee=callee_ref, + fastmath=fm_attrs, + args=args, + attr=fn_attrs, + meta=(self._stringify_metadata(leading_comma=True) + if add_metadata else ""), + )) + + def descr(self, buf): + self._descr(buf, add_metadata=True) + + +class InvokeInstr(CallInstr): + def __init__(self, parent, func, args, normal_to, unwind_to, name='', + cconv=None, fastmath=(), attrs=(), arg_attrs=None): + assert isinstance(normal_to, Block) + assert isinstance(unwind_to, Block) + super(InvokeInstr, self).__init__(parent, func, args, name, cconv, + tail=False, fastmath=fastmath, + attrs=attrs, arg_attrs=arg_attrs) + self.opname = "invoke" + self.normal_to = normal_to + self.unwind_to = unwind_to + + def descr(self, buf): + super(InvokeInstr, self)._descr(buf, add_metadata=False) + buf.append(" to label {0} unwind label {1}{metadata}\n".format( + self.normal_to.get_reference(), + self.unwind_to.get_reference(), + metadata=self._stringify_metadata(leading_comma=True), + )) + + +class Terminator(Instruction): + def __init__(self, 
parent, opname, operands): + super(Terminator, self).__init__(parent, types.VoidType(), opname, + operands) + + def descr(self, buf): + opname = self.opname + operands = ', '.join(["{0} {1}".format(op.type, op.get_reference()) + for op in self.operands]) + metadata = self._stringify_metadata(leading_comma=True) + buf.append("{0} {1}{2}".format(opname, operands, metadata)) + + +class PredictableInstr(Instruction): + + def set_weights(self, weights): + operands = [MetaDataString(self.module, "branch_weights")] + for w in weights: + if w < 0: + raise ValueError("branch weight must be a positive integer") + operands.append(Constant(types.IntType(32), w)) + md = self.module.add_metadata(operands) + self.set_metadata("prof", md) + + +class Ret(Terminator): + def __init__(self, parent, opname, return_value=None): + operands = [return_value] if return_value is not None else [] + super(Ret, self).__init__(parent, opname, operands) + + @property + def return_value(self): + if self.operands: + return self.operands[0] + else: + return None + + def descr(self, buf): + return_value = self.return_value + metadata = self._stringify_metadata(leading_comma=True) + if return_value is not None: + buf.append("{0} {1} {2}{3}\n" + .format(self.opname, return_value.type, + return_value.get_reference(), + metadata)) + else: + buf.append("{0}{1}\n".format(self.opname, metadata)) + + +class Branch(Terminator): + pass + + +class ConditionalBranch(PredictableInstr, Terminator): + pass + + +class IndirectBranch(PredictableInstr, Terminator): + def __init__(self, parent, opname, addr): + super(IndirectBranch, self).__init__(parent, opname, [addr]) + self.destinations = [] + + @property + def address(self): + return self.operands[0] + + def add_destination(self, block): + assert isinstance(block, Block) + self.destinations.append(block) + + def descr(self, buf): + destinations = ["label {0}".format(blk.get_reference()) + for blk in self.destinations] + buf.append("indirectbr {0} {1}, [{2}] 
{3}\n".format( + self.address.type, + self.address.get_reference(), + ', '.join(destinations), + self._stringify_metadata(leading_comma=True), + )) + + +class SwitchInstr(PredictableInstr, Terminator): + + def __init__(self, parent, opname, val, default): + super(SwitchInstr, self).__init__(parent, opname, [val]) + self.default = default + self.cases = [] + + @property + def value(self): + return self.operands[0] + + def add_case(self, val, block): + assert isinstance(block, Block) + if not isinstance(val, Value): + val = Constant(self.value.type, val) + self.cases.append((val, block)) + + def descr(self, buf): + cases = ["{0} {1}, label {2}".format(val.type, val.get_reference(), + blk.get_reference()) + for val, blk in self.cases] + buf.append("switch {0} {1}, label {2} [{3}] {4}\n".format( + self.value.type, + self.value.get_reference(), + self.default.get_reference(), + ' '.join(cases), + self._stringify_metadata(leading_comma=True), + )) + + +class Resume(Terminator): + pass + + +class SelectInstr(Instruction): + def __init__(self, parent, cond, lhs, rhs, name='', flags=()): + assert lhs.type == rhs.type + super(SelectInstr, self).__init__(parent, lhs.type, "select", + [cond, lhs, rhs], name=name, + flags=flags) + + @property + def cond(self): + return self.operands[0] + + @property + def lhs(self): + return self.operands[1] + + @property + def rhs(self): + return self.operands[2] + + def descr(self, buf): + buf.append("select {0} {1} {2}, {3} {4}, {5} {6} {7}\n".format( + ' '.join(self.flags), + self.cond.type, self.cond.get_reference(), + self.lhs.type, self.lhs.get_reference(), + self.rhs.type, self.rhs.get_reference(), + self._stringify_metadata(leading_comma=True), + )) + + +class CompareInstr(Instruction): + # Define the following in subclasses + OPNAME = 'invalid-compare' + VALID_OP = {} + + def __init__(self, parent, op, lhs, rhs, name='', flags=[]): + if op not in self.VALID_OP: + raise ValueError("invalid comparison %r for %s" % (op, self.OPNAME)) + 
for flag in flags: + if flag not in self.VALID_FLAG: + raise ValueError("invalid flag %r for %s" % (flag, self.OPNAME)) + opname = self.OPNAME + if isinstance(lhs.type, types.VectorType): + typ = types.VectorType(types.IntType(1), lhs.type.count) + else: + typ = types.IntType(1) + super(CompareInstr, self).__init__(parent, typ, + opname, [lhs, rhs], flags=flags, + name=name) + self.op = op + + def descr(self, buf): + buf.append("{opname}{flags} {op} {ty} {lhs}, {rhs} {meta}\n".format( + opname=self.opname, + flags=''.join(' ' + it for it in self.flags), + op=self.op, + ty=self.operands[0].type, + lhs=self.operands[0].get_reference(), + rhs=self.operands[1].get_reference(), + meta=self._stringify_metadata(leading_comma=True), + )) + + +class ICMPInstr(CompareInstr): + OPNAME = 'icmp' + VALID_OP = { + 'eq': 'equal', + 'ne': 'not equal', + 'ugt': 'unsigned greater than', + 'uge': 'unsigned greater or equal', + 'ult': 'unsigned less than', + 'ule': 'unsigned less or equal', + 'sgt': 'signed greater than', + 'sge': 'signed greater or equal', + 'slt': 'signed less than', + 'sle': 'signed less or equal', + } + VALID_FLAG = set() + + +class FCMPInstr(CompareInstr): + OPNAME = 'fcmp' + VALID_OP = { + 'false': 'no comparison, always returns false', + 'oeq': 'ordered and equal', + 'ogt': 'ordered and greater than', + 'oge': 'ordered and greater than or equal', + 'olt': 'ordered and less than', + 'ole': 'ordered and less than or equal', + 'one': 'ordered and not equal', + 'ord': 'ordered (no nans)', + 'ueq': 'unordered or equal', + 'ugt': 'unordered or greater than', + 'uge': 'unordered or greater than or equal', + 'ult': 'unordered or less than', + 'ule': 'unordered or less than or equal', + 'une': 'unordered or not equal', + 'uno': 'unordered (either nans)', + 'true': 'no comparison, always returns true', + } + VALID_FLAG = {'nnan', 'ninf', 'nsz', 'arcp', 'contract', 'afn', 'reassoc', + 'fast'} + + +class CastInstr(Instruction): + def __init__(self, parent, op, val, typ, 
name=''): + super(CastInstr, self).__init__(parent, typ, op, [val], name=name) + + def descr(self, buf): + buf.append("{0} {1} {2} to {3} {4}\n".format( + self.opname, + self.operands[0].type, + self.operands[0].get_reference(), + self.type, + self._stringify_metadata(leading_comma=True), + )) + + +class LoadInstr(Instruction): + + def __init__(self, parent, ptr, name=''): + super(LoadInstr, self).__init__(parent, ptr.type.pointee, "load", + [ptr], name=name) + self.align = None + + def descr(self, buf): + [val] = self.operands + if self.align is not None: + align = ', align %d' % (self.align) + else: + align = '' + buf.append("load {0}, {1} {2}{3}{4}\n".format( + val.type.pointee, + val.type, + val.get_reference(), + align, + self._stringify_metadata(leading_comma=True), + )) + + +class StoreInstr(Instruction): + def __init__(self, parent, val, ptr): + super(StoreInstr, self).__init__(parent, types.VoidType(), "store", + [val, ptr]) + + def descr(self, buf): + val, ptr = self.operands + if self.align is not None: + align = ', align %d' % (self.align) + else: + align = '' + buf.append("store {0} {1}, {2} {3}{4}{5}\n".format( + val.type, + val.get_reference(), + ptr.type, + ptr.get_reference(), + align, + self._stringify_metadata(leading_comma=True), + )) + + +class LoadAtomicInstr(Instruction): + def __init__(self, parent, ptr, ordering, align, name=''): + super(LoadAtomicInstr, self).__init__(parent, ptr.type.pointee, + "load atomic", [ptr], name=name) + self.ordering = ordering + self.align = align + + def descr(self, buf): + [val] = self.operands + buf.append("load atomic {0}, {1} {2} {3}, align {4}{5}\n".format( + val.type.pointee, + val.type, + val.get_reference(), + self.ordering, + self.align, + self._stringify_metadata(leading_comma=True), + )) + + +class StoreAtomicInstr(Instruction): + def __init__(self, parent, val, ptr, ordering, align): + super(StoreAtomicInstr, self).__init__(parent, types.VoidType(), + "store atomic", [val, ptr]) + self.ordering = 
ordering + self.align = align + + def descr(self, buf): + val, ptr = self.operands + buf.append("store atomic {0} {1}, {2} {3} {4}, align {5}{6}\n".format( + val.type, + val.get_reference(), + ptr.type, + ptr.get_reference(), + self.ordering, + self.align, + self._stringify_metadata(leading_comma=True), + )) + + +class AllocaInstr(Instruction): + def __init__(self, parent, typ, count, name): + operands = [count] if count else () + super(AllocaInstr, self).__init__(parent, typ.as_pointer(), "alloca", + operands, name) + self.align = None + + def descr(self, buf): + buf.append("{0} {1}".format(self.opname, self.type.pointee)) + if self.operands: + op, = self.operands + buf.append(", {0} {1}".format(op.type, op.get_reference())) + if self.align is not None: + buf.append(", align {0}".format(self.align)) + if self.metadata: + buf.append(self._stringify_metadata(leading_comma=True)) + + +class GEPInstr(Instruction): + def __init__(self, parent, ptr, indices, inbounds, name): + typ = ptr.type + lasttyp = None + lastaddrspace = 0 + for i in indices: + lasttyp, typ = typ, typ.gep(i) + # inherit the addrspace from the last seen pointer + if isinstance(lasttyp, types.PointerType): + lastaddrspace = lasttyp.addrspace + + if (not isinstance(typ, types.PointerType) and + isinstance(lasttyp, types.PointerType)): + typ = lasttyp + else: + typ = typ.as_pointer(lastaddrspace) + + super(GEPInstr, self).__init__(parent, typ, "getelementptr", + [ptr] + list(indices), name=name) + self.pointer = ptr + self.indices = indices + self.inbounds = inbounds + + def descr(self, buf): + indices = ['{0} {1}'.format(i.type, i.get_reference()) + for i in self.indices] + op = "getelementptr inbounds" if self.inbounds else "getelementptr" + buf.append("{0} {1}, {2} {3}, {4} {5}\n".format( + op, + self.pointer.type.pointee, + self.pointer.type, + self.pointer.get_reference(), + ', '.join(indices), + self._stringify_metadata(leading_comma=True), + )) + + +class PhiInstr(Instruction): + def 
__init__(self, parent, typ, name, flags=()): + super(PhiInstr, self).__init__(parent, typ, "phi", (), name=name, + flags=flags) + self.incomings = [] + + def descr(self, buf): + incs = ', '.join('[{0}, {1}]'.format(v.get_reference(), + b.get_reference()) + for v, b in self.incomings) + buf.append("phi {0} {1} {2} {3}\n".format( + ' '.join(self.flags), + self.type, + incs, + self._stringify_metadata(leading_comma=True), + )) + + def add_incoming(self, value, block): + assert isinstance(block, Block) + self.incomings.append((value, block)) + + def replace_usage(self, old, new): + self.incomings = [((new if val is old else val), blk) + for (val, blk) in self.incomings] + + +class ExtractElement(Instruction): + def __init__(self, parent, vector, index, name=''): + if not isinstance(vector.type, types.VectorType): + raise TypeError("vector needs to be of VectorType.") + if not isinstance(index.type, types.IntType): + raise TypeError("index needs to be of IntType.") + typ = vector.type.element + super(ExtractElement, self).__init__(parent, typ, "extractelement", + [vector, index], name=name) + + def descr(self, buf): + operands = ", ".join("{0} {1}".format( + op.type, op.get_reference()) for op in self.operands) + buf.append("{opname} {operands}\n".format( + opname=self.opname, operands=operands)) + + +class InsertElement(Instruction): + def __init__(self, parent, vector, value, index, name=''): + if not isinstance(vector.type, types.VectorType): + raise TypeError("vector needs to be of VectorType.") + if not value.type == vector.type.element: + raise TypeError( + "value needs to be of type {} not {}.".format( + vector.type.element, value.type)) + if not isinstance(index.type, types.IntType): + raise TypeError("index needs to be of IntType.") + typ = vector.type + super(InsertElement, self).__init__(parent, typ, "insertelement", + [vector, value, index], name=name) + + def descr(self, buf): + operands = ", ".join("{0} {1}".format( + op.type, op.get_reference()) for op in 
self.operands) + buf.append("{opname} {operands}\n".format( + opname=self.opname, operands=operands)) + + +class ShuffleVector(Instruction): + def __init__(self, parent, vector1, vector2, mask, name=''): + if not isinstance(vector1.type, types.VectorType): + raise TypeError("vector1 needs to be of VectorType.") + if vector2 != Undefined: + if vector2.type != vector1.type: + raise TypeError("vector2 needs to be " + + "Undefined or of the same type as vector1.") + if (not isinstance(mask, Constant) or + not isinstance(mask.type, types.VectorType) or + not (isinstance(mask.type.element, types.IntType) and + mask.type.element.width == 32)): + raise TypeError("mask needs to be a constant i32 vector.") + typ = types.VectorType(vector1.type.element, mask.type.count) + index_range = range(vector1.type.count + if vector2 == Undefined + else 2 * vector1.type.count) + if not all(ii.constant in index_range for ii in mask.constant): + raise IndexError( + "mask values need to be in {0}".format(index_range), + ) + super(ShuffleVector, self).__init__(parent, typ, "shufflevector", + [vector1, vector2, mask], name=name) + + def descr(self, buf): + buf.append("shufflevector {0} {1}\n".format( + ", ".join("{0} {1}".format(op.type, op.get_reference()) + for op in self.operands), + self._stringify_metadata(leading_comma=True), + )) + + +class ExtractValue(Instruction): + def __init__(self, parent, agg, indices, name=''): + typ = agg.type + try: + for i in indices: + typ = typ.elements[i] + except (AttributeError, IndexError): + raise TypeError("Can't index at %r in %s" + % (list(indices), agg.type)) + + super(ExtractValue, self).__init__(parent, typ, "extractvalue", + [agg], name=name) + + self.aggregate = agg + self.indices = indices + + def descr(self, buf): + indices = [str(i) for i in self.indices] + + buf.append("extractvalue {0} {1}, {2} {3}\n".format( + self.aggregate.type, + self.aggregate.get_reference(), + ', '.join(indices), + self._stringify_metadata(leading_comma=True), + 
)) + + +class InsertValue(Instruction): + def __init__(self, parent, agg, elem, indices, name=''): + typ = agg.type + try: + for i in indices: + typ = typ.elements[i] + except (AttributeError, IndexError): + raise TypeError("Can't index at %r in %s" + % (list(indices), agg.type)) + if elem.type != typ: + raise TypeError("Can only insert %s at %r in %s: got %s" + % (typ, list(indices), agg.type, elem.type)) + super(InsertValue, self).__init__(parent, agg.type, "insertvalue", + [agg, elem], name=name) + + self.aggregate = agg + self.value = elem + self.indices = indices + + def descr(self, buf): + indices = [str(i) for i in self.indices] + + buf.append("insertvalue {0} {1}, {2} {3}, {4} {5}\n".format( + self.aggregate.type, self.aggregate.get_reference(), + self.value.type, self.value.get_reference(), + ', '.join(indices), + self._stringify_metadata(leading_comma=True), + )) + + +class Unreachable(Instruction): + def __init__(self, parent): + super(Unreachable, self).__init__(parent, types.VoidType(), + "unreachable", (), name='') + + def descr(self, buf): + buf += (self.opname, "\n") + + +class InlineAsm(object): + def __init__(self, ftype, asm, constraint, side_effect=False): + self.type = ftype.return_type + self.function_type = ftype + self.asm = asm + self.constraint = constraint + self.side_effect = side_effect + + def descr(self, buf): + sideeffect = 'sideeffect' if self.side_effect else '' + fmt = 'asm {sideeffect} "{asm}", "{constraint}"\n' + buf.append(fmt.format(sideeffect=sideeffect, asm=self.asm, + constraint=self.constraint)) + + def get_reference(self): + buf = [] + self.descr(buf) + return "".join(buf) + + def __str__(self): + return "{0} {1}".format(self.type, self.get_reference()) + + +class AtomicRMW(Instruction): + def __init__(self, parent, op, ptr, val, ordering, name): + super(AtomicRMW, self).__init__(parent, val.type, "atomicrmw", + (ptr, val), name=name) + self.operation = op + self.ordering = ordering + + def descr(self, buf): + ptr, val = 
self.operands + fmt = ("atomicrmw {op} {ptrty} {ptr}, {valty} {val} {ordering} " + "{metadata}\n") + buf.append(fmt.format(op=self.operation, + ptrty=ptr.type, + ptr=ptr.get_reference(), + valty=val.type, + val=val.get_reference(), + ordering=self.ordering, + metadata=self._stringify_metadata( + leading_comma=True), + )) + + +class CmpXchg(Instruction): + """This instruction has changed since llvm3.5. It is not compatible with + older llvm versions. + """ + + def __init__(self, parent, ptr, cmp, val, ordering, failordering, name): + outtype = types.LiteralStructType([val.type, types.IntType(1)]) + super(CmpXchg, self).__init__(parent, outtype, "cmpxchg", + (ptr, cmp, val), name=name) + self.ordering = ordering + self.failordering = failordering + + def descr(self, buf): + ptr, cmpval, val = self.operands + fmt = "cmpxchg {ptrty} {ptr}, {ty} {cmp}, {ty} {val} {ordering} " \ + "{failordering} {metadata}\n" + buf.append(fmt.format(ptrty=ptr.type, + ptr=ptr.get_reference(), + ty=cmpval.type, + cmp=cmpval.get_reference(), + val=val.get_reference(), + ordering=self.ordering, + failordering=self.failordering, + metadata=self._stringify_metadata( + leading_comma=True), + )) + + +class _LandingPadClause(object): + def __init__(self, value): + self.value = value + + def __str__(self): + return "{kind} {type} {value}".format( + kind=self.kind, + type=self.value.type, + value=self.value.get_reference()) + + +class CatchClause(_LandingPadClause): + kind = 'catch' + + +class FilterClause(_LandingPadClause): + kind = 'filter' + + def __init__(self, value): + assert isinstance(value, Constant) + assert isinstance(value.type, types.ArrayType) + super(FilterClause, self).__init__(value) + + +class LandingPadInstr(Instruction): + def __init__(self, parent, typ, name='', cleanup=False): + super(LandingPadInstr, self).__init__(parent, typ, "landingpad", [], + name=name) + self.cleanup = cleanup + self.clauses = [] + + def add_clause(self, clause): + assert isinstance(clause, 
_LandingPadClause) + self.clauses.append(clause) + + def descr(self, buf): + fmt = "landingpad {type}{cleanup}{clauses}\n" + buf.append(fmt.format(type=self.type, + cleanup=' cleanup' if self.cleanup else '', + clauses=''.join(["\n {0}".format(clause) + for clause in self.clauses]), + )) + + +class Fence(Instruction): + """ + The `fence` instruction. + + As of LLVM 5.0.1: + + fence [syncscope("")] ; yields void + """ + + VALID_FENCE_ORDERINGS = {"acquire", "release", "acq_rel", "seq_cst"} + + def __init__(self, parent, ordering, targetscope=None, name=''): + super(Fence, self).__init__(parent, types.VoidType(), "fence", (), + name=name) + if ordering not in self.VALID_FENCE_ORDERINGS: + msg = "Invalid fence ordering \"{0}\"! Should be one of {1}." + raise ValueError(msg .format(ordering, + ", ".join(self.VALID_FENCE_ORDERINGS))) + self.ordering = ordering + self.targetscope = targetscope + + def descr(self, buf): + if self.targetscope is None: + syncscope = "" + else: + syncscope = 'syncscope("{0}") '.format(self.targetscope) + + fmt = "fence {syncscope}{ordering}\n" + buf.append(fmt.format(syncscope=syncscope, + ordering=self.ordering, + )) + + +class Comment(Instruction): + """ + A line comment. 
+ """ + + def __init__(self, parent, text): + super(Comment, self).__init__(parent, types.VoidType(), ";", (), + name='') + assert "\n" not in text, "Comment cannot contain new line" + self.text = text + + def descr(self, buf): + buf.append(f"; {self.text}") diff --git a/vllm/lib/python3.10/site-packages/llvmlite/ir/module.py b/vllm/lib/python3.10/site-packages/llvmlite/ir/module.py new file mode 100644 index 0000000000000000000000000000000000000000..464f91ec34344e02bd9fb7e5f6dec46803407f00 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/llvmlite/ir/module.py @@ -0,0 +1,246 @@ +import collections + +from llvmlite.ir import context, values, types, _utils + + +class Module(object): + def __init__(self, name='', context=context.global_context): + self.context = context + self.name = name # name is for debugging/informational + self.data_layout = "" + self.scope = _utils.NameScope() + self.triple = 'unknown-unknown-unknown' + self.globals = collections.OrderedDict() + # Innamed metadata nodes. 
+ self.metadata = [] + # Named metadata nodes + self.namedmetadata = {} + # Cache for metadata node deduplication + self._metadatacache = {} + + def _fix_metadata_operands(self, operands): + fixed_ops = [] + for op in operands: + if op is None: + # A literal None creates a null metadata value + op = types.MetaDataType()(None) + elif isinstance(op, str): + # A literal string creates a metadata string value + op = values.MetaDataString(self, op) + elif isinstance(op, (list, tuple)): + # A sequence creates a metadata node reference + op = self.add_metadata(op) + fixed_ops.append(op) + return fixed_ops + + def _fix_di_operands(self, operands): + fixed_ops = [] + for name, op in operands: + if isinstance(op, (list, tuple)): + # A sequence creates a metadata node reference + op = self.add_metadata(op) + fixed_ops.append((name, op)) + return fixed_ops + + def add_metadata(self, operands): + """ + Add an unnamed metadata to the module with the given *operands* + (a sequence of values) or return a previous equivalent metadata. + A MDValue instance is returned, it can then be associated to + e.g. an instruction. + """ + if not isinstance(operands, (list, tuple)): + raise TypeError("expected a list or tuple of metadata values, " + "got %r" % (operands,)) + operands = self._fix_metadata_operands(operands) + key = tuple(operands) + if key not in self._metadatacache: + n = len(self.metadata) + md = values.MDValue(self, operands, name=str(n)) + self._metadatacache[key] = md + else: + md = self._metadatacache[key] + return md + + def add_debug_info(self, kind, operands, is_distinct=False): + """ + Add debug information metadata to the module with the given + *operands* (a dict of values with string keys) or return + a previous equivalent metadata. *kind* is a string of the + debug information kind (e.g. "DICompileUnit"). + + A DIValue instance is returned, it can then be associated to e.g. + an instruction. 
+ """ + operands = tuple(sorted(self._fix_di_operands(operands.items()))) + key = (kind, operands, is_distinct) + if key not in self._metadatacache: + n = len(self.metadata) + di = values.DIValue(self, is_distinct, kind, operands, name=str(n)) + self._metadatacache[key] = di + else: + di = self._metadatacache[key] + return di + + def add_named_metadata(self, name, element=None): + """ + Add a named metadata node to the module, if it doesn't exist, + or return the existing node. + If *element* is given, it will append a new element to + the named metadata node. If *element* is a sequence of values + (rather than a metadata value), a new unnamed node will first be + created. + + Example:: + module.add_named_metadata("llvm.ident", ["llvmlite/1.0"]) + """ + if name in self.namedmetadata: + nmd = self.namedmetadata[name] + else: + nmd = self.namedmetadata[name] = values.NamedMetaData(self) + if element is not None: + if not isinstance(element, values.Value): + element = self.add_metadata(element) + if not isinstance(element.type, types.MetaDataType): + raise TypeError("wrong type for metadata element: got %r" + % (element,)) + nmd.add(element) + return nmd + + def get_named_metadata(self, name): + """ + Return the metadata node with the given *name*. KeyError is raised + if no such node exists (contrast with add_named_metadata()). + """ + return self.namedmetadata[name] + + @property + def functions(self): + """ + A list of functions declared or defined in this module. + """ + return [v for v in self.globals.values() + if isinstance(v, values.Function)] + + @property + def global_values(self): + """ + An iterable of global values in this module. + """ + return self.globals.values() + + def get_global(self, name): + """ + Get a global value by name. + """ + return self.globals[name] + + def add_global(self, globalvalue): + """ + Add a new global value. 
+ """ + assert globalvalue.name not in self.globals + self.globals[globalvalue.name] = globalvalue + + def get_unique_name(self, name=''): + """ + Get a unique global name with the following *name* hint. + """ + return self.scope.deduplicate(name) + + def declare_intrinsic(self, intrinsic, tys=(), fnty=None): + def _error(): + raise NotImplementedError("unknown intrinsic %r with %d types" + % (intrinsic, len(tys))) + + if intrinsic in {'llvm.cttz', 'llvm.ctlz', 'llvm.fma'}: + suffixes = [tys[0].intrinsic_name] + else: + suffixes = [t.intrinsic_name for t in tys] + name = '.'.join([intrinsic] + suffixes) + if name in self.globals: + return self.globals[name] + + if fnty is not None: + # General case: function type is given + pass + # Compute function type if omitted for common cases + elif len(tys) == 0 and intrinsic == 'llvm.assume': + fnty = types.FunctionType(types.VoidType(), [types.IntType(1)]) + elif len(tys) == 1: + if intrinsic == 'llvm.powi': + fnty = types.FunctionType(tys[0], [tys[0], types.IntType(32)]) + elif intrinsic == 'llvm.pow': + fnty = types.FunctionType(tys[0], tys * 2) + elif intrinsic == 'llvm.convert.from.fp16': + fnty = types.FunctionType(tys[0], [types.IntType(16)]) + elif intrinsic == 'llvm.convert.to.fp16': + fnty = types.FunctionType(types.IntType(16), tys) + else: + fnty = types.FunctionType(tys[0], tys) + elif len(tys) == 2: + if intrinsic == 'llvm.memset': + tys = [tys[0], types.IntType(8), tys[1], + types.IntType(1)] + fnty = types.FunctionType(types.VoidType(), tys) + elif intrinsic in {'llvm.cttz', 'llvm.ctlz'}: + tys = [tys[0], types.IntType(1)] + fnty = types.FunctionType(tys[0], tys) + else: + _error() + elif len(tys) == 3: + if intrinsic in ('llvm.memcpy', 'llvm.memmove'): + tys = tys + [types.IntType(1)] + fnty = types.FunctionType(types.VoidType(), tys) + elif intrinsic == 'llvm.fma': + tys = [tys[0]] * 3 + fnty = types.FunctionType(tys[0], tys) + else: + _error() + else: + _error() + return values.Function(self, fnty, 
name=name) + + def get_identified_types(self): + return self.context.identified_types + + def _get_body_lines(self): + # Type declarations + lines = [it.get_declaration() + for it in self.get_identified_types().values()] + # Global values (including function definitions) + lines += [str(v) for v in self.globals.values()] + return lines + + def _get_metadata_lines(self): + mdbuf = [] + for k, v in self.namedmetadata.items(): + mdbuf.append("!{name} = !{{ {operands} }}".format( + name=k, operands=', '.join(i.get_reference() + for i in v.operands))) + for md in self.metadata: + mdbuf.append(str(md)) + return mdbuf + + def _stringify_body(self): + # For testing + return "\n".join(self._get_body_lines()) + + def _stringify_metadata(self): + # For testing + return "\n".join(self._get_metadata_lines()) + + def __repr__(self): + lines = [] + # Header + lines += [ + '; ModuleID = "%s"' % (self.name,), + 'target triple = "%s"' % (self.triple,), + 'target datalayout = "%s"' % (self.data_layout,), + ''] + # Body + lines += self._get_body_lines() + # Metadata + lines += self._get_metadata_lines() + + return "\n".join(lines) diff --git a/vllm/lib/python3.10/site-packages/llvmlite/ir/transforms.py b/vllm/lib/python3.10/site-packages/llvmlite/ir/transforms.py new file mode 100644 index 0000000000000000000000000000000000000000..a69113d36e93b6b9e42a2a15119697375cff62e5 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/llvmlite/ir/transforms.py @@ -0,0 +1,64 @@ +from llvmlite.ir import CallInstr + + +class Visitor(object): + def visit(self, module): + self._module = module + for func in module.functions: + self.visit_Function(func) + + def visit_Function(self, func): + self._function = func + for bb in func.blocks: + self.visit_BasicBlock(bb) + + def visit_BasicBlock(self, bb): + self._basic_block = bb + for instr in bb.instructions: + self.visit_Instruction(instr) + + def visit_Instruction(self, instr): + raise NotImplementedError + + @property + def module(self): + return 
self._module + + @property + def function(self): + return self._function + + @property + def basic_block(self): + return self._basic_block + + +class CallVisitor(Visitor): + def visit_Instruction(self, instr): + if isinstance(instr, CallInstr): + self.visit_Call(instr) + + def visit_Call(self, instr): + raise NotImplementedError + + +class ReplaceCalls(CallVisitor): + def __init__(self, orig, repl): + super(ReplaceCalls, self).__init__() + self.orig = orig + self.repl = repl + self.calls = [] + + def visit_Call(self, instr): + if instr.callee == self.orig: + instr.replace_callee(self.repl) + self.calls.append(instr) + + +def replace_all_calls(mod, orig, repl): + """Replace all calls to `orig` to `repl` in module `mod`. + Returns the references to the returned calls + """ + rc = ReplaceCalls(orig, repl) + rc.visit(mod) + return rc.calls diff --git a/vllm/lib/python3.10/site-packages/llvmlite/ir/types.py b/vllm/lib/python3.10/site-packages/llvmlite/ir/types.py new file mode 100644 index 0000000000000000000000000000000000000000..707246e617f04c2636b0098c6cb4b58f63873522 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/llvmlite/ir/types.py @@ -0,0 +1,614 @@ +""" +Classes that are LLVM types +""" + +import struct + +from llvmlite.ir._utils import _StrCaching + + +def _wrapname(x): + return '"{0}"'.format(x.replace('\\', '\\5c').replace('"', '\\22')) + + +class Type(_StrCaching): + """ + The base class for all LLVM types. + """ + is_pointer = False + null = 'zeroinitializer' + + def __repr__(self): + return "<%s %s>" % (type(self), str(self)) + + def _to_string(self): + raise NotImplementedError + + def as_pointer(self, addrspace=0): + return PointerType(self, addrspace) + + def __ne__(self, other): + return not (self == other) + + def _get_ll_pointer_type(self, target_data, context=None): + """ + Convert this type object to an LLVM type. 
+ """ + from llvmlite.ir import Module, GlobalVariable + from llvmlite.binding import parse_assembly + + if context is None: + m = Module() + else: + m = Module(context=context) + foo = GlobalVariable(m, self, name="foo") + with parse_assembly(str(m)) as llmod: + return llmod.get_global_variable(foo.name).type + + def get_abi_size(self, target_data, context=None): + """ + Get the ABI size of this type according to data layout *target_data*. + """ + llty = self._get_ll_pointer_type(target_data, context) + return target_data.get_pointee_abi_size(llty) + + def get_abi_alignment(self, target_data, context=None): + """ + Get the minimum ABI alignment of this type according to data layout + *target_data*. + """ + llty = self._get_ll_pointer_type(target_data, context) + return target_data.get_pointee_abi_alignment(llty) + + def format_constant(self, value): + """ + Format constant *value* of this type. This method may be overriden + by subclasses. + """ + return str(value) + + def wrap_constant_value(self, value): + """ + Wrap constant *value* if necessary. This method may be overriden + by subclasses (especially aggregate types). + """ + return value + + def __call__(self, value): + """ + Create a LLVM constant of this type with the given Python value. + """ + from llvmlite.ir import Constant + return Constant(self, value) + + +class MetaDataType(Type): + + def _to_string(self): + return "metadata" + + def as_pointer(self): + raise TypeError + + def __eq__(self, other): + return isinstance(other, MetaDataType) + + def __hash__(self): + return hash(MetaDataType) + + +class LabelType(Type): + """ + The label type is the type of e.g. basic blocks. + """ + + def _to_string(self): + return "label" + + +class PointerType(Type): + """ + The type of all pointer values. 
+ """ + is_pointer = True + null = 'null' + + def __init__(self, pointee, addrspace=0): + assert not isinstance(pointee, VoidType) + self.pointee = pointee + self.addrspace = addrspace + + def _to_string(self): + if self.addrspace != 0: + return "{0} addrspace({1})*".format(self.pointee, self.addrspace) + else: + return "{0}*".format(self.pointee) + + def __eq__(self, other): + if isinstance(other, PointerType): + return (self.pointee, self.addrspace) == (other.pointee, + other.addrspace) + else: + return False + + def __hash__(self): + return hash(PointerType) + + def gep(self, i): + """ + Resolve the type of the i-th element (for getelementptr lookups). + """ + if not isinstance(i.type, IntType): + raise TypeError(i.type) + return self.pointee + + @property + def intrinsic_name(self): + return 'p%d%s' % (self.addrspace, self.pointee.intrinsic_name) + + +class VoidType(Type): + """ + The type for empty values (e.g. a function returning no value). + """ + + def _to_string(self): + return 'void' + + def __eq__(self, other): + return isinstance(other, VoidType) + + def __hash__(self): + return hash(VoidType) + + +class FunctionType(Type): + """ + The type for functions. + """ + + def __init__(self, return_type, args, var_arg=False): + self.return_type = return_type + self.args = tuple(args) + self.var_arg = var_arg + + def _to_string(self): + if self.args: + strargs = ', '.join([str(a) for a in self.args]) + if self.var_arg: + return '{0} ({1}, ...)'.format(self.return_type, strargs) + else: + return '{0} ({1})'.format(self.return_type, strargs) + elif self.var_arg: + return '{0} (...)'.format(self.return_type) + else: + return '{0} ()'.format(self.return_type) + + def __eq__(self, other): + if isinstance(other, FunctionType): + return (self.return_type == other.return_type and + self.args == other.args and self.var_arg == other.var_arg) + else: + return False + + def __hash__(self): + return hash(FunctionType) + + +class IntType(Type): + """ + The type for integers. 
+ """ + null = '0' + _instance_cache = {} + width: int + + def __new__(cls, bits): + # Cache all common integer types + if 0 <= bits <= 128: + try: + return cls._instance_cache[bits] + except KeyError: + inst = cls._instance_cache[bits] = cls.__new(bits) + return inst + return cls.__new(bits) + + @classmethod + def __new(cls, bits): + assert isinstance(bits, int) and bits >= 0 + self = super(IntType, cls).__new__(cls) + self.width = bits + return self + + def __getnewargs__(self): + return self.width, + + def __copy__(self): + return self + + def _to_string(self): + return 'i%u' % (self.width,) + + def __eq__(self, other): + if isinstance(other, IntType): + return self.width == other.width + else: + return False + + def __hash__(self): + return hash(IntType) + + def format_constant(self, val): + if isinstance(val, bool): + return str(val).lower() + else: + return str(val) + + def wrap_constant_value(self, val): + if val is None: + return 0 + return val + + @property + def intrinsic_name(self): + return str(self) + + +def _as_float(value): + """ + Truncate to single-precision float. + """ + return struct.unpack('f', struct.pack('f', value))[0] + + +def _as_half(value): + """ + Truncate to half-precision float. + """ + try: + return struct.unpack('e', struct.pack('e', value))[0] + except struct.error: + # 'e' only added in Python 3.6+ + return _as_float(value) + + +def _format_float_as_hex(value, packfmt, unpackfmt, numdigits): + raw = struct.pack(packfmt, float(value)) + intrep = struct.unpack(unpackfmt, raw)[0] + out = '{{0:#{0}x}}'.format(numdigits).format(intrep) + return out + + +def _format_double(value): + """ + Format *value* as a hexadecimal string of its IEEE double precision + representation. 
+ """ + return _format_float_as_hex(value, 'd', 'Q', 16) + + +class _BaseFloatType(Type): + + def __new__(cls): + return cls._instance_cache + + def __eq__(self, other): + return isinstance(other, type(self)) + + def __hash__(self): + return hash(type(self)) + + @classmethod + def _create_instance(cls): + cls._instance_cache = super(_BaseFloatType, cls).__new__(cls) + + +class HalfType(_BaseFloatType): + """ + The type for single-precision floats. + """ + null = '0.0' + intrinsic_name = 'f16' + + def __str__(self): + return 'half' + + def format_constant(self, value): + return _format_double(_as_half(value)) + + +class FloatType(_BaseFloatType): + """ + The type for single-precision floats. + """ + null = '0.0' + intrinsic_name = 'f32' + + def __str__(self): + return 'float' + + def format_constant(self, value): + return _format_double(_as_float(value)) + + +class DoubleType(_BaseFloatType): + """ + The type for double-precision floats. + """ + null = '0.0' + intrinsic_name = 'f64' + + def __str__(self): + return 'double' + + def format_constant(self, value): + return _format_double(value) + + +for _cls in (HalfType, FloatType, DoubleType): + _cls._create_instance() + + +class _Repeat(object): + def __init__(self, value, size): + self.value = value + self.size = size + + def __len__(self): + return self.size + + def __getitem__(self, item): + if 0 <= item < self.size: + return self.value + else: + raise IndexError(item) + + +class VectorType(Type): + """ + The type for vectors of primitive data items (e.g. ""). 
+ """ + + def __init__(self, element, count): + self.element = element + self.count = count + + @property + def elements(self): + return _Repeat(self.element, self.count) + + def __len__(self): + return self.count + + def _to_string(self): + return "<%d x %s>" % (self.count, self.element) + + def __eq__(self, other): + if isinstance(other, VectorType): + return self.element == other.element and self.count == other.count + + def __hash__(self): + # TODO: why does this not take self.element/self.count into account? + return hash(VectorType) + + def __copy__(self): + return self + + def format_constant(self, value): + itemstring = ", " .join(["{0} {1}".format(x.type, x.get_reference()) + for x in value]) + return "<{0}>".format(itemstring) + + def wrap_constant_value(self, values): + from . import Value, Constant + if not isinstance(values, (list, tuple)): + if isinstance(values, Constant): + if values.type != self.element: + raise TypeError("expected {} for {}".format( + self.element, values.type)) + return (values, ) * self.count + return (Constant(self.element, values), ) * self.count + if len(values) != len(self): + raise ValueError("wrong constant size for %s: got %d elements" + % (self, len(values))) + return [Constant(ty, val) if not isinstance(val, Value) else val + for ty, val in zip(self.elements, values)] + + +class Aggregate(Type): + """ + Base class for aggregate types. + See http://llvm.org/docs/LangRef.html#t-aggregate + """ + + def wrap_constant_value(self, values): + from . import Value, Constant + + if not isinstance(values, (list, tuple)): + return values + if len(values) != len(self): + raise ValueError("wrong constant size for %s: got %d elements" + % (self, len(values))) + return [Constant(ty, val) if not isinstance(val, Value) else val + for ty, val in zip(self.elements, values)] + + +class ArrayType(Aggregate): + """ + The type for fixed-size homogenous arrays (e.g. "[f32 x 3]"). 
+ """ + + def __init__(self, element, count): + self.element = element + self.count = count + + @property + def elements(self): + return _Repeat(self.element, self.count) + + def __len__(self): + return self.count + + def _to_string(self): + return "[%d x %s]" % (self.count, self.element) + + def __eq__(self, other): + if isinstance(other, ArrayType): + return self.element == other.element and self.count == other.count + + def __hash__(self): + return hash(ArrayType) + + def gep(self, i): + """ + Resolve the type of the i-th element (for getelementptr lookups). + """ + if not isinstance(i.type, IntType): + raise TypeError(i.type) + return self.element + + def format_constant(self, value): + itemstring = ", " .join(["{0} {1}".format(x.type, x.get_reference()) + for x in value]) + return "[{0}]".format(itemstring) + + +class BaseStructType(Aggregate): + """ + The base type for heterogenous struct types. + """ + _packed = False + + @property + def packed(self): + """ + A boolean attribute that indicates whether the structure uses + packed layout. + """ + return self._packed + + @packed.setter + def packed(self, val): + self._packed = bool(val) + + def __len__(self): + assert self.elements is not None + return len(self.elements) + + def __iter__(self): + assert self.elements is not None + return iter(self.elements) + + @property + def is_opaque(self): + return self.elements is None + + def structure_repr(self): + """ + Return the LLVM IR for the structure representation + """ + ret = '{%s}' % ', '.join([str(x) for x in self.elements]) + return self._wrap_packed(ret) + + def format_constant(self, value): + itemstring = ", " .join(["{0} {1}".format(x.type, x.get_reference()) + for x in value]) + ret = "{{{0}}}".format(itemstring) + return self._wrap_packed(ret) + + def gep(self, i): + """ + Resolve the type of the i-th element (for getelementptr lookups). + + *i* needs to be a LLVM constant, so that the type can be determined + at compile-time. 
+ """ + if not isinstance(i.type, IntType): + raise TypeError(i.type) + return self.elements[i.constant] + + def _wrap_packed(self, textrepr): + """ + Internal helper to wrap textual repr of struct type into packed struct + """ + if self.packed: + return '<{}>'.format(textrepr) + else: + return textrepr + + +class LiteralStructType(BaseStructType): + """ + The type of "literal" structs, i.e. structs with a literally-defined + type (by contrast with IdentifiedStructType). + """ + + null = 'zeroinitializer' + + def __init__(self, elems, packed=False): + """ + *elems* is a sequence of types to be used as members. + *packed* controls the use of packed layout. + """ + self.elements = tuple(elems) + self.packed = packed + + def _to_string(self): + return self.structure_repr() + + def __eq__(self, other): + if isinstance(other, LiteralStructType): + return self.elements == other.elements + + def __hash__(self): + return hash(LiteralStructType) + + +class IdentifiedStructType(BaseStructType): + """ + A type which is a named alias for another struct type, akin to a typedef. + While literal struct types can be structurally equal (see + LiteralStructType), identified struct types are compared by name. + + Do not use this directly. + """ + null = 'zeroinitializer' + + def __init__(self, context, name, packed=False): + """ + *context* is a llvmlite.ir.Context. + *name* is the identifier for the new struct type. + *packed* controls the use of packed layout. 
+ """ + assert name + self.context = context + self.name = name + self.elements = None + self.packed = packed + + def _to_string(self): + return "%{name}".format(name=_wrapname(self.name)) + + def get_declaration(self): + """ + Returns the string for the declaration of the type + """ + if self.is_opaque: + out = "{strrep} = type opaque".format(strrep=str(self)) + else: + out = "{strrep} = type {struct}".format( + strrep=str(self), struct=self.structure_repr()) + return out + + def __eq__(self, other): + if isinstance(other, IdentifiedStructType): + return self.name == other.name + + def __hash__(self): + return hash(IdentifiedStructType) + + def set_body(self, *elems): + if not self.is_opaque: + raise RuntimeError("{name} is already defined".format( + name=self.name)) + self.elements = tuple(elems) diff --git a/vllm/lib/python3.10/site-packages/llvmlite/ir/values.py b/vllm/lib/python3.10/site-packages/llvmlite/ir/values.py new file mode 100644 index 0000000000000000000000000000000000000000..3818e6f34aa37b72a533283afb363cfe18bf8f6e --- /dev/null +++ b/vllm/lib/python3.10/site-packages/llvmlite/ir/values.py @@ -0,0 +1,1217 @@ +""" +Classes that are LLVM values: Value, Constant... +Instructions are in the instructions module. +""" + +import functools +import string +import re +from types import MappingProxyType + +from llvmlite.ir import values, types, _utils +from llvmlite.ir._utils import (_StrCaching, _StringReferenceCaching, + _HasMetadata) + +_VALID_CHARS = (frozenset(map(ord, string.ascii_letters)) | + frozenset(map(ord, string.digits)) | + frozenset(map(ord, ' !#$%&\'()*+,-./:;<=>?@[]^_`{|}~'))) + +_SIMPLE_IDENTIFIER_RE = re.compile(r"[-a-zA-Z$._][-a-zA-Z$._0-9]*$") + +_CMP_MAP = { + '>': 'gt', + '<': 'lt', + '==': 'eq', + '!=': 'ne', + '>=': 'ge', + '<=': 'le', +} + + +def _escape_string(text, _map={}): + """ + Escape the given bytestring for safe use as a LLVM array constant. + Any unicode string input is first encoded with utf8 into bytes. 
+ """ + if isinstance(text, str): + text = text.encode() + assert isinstance(text, (bytes, bytearray)) + + if not _map: + for ch in range(256): + if ch in _VALID_CHARS: + _map[ch] = chr(ch) + else: + _map[ch] = '\\%02x' % ch + + buf = [_map[ch] for ch in text] + return ''.join(buf) + + +def _binop(opname): + def wrap(fn): + @functools.wraps(fn) + def wrapped(lhs, rhs): + if lhs.type != rhs.type: + raise ValueError("Operands must be the same type, got (%s, %s)" + % (lhs.type, rhs.type)) + + fmt = "{0} ({1} {2}, {3} {4})".format(opname, + lhs.type, lhs.get_reference(), + rhs.type, rhs.get_reference()) + return FormattedConstant(lhs.type, fmt) + + return wrapped + return wrap + + +def _castop(opname): + def wrap(fn): + @functools.wraps(fn) + def wrapped(self, typ): + fn(self, typ) + if typ == self.type: + return self + + op = "{0} ({1} {2} to {3})".format(opname, self.type, + self.get_reference(), typ) + return FormattedConstant(typ, op) + + return wrapped + return wrap + + +class _ConstOpMixin(object): + """ + A mixin defining constant operations, for use in constant-like classes. 
+ """ + + # + # Arithmetic APIs + # + + @_binop('shl') + def shl(self, other): + """ + Left integer shift: + lhs << rhs + """ + + @_binop('lshr') + def lshr(self, other): + """ + Logical (unsigned) right integer shift: + lhs >> rhs + """ + + @_binop('ashr') + def ashr(self, other): + """ + Arithmetic (signed) right integer shift: + lhs >> rhs + """ + + @_binop('add') + def add(self, other): + """ + Integer addition: + lhs + rhs + """ + + @_binop('fadd') + def fadd(self, other): + """ + Floating-point addition: + lhs + rhs + """ + + @_binop('sub') + def sub(self, other): + """ + Integer subtraction: + lhs - rhs + """ + + @_binop('fsub') + def fsub(self, other): + """ + Floating-point subtraction: + lhs - rhs + """ + + @_binop('mul') + def mul(self, other): + """ + Integer multiplication: + lhs * rhs + """ + + @_binop('fmul') + def fmul(self, other): + """ + Floating-point multiplication: + lhs * rhs + """ + + @_binop('udiv') + def udiv(self, other): + """ + Unsigned integer division: + lhs / rhs + """ + + @_binop('sdiv') + def sdiv(self, other): + """ + Signed integer division: + lhs / rhs + """ + + @_binop('fdiv') + def fdiv(self, other): + """ + Floating-point division: + lhs / rhs + """ + + @_binop('urem') + def urem(self, other): + """ + Unsigned integer remainder: + lhs % rhs + """ + + @_binop('srem') + def srem(self, other): + """ + Signed integer remainder: + lhs % rhs + """ + + @_binop('frem') + def frem(self, other): + """ + Floating-point remainder: + lhs % rhs + """ + + @_binop('or') + def or_(self, other): + """ + Bitwise integer OR: + lhs | rhs + """ + + @_binop('and') + def and_(self, other): + """ + Bitwise integer AND: + lhs & rhs + """ + + @_binop('xor') + def xor(self, other): + """ + Bitwise integer XOR: + lhs ^ rhs + """ + + def _cmp(self, prefix, sign, cmpop, other): + ins = prefix + 'cmp' + try: + op = _CMP_MAP[cmpop] + except KeyError: + raise ValueError("invalid comparison %r for %s" % (cmpop, ins)) + + if not (prefix == 'i' and cmpop in 
('==', '!=')): + op = sign + op + + if self.type != other.type: + raise ValueError("Operands must be the same type, got (%s, %s)" + % (self.type, other.type)) + + fmt = "{0} {1} ({2} {3}, {4} {5})".format( + ins, op, + self.type, self.get_reference(), + other.type, other.get_reference()) + + return FormattedConstant(types.IntType(1), fmt) + + def icmp_signed(self, cmpop, other): + """ + Signed integer comparison: + lhs rhs + + where cmpop can be '==', '!=', '<', '<=', '>', '>=' + """ + return self._cmp('i', 's', cmpop, other) + + def icmp_unsigned(self, cmpop, other): + """ + Unsigned integer (or pointer) comparison: + lhs rhs + + where cmpop can be '==', '!=', '<', '<=', '>', '>=' + """ + return self._cmp('i', 'u', cmpop, other) + + def fcmp_ordered(self, cmpop, other): + """ + Floating-point ordered comparison: + lhs rhs + + where cmpop can be '==', '!=', '<', '<=', '>', '>=', 'ord', 'uno' + """ + return self._cmp('f', 'o', cmpop, other) + + def fcmp_unordered(self, cmpop, other): + """ + Floating-point unordered comparison: + lhs rhs + + where cmpop can be '==', '!=', '<', '<=', '>', '>=', 'ord', 'uno' + """ + return self._cmp('f', 'u', cmpop, other) + + # + # Unary APIs + # + + def not_(self): + """ + Bitwise integer complement: + ~value + """ + if isinstance(self.type, types.VectorType): + rhs = values.Constant(self.type, (-1,) * self.type.count) + else: + rhs = values.Constant(self.type, -1) + + return self.xor(rhs) + + def neg(self): + """ + Integer negative: + -value + """ + zero = values.Constant(self.type, 0) + return zero.sub(self) + + def fneg(self): + """ + Floating-point negative: + -value + """ + fmt = "fneg ({0} {1})".format(self.type, self.get_reference()) + return FormattedConstant(self.type, fmt) + + # + # Cast APIs + # + + @_castop('trunc') + def trunc(self, typ): + """ + Truncating integer downcast to a smaller type. 
+ """ + + @_castop('zext') + def zext(self, typ): + """ + Zero-extending integer upcast to a larger type + """ + + @_castop('sext') + def sext(self, typ): + """ + Sign-extending integer upcast to a larger type. + """ + + @_castop('fptrunc') + def fptrunc(self, typ): + """ + Floating-point downcast to a less precise type. + """ + + @_castop('fpext') + def fpext(self, typ): + """ + Floating-point upcast to a more precise type. + """ + + @_castop('bitcast') + def bitcast(self, typ): + """ + Pointer cast to a different pointer type. + """ + + @_castop('fptoui') + def fptoui(self, typ): + """ + Convert floating-point to unsigned integer. + """ + + @_castop('uitofp') + def uitofp(self, typ): + """ + Convert unsigned integer to floating-point. + """ + + @_castop('fptosi') + def fptosi(self, typ): + """ + Convert floating-point to signed integer. + """ + + @_castop('sitofp') + def sitofp(self, typ): + """ + Convert signed integer to floating-point. + """ + + @_castop('ptrtoint') + def ptrtoint(self, typ): + """ + Cast pointer to integer. + """ + if not isinstance(self.type, types.PointerType): + msg = "can only call ptrtoint() on pointer type, not '%s'" + raise TypeError(msg % (self.type,)) + if not isinstance(typ, types.IntType): + raise TypeError("can only ptrtoint() to integer type, not '%s'" + % (typ,)) + + @_castop('inttoptr') + def inttoptr(self, typ): + """ + Cast integer to pointer. + """ + if not isinstance(self.type, types.IntType): + msg = "can only call inttoptr() on integer constants, not '%s'" + raise TypeError(msg % (self.type,)) + if not isinstance(typ, types.PointerType): + raise TypeError("can only inttoptr() to pointer type, not '%s'" + % (typ,)) + + def gep(self, indices): + """ + Call getelementptr on this pointer constant. 
+ """ + if not isinstance(self.type, types.PointerType): + raise TypeError("can only call gep() on pointer constants, not '%s'" + % (self.type,)) + + outtype = self.type + for i in indices: + outtype = outtype.gep(i) + + strindices = ["{0} {1}".format(idx.type, idx.get_reference()) + for idx in indices] + + op = "getelementptr ({0}, {1} {2}, {3})".format( + self.type.pointee, self.type, + self.get_reference(), ', '.join(strindices)) + return FormattedConstant(outtype.as_pointer(self.addrspace), op) + + +class Value(object): + """ + The base class for all values. + """ + + def __repr__(self): + return "" % (self.__class__.__name__, self.type,) + + +class _Undefined(object): + """ + 'undef': a value for undefined values. + """ + def __new__(cls): + try: + return Undefined + except NameError: + return object.__new__(_Undefined) + + +Undefined = _Undefined() + + +class Constant(_StrCaching, _StringReferenceCaching, _ConstOpMixin, Value): + """ + A constant LLVM value. + """ + + def __init__(self, typ, constant): + assert isinstance(typ, types.Type) + assert not isinstance(typ, types.VoidType) + self.type = typ + constant = typ.wrap_constant_value(constant) + self.constant = constant + + def _to_string(self): + return '{0} {1}'.format(self.type, self.get_reference()) + + def _get_reference(self): + if self.constant is None: + val = self.type.null + + elif self.constant is Undefined: + val = "undef" + + elif isinstance(self.constant, bytearray): + val = 'c"{0}"'.format(_escape_string(self.constant)) + + else: + val = self.type.format_constant(self.constant) + + return val + + @classmethod + def literal_array(cls, elems): + """ + Construct a literal array constant made of the given members. 
+ """ + tys = [el.type for el in elems] + if len(tys) == 0: + raise ValueError("need at least one element") + ty = tys[0] + for other in tys: + if ty != other: + raise TypeError("all elements must have the same type") + return cls(types.ArrayType(ty, len(elems)), elems) + + @classmethod + def literal_struct(cls, elems): + """ + Construct a literal structure constant made of the given members. + """ + tys = [el.type for el in elems] + return cls(types.LiteralStructType(tys), elems) + + @property + def addrspace(self): + if not isinstance(self.type, types.PointerType): + raise TypeError("Only pointer constant have address spaces") + return self.type.addrspace + + def __eq__(self, other): + if isinstance(other, Constant): + return str(self) == str(other) + else: + return False + + def __ne__(self, other): + return not self.__eq__(other) + + def __hash__(self): + return hash(str(self)) + + def __repr__(self): + return "" % (self.type, self.constant) + + +class FormattedConstant(Constant): + """ + A constant with an already formatted IR representation. + """ + + def __init__(self, typ, constant): + assert isinstance(constant, str) + Constant.__init__(self, typ, constant) + + def _to_string(self): + return self.constant + + def _get_reference(self): + return self.constant + + +class NamedValue(_StrCaching, _StringReferenceCaching, Value): + """ + The base class for named values. 
+ """ + name_prefix = '%' + deduplicate_name = True + + def __init__(self, parent, type, name): + assert parent is not None + assert isinstance(type, types.Type) + self.parent = parent + self.type = type + self._set_name(name) + + def _to_string(self): + buf = [] + if not isinstance(self.type, types.VoidType): + buf.append("{0} = ".format(self.get_reference())) + self.descr(buf) + return "".join(buf).rstrip() + + def descr(self, buf): + raise NotImplementedError + + def _get_name(self): + return self._name + + def _set_name(self, name): + name = self.parent.scope.register(name, + deduplicate=self.deduplicate_name) + self._name = name + + name = property(_get_name, _set_name) + + def _get_reference(self): + name = self.name + # Quote and escape value name + if '\\' in name or '"' in name: + name = name.replace('\\', '\\5c').replace('"', '\\22') + return '{0}"{1}"'.format(self.name_prefix, name) + + def __repr__(self): + return "" % ( + self.__class__.__name__, self.name, self.type) + + @property + def function_type(self): + ty = self.type + if isinstance(ty, types.PointerType): + ty = self.type.pointee + if isinstance(ty, types.FunctionType): + return ty + else: + raise TypeError("Not a function: {0}".format(self.type)) + + +class MetaDataString(NamedValue): + """ + A metadata string, i.e. a constant string used as a value in a metadata + node. 
+ """ + + def __init__(self, parent, string): + super(MetaDataString, self).__init__(parent, + types.MetaDataType(), + name="") + self.string = string + + def descr(self, buf): + buf += (self.get_reference(), "\n") + + def _get_reference(self): + return '!"{0}"'.format(_escape_string(self.string)) + + _to_string = _get_reference + + def __eq__(self, other): + if isinstance(other, MetaDataString): + return self.string == other.string + else: + return False + + def __ne__(self, other): + return not self.__eq__(other) + + def __hash__(self): + return hash(self.string) + + +class MetaDataArgument(_StrCaching, _StringReferenceCaching, Value): + """ + An argument value to a function taking metadata arguments. + This can wrap any other kind of LLVM value. + + Do not instantiate directly, Builder.call() will create these + automatically. + """ + + def __init__(self, value): + assert isinstance(value, Value) + assert not isinstance(value.type, types.MetaDataType) + self.type = types.MetaDataType() + self.wrapped_value = value + + def _get_reference(self): + # e.g. "i32* %2" + return "{0} {1}".format(self.wrapped_value.type, + self.wrapped_value.get_reference()) + + _to_string = _get_reference + + +class NamedMetaData(object): + """ + A named metadata node. + + Do not instantiate directly, use Module.add_named_metadata() instead. + """ + + def __init__(self, parent): + self.parent = parent + self.operands = [] + + def add(self, md): + self.operands.append(md) + + +class MDValue(NamedValue): + """ + A metadata node's value, consisting of a sequence of elements ("operands"). + + Do not instantiate directly, use Module.add_metadata() instead. + """ + name_prefix = '!' 
+ + def __init__(self, parent, values, name): + super(MDValue, self).__init__(parent, + types.MetaDataType(), + name=name) + self.operands = tuple(values) + parent.metadata.append(self) + + def descr(self, buf): + operands = [] + for op in self.operands: + if isinstance(op.type, types.MetaDataType): + if isinstance(op, Constant) and op.constant is None: + operands.append("null") + else: + operands.append(op.get_reference()) + else: + operands.append("{0} {1}".format(op.type, op.get_reference())) + operands = ', '.join(operands) + buf += ("!{{ {0} }}".format(operands), "\n") + + def _get_reference(self): + return self.name_prefix + str(self.name) + + def __eq__(self, other): + if isinstance(other, MDValue): + return self.operands == other.operands + else: + return False + + def __ne__(self, other): + return not self.__eq__(other) + + def __hash__(self): + return hash(self.operands) + + +class DIToken: + """ + A debug information enumeration value that should appear bare in + the emitted metadata. + + Use this to wrap known constants, e.g. the DW_* enumerations. + """ + + def __init__(self, value): + self.value = value + + +class DIValue(NamedValue): + """ + A debug information descriptor, containing key-value pairs. + + Do not instantiate directly, use Module.add_debug_info() instead. + """ + name_prefix = '!' 
+ + def __init__(self, parent, is_distinct, kind, operands, name): + super(DIValue, self).__init__(parent, + types.MetaDataType(), + name=name) + self.is_distinct = is_distinct + self.kind = kind + self.operands = tuple(operands) + parent.metadata.append(self) + + def descr(self, buf): + if self.is_distinct: + buf += ("distinct ",) + operands = [] + for key, value in self.operands: + if value is None: + strvalue = "null" + elif value is True: + strvalue = "true" + elif value is False: + strvalue = "false" + elif isinstance(value, DIToken): + strvalue = value.value + elif isinstance(value, str): + strvalue = '"{}"'.format(_escape_string(value)) + elif isinstance(value, int): + strvalue = str(value) + elif isinstance(value, NamedValue): + strvalue = value.get_reference() + else: + raise TypeError("invalid operand type for debug info: %r" + % (value,)) + operands.append("{0}: {1}".format(key, strvalue)) + operands = ', '.join(operands) + buf += ("!", self.kind, "(", operands, ")\n") + + def _get_reference(self): + return self.name_prefix + str(self.name) + + def __eq__(self, other): + if isinstance(other, DIValue): + return self.is_distinct == other.is_distinct and \ + self.kind == other.kind and \ + self.operands == other.operands + else: + return False + + def __ne__(self, other): + return not self.__eq__(other) + + def __hash__(self): + return hash((self.is_distinct, self.kind, self.operands)) + + +class GlobalValue(NamedValue, _ConstOpMixin, _HasMetadata): + """ + A global value. + """ + name_prefix = '@' + deduplicate_name = False + + def __init__(self, *args, **kwargs): + super(GlobalValue, self).__init__(*args, **kwargs) + self.linkage = '' + self.storage_class = '' + self.section = '' + self.metadata = {} + + +class GlobalVariable(GlobalValue): + """ + A global variable. 
+ """ + + def __init__(self, module, typ, name, addrspace=0): + assert isinstance(typ, types.Type) + super(GlobalVariable, self).__init__(module, typ.as_pointer(addrspace), + name=name) + self.value_type = typ + self.initializer = None + self.unnamed_addr = False + self.global_constant = False + self.addrspace = addrspace + self.align = None + self.parent.add_global(self) + + def descr(self, buf): + if self.global_constant: + kind = 'constant' + else: + kind = 'global' + + if not self.linkage: + # Default to external linkage + linkage = 'external' if self.initializer is None else '' + else: + linkage = self.linkage + + if linkage: + buf.append(linkage + " ") + if self.storage_class: + buf.append(self.storage_class + " ") + if self.unnamed_addr: + buf.append("unnamed_addr ") + if self.addrspace != 0: + buf.append('addrspace({0:d}) '.format(self.addrspace)) + + buf.append("{kind} {type}" .format(kind=kind, type=self.value_type)) + + if self.initializer is not None: + if self.initializer.type != self.value_type: + raise TypeError("got initializer of type %s " + "for global value type %s" + % (self.initializer.type, self.value_type)) + buf.append(" " + self.initializer.get_reference()) + elif linkage not in ('external', 'extern_weak'): + # emit 'undef' for non-external linkage GV + buf.append(" " + self.value_type(Undefined).get_reference()) + + if self.section: + buf.append(", section \"%s\"" % (self.section,)) + + if self.align is not None: + buf.append(", align %d" % (self.align,)) + + if self.metadata: + buf.append(self._stringify_metadata(leading_comma=True)) + + buf.append("\n") + + +class AttributeSet(set): + """A set of string attribute. + Only accept items listed in *_known*. 
class AttributeSet(set):
    """A set of string attributes.

    Only items listed in *_known* may be added; rendering via _to_list()
    iterates in sorted (deterministic) order.
    """
    _known = ()

    def __init__(self, args=()):
        super().__init__()
        if isinstance(args, str):
            # Accept a single attribute name as a convenience.
            args = [args]
        for name in args:
            self.add(name)

    def _expand(self, name, typ):
        # Hook for subclasses that render an attribute with extra info
        # (e.g. a parameter type); the base class emits the bare name.
        return name

    def add(self, name):
        if name not in self._known:
            raise ValueError('unknown attr {!r} for {}'.format(name, self))
        return super(AttributeSet, self).add(name)

    def _to_list(self, typ):
        return [self._expand(i, typ) for i in sorted(self)]


class FunctionAttributes(AttributeSet):
    # Bug fix: 'sspreg' is not a valid LLVM function attribute -- the
    # stack-protector family is 'ssp', 'sspreq' and 'sspstrong' (LLVM
    # LangRef); as written, the real 'sspreq' attribute was rejected.
    _known = frozenset([
        'argmemonly', 'alwaysinline', 'builtin', 'cold', 'convergent',
        'inaccessiblememonly', 'inaccessiblemem_or_argmemonly', 'inlinehint',
        'jumptable', 'minsize', 'naked', 'nobuiltin', 'noduplicate',
        'noimplicitfloat', 'noinline', 'nonlazybind', 'norecurse',
        'noredzone', 'noreturn', 'nounwind', 'optnone', 'optsize',
        'readnone', 'readonly', 'returns_twice', 'sanitize_address',
        'sanitize_memory', 'sanitize_thread', 'ssp',
        'sspreq', 'sspstrong', 'uwtable'])

    def __init__(self, args=()):
        self._alignstack = 0
        self._personality = None
        super(FunctionAttributes, self).__init__(args)

    def add(self, name):
        # alwaysinline and noinline are mutually exclusive.
        if ((name == 'alwaysinline' and 'noinline' in self) or
                (name == 'noinline' and 'alwaysinline' in self)):
            raise ValueError("Can't have alwaysinline and noinline")

        super().add(name)

    @property
    def alignstack(self):
        return self._alignstack

    @alignstack.setter
    def alignstack(self, val):
        assert val >= 0
        self._alignstack = val

    @property
    def personality(self):
        return self._personality

    @personality.setter
    def personality(self, val):
        assert val is None or isinstance(val, GlobalValue)
        self._personality = val

    def _to_list(self, ret_type):
        attrs = super()._to_list(ret_type)
        if self.alignstack:
            attrs.append('alignstack({0:d})'.format(self.alignstack))
        if self.personality:
            attrs.append('personality {persty} {persfn}'.format(
                persty=self.personality.type,
                persfn=self.personality.get_reference()))
        return attrs
persty=self.personality.type, + persfn=self.personality.get_reference())) + return attrs + + +class Function(GlobalValue): + """Represent a LLVM Function but does uses a Module as parent. + Global Values are stored as a set of dependencies (attribute `depends`). + """ + + def __init__(self, module, ftype, name): + assert isinstance(ftype, types.Type) + super(Function, self).__init__(module, ftype.as_pointer(), name=name) + self.ftype = ftype + self.scope = _utils.NameScope() + self.blocks = [] + self.attributes = FunctionAttributes() + self.args = tuple([Argument(self, t) + for t in ftype.args]) + self.return_value = ReturnValue(self, ftype.return_type) + self.parent.add_global(self) + self.calling_convention = '' + + @property + def module(self): + return self.parent + + @property + def entry_basic_block(self): + return self.blocks[0] + + @property + def basic_blocks(self): + return self.blocks + + def append_basic_block(self, name=''): + blk = Block(parent=self, name=name) + self.blocks.append(blk) + return blk + + def insert_basic_block(self, before, name=''): + """Insert block before + """ + blk = Block(parent=self, name=name) + self.blocks.insert(before, blk) + return blk + + def descr_prototype(self, buf): + """ + Describe the prototype ("head") of the function. + """ + state = "define" if self.blocks else "declare" + ret = self.return_value + args = ", ".join(str(a) for a in self.args) + name = self.get_reference() + attrs = ' ' + ' '.join(self.attributes._to_list( + self.ftype.return_type)) if self.attributes else '' + if any(self.args): + vararg = ', ...' if self.ftype.var_arg else '' + else: + vararg = '...' 
if self.ftype.var_arg else '' + linkage = self.linkage + cconv = self.calling_convention + prefix = " ".join(str(x) for x in [state, linkage, cconv, ret] if x) + metadata = self._stringify_metadata() + metadata = ' {}'.format(metadata) if metadata else '' + section = ' section "{}"'.format(self.section) if self.section else '' + pt_str = "{prefix} {name}({args}{vararg}){attrs}{section}{metadata}\n" + prototype = pt_str.format(prefix=prefix, name=name, args=args, + vararg=vararg, attrs=attrs, section=section, + metadata=metadata) + buf.append(prototype) + + def descr_body(self, buf): + """ + Describe of the body of the function. + """ + for blk in self.blocks: + blk.descr(buf) + + def descr(self, buf): + self.descr_prototype(buf) + if self.blocks: + buf.append("{\n") + self.descr_body(buf) + buf.append("}\n") + + def __str__(self): + buf = [] + self.descr(buf) + return "".join(buf) + + @property + def is_declaration(self): + return len(self.blocks) == 0 + + +class ArgumentAttributes(AttributeSet): + # List from + # https://releases.llvm.org/14.0.0/docs/LangRef.html#parameter-attributes + _known = MappingProxyType({ + # True (emit type), + # False (emit name only) + 'byref': True, + 'byval': True, + 'elementtype': True, + 'immarg': False, + 'inalloca': True, + 'inreg': False, + 'nest': False, + 'noalias': False, + 'nocapture': False, + 'nofree': False, + 'nonnull': False, + 'noundef': False, + 'preallocated': True, + 'returned': False, + 'signext': False, + 'sret': True, + 'swiftasync': False, + 'swifterror': False, + 'swiftself': False, + 'zeroext': False, + }) + + def __init__(self, args=()): + self._align = 0 + self._dereferenceable = 0 + self._dereferenceable_or_null = 0 + super(ArgumentAttributes, self).__init__(args) + + def _expand(self, name, typ): + requires_type = self._known.get(name) + if requires_type: + return f"{name}({typ.pointee})" + else: + return name + + @property + def align(self): + return self._align + + @align.setter + def align(self, val): + 
assert isinstance(val, int) and val >= 0 + self._align = val + + @property + def dereferenceable(self): + return self._dereferenceable + + @dereferenceable.setter + def dereferenceable(self, val): + assert isinstance(val, int) and val >= 0 + self._dereferenceable = val + + @property + def dereferenceable_or_null(self): + return self._dereferenceable_or_null + + @dereferenceable_or_null.setter + def dereferenceable_or_null(self, val): + assert isinstance(val, int) and val >= 0 + self._dereferenceable_or_null = val + + def _to_list(self, typ): + attrs = super()._to_list(typ) + if self.align: + attrs.append('align {0:d}'.format(self.align)) + if self.dereferenceable: + attrs.append('dereferenceable({0:d})'.format(self.dereferenceable)) + if self.dereferenceable_or_null: + dref = 'dereferenceable_or_null({0:d})' + attrs.append(dref.format(self.dereferenceable_or_null)) + return attrs + + +class _BaseArgument(NamedValue): + def __init__(self, parent, typ, name=''): + assert isinstance(typ, types.Type) + super(_BaseArgument, self).__init__(parent, typ, name=name) + self.parent = parent + self.attributes = ArgumentAttributes() + + def __repr__(self): + return "" % (self.__class__.__name__, self.name, + self.type) + + def add_attribute(self, attr): + self.attributes.add(attr) + + +class Argument(_BaseArgument): + """ + The specification of a function argument. + """ + + def __str__(self): + attrs = self.attributes._to_list(self.type) + if attrs: + return "{0} {1} {2}".format(self.type, ' '.join(attrs), + self.get_reference()) + else: + return "{0} {1}".format(self.type, self.get_reference()) + + +class ReturnValue(_BaseArgument): + """ + The specification of a function's return value. + """ + + def __str__(self): + attrs = self.attributes._to_list(self.type) + if attrs: + return "{0} {1}".format(' '.join(attrs), self.type) + else: + return str(self.type) + + +class Block(NamedValue): + """ + A LLVM IR basic block. 
A basic block is a sequence of + instructions whose execution always goes from start to end. That + is, a control flow instruction (branch) can only appear as the + last instruction, and incoming branches can only jump to the first + instruction. + """ + + def __init__(self, parent, name=''): + super(Block, self).__init__(parent, types.LabelType(), name=name) + self.scope = parent.scope + self.instructions = [] + self.terminator = None + + @property + def is_terminated(self): + return self.terminator is not None + + @property + def function(self): + return self.parent + + @property + def module(self): + return self.parent.module + + def descr(self, buf): + buf.append("{0}:\n".format(self._format_name())) + buf += [" {0}\n".format(instr) for instr in self.instructions] + + def replace(self, old, new): + """Replace an instruction""" + if old.type != new.type: + raise TypeError("new instruction has a different type") + pos = self.instructions.index(old) + self.instructions.remove(old) + self.instructions.insert(pos, new) + + for bb in self.parent.basic_blocks: + for instr in bb.instructions: + instr.replace_usage(old, new) + + def _format_name(self): + # Per the LLVM Language Ref on identifiers, names matching the following + # regex do not need to be quoted: [%@][-a-zA-Z$._][-a-zA-Z$._0-9]* + # Otherwise, the identifier must be quoted and escaped. + name = self.name + if not _SIMPLE_IDENTIFIER_RE.match(name): + name = name.replace('\\', '\\5c').replace('"', '\\22') + name = '"{0}"'.format(name) + return name + + +class BlockAddress(Value): + """ + The address of a basic block. 
+ """ + + def __init__(self, function, basic_block): + assert isinstance(function, Function) + assert isinstance(basic_block, Block) + self.type = types.IntType(8).as_pointer() + self.function = function + self.basic_block = basic_block + + def __str__(self): + return '{0} {1}'.format(self.type, self.get_reference()) + + def get_reference(self): + return "blockaddress({0}, {1})".format( + self.function.get_reference(), + self.basic_block.get_reference()) diff --git a/vllm/lib/python3.10/site-packages/llvmlite/tests/__pycache__/__init__.cpython-310.pyc b/vllm/lib/python3.10/site-packages/llvmlite/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b34e814005b5ddcb2e2777ebbb5ad265d7f9807b Binary files /dev/null and b/vllm/lib/python3.10/site-packages/llvmlite/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/llvmlite/tests/__pycache__/__main__.cpython-310.pyc b/vllm/lib/python3.10/site-packages/llvmlite/tests/__pycache__/__main__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..810b9bf8ee1e4fcb231becfa98e2a84a22690a03 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/llvmlite/tests/__pycache__/__main__.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/llvmlite/tests/__pycache__/customize.cpython-310.pyc b/vllm/lib/python3.10/site-packages/llvmlite/tests/__pycache__/customize.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5ebfe1235f729a2c10af1b2d8e6cbd02db8bc58a Binary files /dev/null and b/vllm/lib/python3.10/site-packages/llvmlite/tests/__pycache__/customize.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/llvmlite/tests/__pycache__/refprune_proto.cpython-310.pyc b/vllm/lib/python3.10/site-packages/llvmlite/tests/__pycache__/refprune_proto.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..a2f4d5b28cc3d7274036763b186d911b8dc3d585 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/llvmlite/tests/__pycache__/refprune_proto.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/llvmlite/tests/__pycache__/test_binding.cpython-310.pyc b/vllm/lib/python3.10/site-packages/llvmlite/tests/__pycache__/test_binding.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7494071302de1a79156b34d0999c50c05a317787 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/llvmlite/tests/__pycache__/test_binding.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/llvmlite/tests/__pycache__/test_ir.cpython-310.pyc b/vllm/lib/python3.10/site-packages/llvmlite/tests/__pycache__/test_ir.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..acde5770e091911e6ef1001362bc5c964793e9ce Binary files /dev/null and b/vllm/lib/python3.10/site-packages/llvmlite/tests/__pycache__/test_ir.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/llvmlite/tests/__pycache__/test_refprune.cpython-310.pyc b/vllm/lib/python3.10/site-packages/llvmlite/tests/__pycache__/test_refprune.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4a27bbfa8738d4b70e6339d54cca656150821b31 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/llvmlite/tests/__pycache__/test_refprune.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/llvmlite/tests/__pycache__/test_valuerepr.cpython-310.pyc b/vllm/lib/python3.10/site-packages/llvmlite/tests/__pycache__/test_valuerepr.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ba9a21ecd7b0b6311fc3810d96f4ca8841c0d6b4 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/llvmlite/tests/__pycache__/test_valuerepr.cpython-310.pyc differ diff --git 
"""
Contains tests and a prototype implementation for the fanout algorithm in
the LLVM refprune pass.
"""

try:
    from graphviz import Digraph
except ImportError:
    pass
from collections import defaultdict

# The entry block. It's always the same.
ENTRY = "A"


# Each caseNN() function returns a 3-tuple (nodes, edges, expected):
#   nodes    -- maps a BB node to the increfs/decrefs it contains
#   edges    -- maps a BB node to its successor BBs
#   expected -- maps each BB holding an incref to the set of BBs holding the
#               matching decrefs, or None to mark an invalid (unprunable)
#               configuration.

def case1():
    # Baseline CFG, reused (and mutated) by several later cases.
    edges = {
        "A": ["B"],
        "B": ["C", "D"],
        "C": [],
        "D": ["E", "F"],
        "E": ["G"],
        "F": [],
        "G": ["H", "I"],
        "I": ["G", "F"],
        "H": ["J", "K"],
        "J": ["L", "M"],
        "K": [],
        "L": ["Z"],
        "M": ["Z", "O", "P"],
        "O": ["Z"],
        "P": ["Z"],
        "Z": [],
    }
    nodes = defaultdict(list, {
        "D": ["incref"],
        "H": ["decref"],
        "F": ["decref", "decref"],
    })
    return nodes, edges, {"D": {"H", "F"}}


def case2():
    edges = {
        "A": ["B", "C"],
        "B": ["C"],
        "C": [],
    }
    nodes = defaultdict(list, {
        "A": ["incref"],
        "B": ["decref"],
        "C": ["decref"],
    })
    return nodes, edges, {"A": None}


def case3():
    # case1 plus an invalid edge.
    nodes, edges, _ = case1()
    edges["H"].append("F")
    return nodes, edges, {"D": None}


def case4():
    # case1 plus an invalid edge.
    nodes, edges, _ = case1()
    edges["H"].append("E")
    return nodes, edges, {"D": None}


def case5():
    # case1 plus a backedge jumping to before the incref.
    nodes, edges, _ = case1()
    edges["B"].append("I")
    return nodes, edges, {"D": None}


def case6():
    # case1 plus a backedge jumping to before the incref.
    nodes, edges, _ = case1()
    edges["I"].append("B")
    return nodes, edges, {"D": None}


def case7():
    # case1 plus a forward jump escaping the region.
    nodes, edges, _ = case1()
    edges["I"].append("M")
    return nodes, edges, {"D": None}


def case8():
    # Minimal prunable diamond, reused by cases 9-13.
    edges = {
        "A": ["B", "C"],
        "B": ["C"],
        "C": [],
    }
    nodes = defaultdict(list, {
        "A": ["incref"],
        "C": ["decref"],
    })
    return nodes, edges, {"A": {"C"}}


def case9():
    # case8 plus a back edge.
    nodes, edges, _ = case8()
    edges["C"].append("B")
    return nodes, edges, {"A": None}


def case10():
    # case8 plus a back edge to the entry A.
    nodes, edges, _ = case8()
    edges["C"].append("A")
    return nodes, edges, {"A": {"C"}}


def case11():
    nodes, edges, _ = case8()
    edges["C"].append("D")
    edges["D"] = []
    return nodes, edges, {"A": {"C"}}


def case12():
    nodes, edges, _ = case8()
    edges["C"].append("D")
    edges["D"] = ["A"]
    return nodes, edges, {"A": {"C"}}


def case13():
    nodes, edges, _ = case8()
    edges["C"].append("D")
    edges["D"] = ["B"]
    return nodes, edges, {"A": None}


def make_predecessor_map(edges):
    """Invert *edges*: map each node to the set of its predecessor nodes."""
    preds = defaultdict(set)
    for src, successors in edges.items():
        for dst in successors:
            preds[dst].add(src)
    return preds
in (x for x in self.nodes[cur_node] if x == "incref"): + decref_blocks = self.find_fanout(cur_node) + self.print(">>", cur_node, "===", decref_blocks) + got[cur_node] = decref_blocks + return got + + def find_fanout(self, head_node): + decref_blocks = self.find_decref_candidates(head_node) + self.print("candidates", decref_blocks) + if not decref_blocks: + return None + if not self.verify_non_overlapping( + head_node, decref_blocks, entry=ENTRY + ): + return None + return set(decref_blocks) + + def verify_non_overlapping(self, head_node, decref_blocks, entry): + self.print("verify_non_overlapping".center(80, "-")) + # reverse walk for each decref_blocks + # they should end at head_node + todo = list(decref_blocks) + while todo: + cur_node = todo.pop() + visited = set() + + workstack = [cur_node] + del cur_node + while workstack: + cur_node = workstack.pop() + self.print("cur_node", cur_node, "|", workstack) + if cur_node in visited: + continue # skip + if cur_node == entry: + # Entry node + self.print( + "!! failed because we arrived at entry", cur_node + ) + return False + visited.add(cur_node) + # check all predecessors + self.print( + f" {cur_node} preds {self.get_predecessors(cur_node)}" + ) + for pred in self.get_predecessors(cur_node): + if pred in decref_blocks: + # reject because there's a predecessor in decref_blocks + self.print( + "!! 
reject because predecessor in decref_blocks" + ) + return False + if pred != head_node: + + workstack.append(pred) + + return True + + def get_successors(self, node): + return tuple(self.edges[node]) + + def get_predecessors(self, node): + return tuple(self.rev_edges[node]) + + def has_decref(self, node): + return "decref" in self.nodes[node] + + def walk_child_for_decref( + self, cur_node, path_stack, decref_blocks, depth=10 + ): + indent = " " * len(path_stack) + self.print(indent, "walk", path_stack, cur_node) + if depth <= 0: + return False # missing + if cur_node in path_stack: + if cur_node == path_stack[0]: + return False # reject interior node backedge + return True # skip + if self.has_decref(cur_node): + decref_blocks.add(cur_node) + self.print(indent, "found decref") + return True + + depth -= 1 + path_stack += (cur_node,) + found = False + for child in self.get_successors(cur_node): + if not self.walk_child_for_decref( + child, path_stack, decref_blocks + ): + found = False + break + else: + found = True + + self.print(indent, f"ret {found}") + return found + + def find_decref_candidates(self, cur_node): + # Forward pass + self.print("find_decref_candidates".center(80, "-")) + path_stack = (cur_node,) + found = False + decref_blocks = set() + for child in self.get_successors(cur_node): + if not self.walk_child_for_decref( + child, path_stack, decref_blocks + ): + found = False + break + else: + found = True + if not found: + return set() + else: + return decref_blocks + + +def check_once(): + nodes, edges, expected = case13() + + # Render graph + G = Digraph() + for node in edges: + G.node(node, shape="rect", label=f"{node}\n" + r"\l".join(nodes[node])) + for node, children in edges.items(): + for child in children: + G.edge(node, child) + + G.view() + + algo = FanoutAlgorithm(nodes, edges, verbose=True) + got = algo.run() + assert expected == got + + +def check_all(): + for k, fn in list(globals().items()): + if k.startswith("case"): + 
print(f"{fn}".center(80, "-")) + nodes, edges, expected = fn() + algo = FanoutAlgorithm(nodes, edges) + got = algo.run() + assert expected == got + print("ALL PASSED") + + +if __name__ == "__main__": + # check_once() + check_all() diff --git a/vllm/lib/python3.10/site-packages/llvmlite/tests/test_binding.py b/vllm/lib/python3.10/site-packages/llvmlite/tests/test_binding.py new file mode 100644 index 0000000000000000000000000000000000000000..215ee0f411dcfc4fa126fa101c06adfc55fc0b9a --- /dev/null +++ b/vllm/lib/python3.10/site-packages/llvmlite/tests/test_binding.py @@ -0,0 +1,2585 @@ +import ctypes +import threading +from ctypes import CFUNCTYPE, c_int, c_int32 +from ctypes.util import find_library +import gc +import locale +import os +import platform +import re +import subprocess +import sys +import unittest +from contextlib import contextmanager +from tempfile import mkstemp + +from llvmlite import ir +from llvmlite import binding as llvm +from llvmlite.binding import ffi +from llvmlite.tests import TestCase + +llvm_version_major = llvm.llvm_version_info[0] + +# arvm7l needs extra ABI symbols to link successfully +if platform.machine() == 'armv7l': + llvm.load_library_permanently('libgcc_s.so.1') + + +def no_de_locale(): + cur = locale.setlocale(locale.LC_ALL) + try: + locale.setlocale(locale.LC_ALL, 'de_DE') + except locale.Error: + return True + else: + return False + finally: + locale.setlocale(locale.LC_ALL, cur) + + +asm_sum = r""" + ; ModuleID = '' + source_filename = "asm_sum.c" + target triple = "{triple}" + %struct.glob_type = type {{ i64, [2 x i64]}} + %struct.glob_type_vec = type {{ i64, <2 x i64>}} + + @glob = global i32 0 + @glob_b = global i8 0 + @glob_f = global float 1.5 + @glob_struct = global %struct.glob_type {{i64 0, [2 x i64] [i64 0, i64 0]}} + + define i32 @sum(i32 %.1, i32 %.2) {{ + %.3 = add i32 %.1, %.2 + %.4 = add i32 0, %.3 + ret i32 %.4 + }} + """ + +asm_sum2 = r""" + ; ModuleID = '' + target triple = "{triple}" + + define i32 @sum(i32 
%.1, i32 %.2) {{ + %.3 = add i32 %.1, %.2 + ret i32 %.3 + }} + """ + +asm_sum3 = r""" + ; ModuleID = '' + target triple = "{triple}" + + define i64 @sum(i64 %.1, i64 %.2) {{ + %.3 = add i64 %.1, %.2 + %.4 = add i64 5, %.3 + %.5 = add i64 -5, %.4 + ret i64 %.5 + }} + """ + +asm_mul = r""" + ; ModuleID = '' + target triple = "{triple}" + @mul_glob = global i32 0 + + define i32 @mul(i32 %.1, i32 %.2) {{ + %.3 = mul i32 %.1, %.2 + ret i32 %.3 + }} + """ + +asm_square_sum = r""" + ; ModuleID = '' + target triple = "{triple}" + @mul_glob = global i32 0 + + declare i32 @sum(i32, i32) + define i32 @square_sum(i32 %.1, i32 %.2) {{ + %.3 = call i32 @sum(i32 %.1, i32 %.2) + %.4 = mul i32 %.3, %.3 + ret i32 %.4 + }} + """ + +asm_getversion = r""" + ; ModuleID = '' + target triple = "{triple}" + + declare i8* @Py_GetVersion() + + define void @getversion(i32 %.1, i32 %.2) {{ + %1 = call i8* @Py_GetVersion() + ret void + }} + """ + +if platform.python_implementation() == 'PyPy': + asm_getversion = asm_getversion.replace('Py_GetVersion', 'PyPy_GetVersion') + +# `fadd` used on integer inputs +asm_parse_error = r""" + ; ModuleID = '' + target triple = "{triple}" + + define i32 @sum(i32 %.1, i32 %.2) {{ + %.3 = fadd i32 %.1, %.2 + ret i32 %.3 + }} + """ + +# "%.bug" definition references itself +asm_verification_fail = r""" + ; ModuleID = '' + target triple = "{triple}" + + define void @sum() {{ + %.bug = add i32 1, %.bug + ret void + }} + """ + +asm_sum_declare = r""" + ; ModuleID = '' + target triple = "{triple}" + + declare i32 @sum(i32 %.1, i32 %.2) + """ + +asm_vararg_declare = r""" + ; ModuleID = '' + target triple = "{triple}" + + declare i32 @vararg(i32 %.1, ...) 
+ """ + +asm_double_inaccurate = r""" + ; ModuleID = '' + target triple = "{triple}" + + define void @foo() {{ + %const = fadd fp128 0xLF3CB1CCF26FBC178452FB4EC7F91DEAD, 0xL00000000000000000000000000000001 + ret void + }} + """ # noqa E501 + +asm_double_locale = r""" + ; ModuleID = '' + target triple = "{triple}" + + define void @foo() {{ + %const = fadd double 0.0, 3.14 + ret void + }} + """ + + +asm_inlineasm = r""" + ; ModuleID = '' + target triple = "{triple}" + + define void @foo() {{ + call void asm sideeffect "nop", ""() + ret void + }} + """ + +asm_inlineasm2 = """ + ; ModuleID = '' + target triple = "{triple}" + + define void @inlineme() {{ + ret void + }} + + define i32 @caller(i32 %.1, i32 %.2) {{ + entry: + %stack = alloca i32 + store i32 %.1, i32* %stack + br label %main + main: + %loaded = load i32, i32* %stack + %.3 = add i32 %loaded, %.2 + %.4 = add i32 0, %.3 + call void @inlineme() + ret i32 %.4 + }} +""" + +asm_inlineasm3 = """ +; ModuleID = 'test.c' +source_filename = "test.c" +target triple = "{triple}" + +; Function Attrs: noinline nounwind optnone ssp uwtable +define void @inlineme() noinline !dbg !15 {{ + ret void, !dbg !18 +}} + +; Function Attrs: noinline nounwind optnone ssp uwtable +define i32 @foo(i32 %0, i32 %1) !dbg !19 {{ + %3 = alloca i32, align 4 + %4 = alloca i32, align 4 + store i32 %0, i32* %3, align 4 + call void @llvm.dbg.declare(metadata i32* %3, metadata !23, metadata !DIExpression()), !dbg !24 + store i32 %1, i32* %4, align 4 + call void @llvm.dbg.declare(metadata i32* %4, metadata !25, metadata !DIExpression()), !dbg !26 + call void @inlineme(), !dbg !27 + %5 = load i32, i32* %3, align 4, !dbg !28 + %6 = load i32, i32* %4, align 4, !dbg !29 + %7 = add nsw i32 %5, %6, !dbg !30 + ret i32 %7, !dbg !31 +}} + +; Function Attrs: nofree nosync nounwind readnone speculatable willreturn +declare void @llvm.dbg.declare(metadata, metadata, metadata) #1 + +attributes #1 = {{ nofree nosync nounwind readnone speculatable willreturn }} + 
+!llvm.module.flags = !{{!1, !2, !3, !4, !5, !6, !7, !8, !9, !10}} +!llvm.dbg.cu = !{{!11}} +!llvm.ident = !{{!14}} + +!0 = !{{i32 2, !"SDK Version", [2 x i32] [i32 12, i32 3]}} +!1 = !{{i32 7, !"Dwarf Version", i32 4}} +!2 = !{{i32 2, !"Debug Info Version", i32 3}} +!3 = !{{i32 1, !"wchar_size", i32 4}} +!4 = !{{i32 1, !"branch-target-enforcement", i32 0}} +!5 = !{{i32 1, !"sign-return-address", i32 0}} +!6 = !{{i32 1, !"sign-return-address-all", i32 0}} +!7 = !{{i32 1, !"sign-return-address-with-bkey", i32 0}} +!8 = !{{i32 7, !"PIC Level", i32 2}} +!9 = !{{i32 7, !"uwtable", i32 1}} +!10 = !{{i32 7, !"frame-pointer", i32 1}} +!11 = distinct !DICompileUnit(language: DW_LANG_C99, file: !12, producer: "Apple clang version 13.1.6 (clang-1316.0.21.2.3)", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, enums: !13, splitDebugInlining: false, nameTableKind: None, sysroot: "/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk", sdk: "MacOSX.sdk") +!12 = !DIFile(filename: "test.c", directory: "/") +!13 = !{{}} +!14 = !{{!"Apple clang version 13.1.6 (clang-1316.0.21.2.3)"}} +!15 = distinct !DISubprogram(name: "inlineme", scope: !12, file: !12, line: 1, type: !16, scopeLine: 1, flags: DIFlagPrototyped, spFlags: DISPFlagDefinition, unit: !11, retainedNodes: !13) +!16 = !DISubroutineType(types: !17) +!17 = !{{null}} +!18 = !DILocation(line: 1, column: 22, scope: !15) +!19 = distinct !DISubprogram(name: "foo", scope: !12, file: !12, line: 3, type: !20, scopeLine: 3, flags: DIFlagPrototyped, spFlags: DISPFlagDefinition, unit: !11, retainedNodes: !13) +!20 = !DISubroutineType(types: !21) +!21 = !{{!22, !22, !22}} +!22 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed) +!23 = !DILocalVariable(name: "a", arg: 1, scope: !19, file: !12, line: 3, type: !22) +!24 = !DILocation(line: 3, column: 13, scope: !19) +!25 = !DILocalVariable(name: "b", arg: 2, scope: !19, file: !12, line: 3, type: !22) +!26 = 
!DILocation(line: 3, column: 20, scope: !19) +!27 = !DILocation(line: 4, column: 5, scope: !19) +!28 = !DILocation(line: 5, column: 12, scope: !19) +!29 = !DILocation(line: 5, column: 16, scope: !19) +!30 = !DILocation(line: 5, column: 14, scope: !19) +!31 = !DILocation(line: 5, column: 5, scope: !19) +""" # noqa E501 + +licm_asm = r""" +; ModuleID = "" +target triple = "{triple}" + +define double @licm(i32 %0) {{ + %2 = alloca i32, align 4 + %3 = alloca double, align 8 + %4 = alloca i32, align 4 + %5 = alloca double, align 8 + store i32 %0, i32* %2, align 4 + store double 0.000000e+00, double* %3, align 8 + store i32 0, i32* %4, align 4 + br label %6 + +6: ; preds = %14, %1 + %7 = load i32, i32* %4, align 4 + %8 = load i32, i32* %2, align 4 + %9 = icmp slt i32 %7, %8 + br i1 %9, label %10, label %17 + +10: ; preds = %6 + store double 7.000000e+00, double* %5, align 8 + %11 = load double, double* %5, align 8 + %12 = load double, double* %3, align 8 + %13 = fadd double %12, %11 + store double %13, double* %3, align 8 + br label %14 + +14: ; preds = %10 + %15 = load i32, i32* %4, align 4 + %16 = add nsw i32 %15, 1 + store i32 %16, i32* %4, align 4 + br label %6 + +17: ; preds = %6 + %18 = load double, double* %3, align 8 + ret double %18 +}} +""" # noqa E501 + +asm_global_ctors = r""" + ; ModuleID = "" + target triple = "{triple}" + + @A = global i32 undef + + define void @ctor_A() + {{ + store i32 10, i32* @A + ret void + }} + + define void @dtor_A() + {{ + store i32 20, i32* @A + ret void + }} + + define i32 @foo() + {{ + %.2 = load i32, i32* @A + %.3 = add i32 %.2, 2 + ret i32 %.3 + }} + + @llvm.global_ctors = appending global [1 x {{i32, void ()*, i8*}}] [{{i32, void ()*, i8*}} {{i32 0, void ()* @ctor_A, i8* null}}] + @llvm.global_dtors = appending global [1 x {{i32, void ()*, i8*}}] [{{i32, void ()*, i8*}} {{i32 0, void ()* @dtor_A, i8* null}}] + """ # noqa E501 + +asm_ext_ctors = r""" + ; ModuleID = "" + target triple = "{triple}" + + @A = external global i32 + 
+ define void @ctor_A() + {{ + store i32 10, i32* @A + ret void + }} + + define void @dtor_A() + {{ + store i32 20, i32* @A + ret void + }} + + define i32 @foo() + {{ + %.2 = load i32, i32* @A + %.3 = add i32 %.2, 2 + ret i32 %.3 + }} + + @llvm.global_ctors = appending global [1 x {{i32, void ()*, i8*}}] [{{i32, void ()*, i8*}} {{i32 0, void ()* @ctor_A, i8* null}}] + @llvm.global_dtors = appending global [1 x {{i32, void ()*, i8*}}] [{{i32, void ()*, i8*}} {{i32 0, void ()* @dtor_A, i8* null}}] + """ # noqa E501 + + +asm_nonalphanum_blocklabel = """; ModuleID = "" +target triple = "unknown-unknown-unknown" +target datalayout = "" + +define i32 @"foo"() +{ +"<>!*''#": + ret i32 12345 +} +""" # noqa W291 # trailing space needed for match later + + +asm_null_constant = r""" + ; ModuleID = '' + target triple = "{triple}" + + define void @foo(i64* %.1) {{ + ret void + }} + + define void @bar() {{ + call void @foo(i64* null) + ret void + }} +""" + + +riscv_asm_ilp32 = [ + 'addi\tsp, sp, -16', + 'sw\ta1, 8(sp)', + 'sw\ta2, 12(sp)', + 'fld\tft0, 8(sp)', + 'fmv.w.x\tft1, a0', + 'fcvt.d.s\tft1, ft1', + 'fadd.d\tft0, ft1, ft0', + 'fsd\tft0, 8(sp)', + 'lw\ta0, 8(sp)', + 'lw\ta1, 12(sp)', + 'addi\tsp, sp, 16', + 'ret' +] + + +riscv_asm_ilp32f = [ + 'addi\tsp, sp, -16', + 'sw\ta0, 8(sp)', + 'sw\ta1, 12(sp)', + 'fld\tft0, 8(sp)', + 'fcvt.d.s\tft1, fa0', + 'fadd.d\tft0, ft1, ft0', + 'fsd\tft0, 8(sp)', + 'lw\ta0, 8(sp)', + 'lw\ta1, 12(sp)', + 'addi\tsp, sp, 16', + 'ret' +] + + +riscv_asm_ilp32d = [ + 'fcvt.d.s\tft0, fa0', + 'fadd.d\tfa0, ft0, fa1', + 'ret' +] + + +asm_attributes = r""" +declare void @a_readonly_func(i8 *) readonly + +declare i8* @a_arg0_return_func(i8* returned, i32*) +""" + + +# This produces the following output from objdump: +# +# $ objdump -D 632.elf +# +# 632.elf: file format elf64-x86-64 +# +# +# Disassembly of section .text: +# +# 0000000000000000 <__arybo>: +# 0: 48 c1 e2 20 shl $0x20,%rdx +# 4: 48 09 c2 or %rax,%rdx +# 7: 48 89 d0 mov %rdx,%rax +# a: 48 
c1 c0 3d rol $0x3d,%rax +# e: 48 31 d0 xor %rdx,%rax +# 11: 48 b9 01 20 00 04 80 movabs $0x7010008004002001,%rcx +# 18: 00 10 70 +# 1b: 48 0f af c8 imul %rax,%rcx + +issue_632_elf = \ + "7f454c4602010100000000000000000001003e00010000000000000000000000000000" \ + "0000000000e0000000000000000000000040000000000040000500010048c1e2204809" \ + "c24889d048c1c03d4831d048b90120000480001070480fafc800000000000000000000" \ + "0000000000000000000000000000002f0000000400f1ff000000000000000000000000" \ + "00000000070000001200020000000000000000001f00000000000000002e7465787400" \ + "5f5f617279626f002e6e6f74652e474e552d737461636b002e737472746162002e7379" \ + "6d746162003c737472696e673e00000000000000000000000000000000000000000000" \ + "0000000000000000000000000000000000000000000000000000000000000000000000" \ + "00000000000000001f0000000300000000000000000000000000000000000000a80000" \ + "0000000000380000000000000000000000000000000100000000000000000000000000" \ + "000001000000010000000600000000000000000000000000000040000000000000001f" \ + "000000000000000000000000000000100000000000000000000000000000000f000000" \ + "01000000000000000000000000000000000000005f0000000000000000000000000000" \ + "0000000000000000000100000000000000000000000000000027000000020000000000" \ + "0000000000000000000000000000600000000000000048000000000000000100000002" \ + "00000008000000000000001800000000000000" + + +issue_632_text = \ + "48c1e2204809c24889d048c1c03d4831d048b90120000480001070480fafc8" + + +asm_tli_exp2 = r""" +; ModuleID = '' +source_filename = "" +target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128" +target triple = "x86_64-pc-windows-msvc" + +declare float @llvm.exp2.f32(float %casted) + +define float @foo(i16 %arg) { +entry: + %casted = sitofp i16 %arg to float + %ret = call float @llvm.exp2.f32(float %casted) + ret float %ret +} +""" # noqa E501 + +asm_phi_blocks = r""" +; ModuleID = '' +target triple = "{triple}" + +define void @foo(i32 %N) {{ + ; unnamed 
block for testing + %cmp4 = icmp sgt i32 %N, 0 + br i1 %cmp4, label %for.body, label %for.cond.cleanup + +for.cond.cleanup: + ret void + +for.body: + %i.05 = phi i32 [ %inc, %for.body ], [ 0, %0 ] + %inc = add nuw nsw i32 %i.05, 1 + %exitcond.not = icmp eq i32 %inc, %N + br i1 %exitcond.not, label %for.cond.cleanup, label %for.body +}} +""" + + +class BaseTest(TestCase): + + def setUp(self): + llvm.initialize() + llvm.initialize_native_target() + llvm.initialize_native_asmprinter() + gc.collect() + self.old_garbage = gc.garbage[:] + gc.garbage[:] = [] + + def tearDown(self): + # Test that no uncollectable objects were created + # (llvmlite objects have a __del__ so a reference cycle could + # create some). + gc.collect() + self.assertEqual(gc.garbage, []) + # This will probably put any existing garbage in gc.garbage again + del self.old_garbage + + def module(self, asm=asm_sum, context=None): + asm = asm.format(triple=llvm.get_default_triple()) + mod = llvm.parse_assembly(asm, context) + return mod + + def glob(self, name='glob', mod=None): + if mod is None: + mod = self.module() + return mod.get_global_variable(name) + + def target_machine(self, *, jit): + target = llvm.Target.from_default_triple() + return target.create_target_machine(jit=jit) + + +class TestDependencies(BaseTest): + """ + Test DLL dependencies are within a certain expected set. 
+ """ + + @unittest.skipUnless(sys.platform.startswith('linux'), + "Linux-specific test") + @unittest.skipUnless(os.environ.get('LLVMLITE_DIST_TEST'), + "Distribution-specific test") + def test_linux(self): + lib_path = ffi.lib._name + env = os.environ.copy() + env['LANG'] = 'C' + p = subprocess.Popen(["objdump", "-p", lib_path], + stdout=subprocess.PIPE, env=env) + out, _ = p.communicate() + self.assertEqual(0, p.returncode) + # Parse library dependencies + lib_pat = re.compile(r'^([+-_a-zA-Z0-9]+)\.so(?:\.\d+){0,3}$') + deps = set() + for line in out.decode().splitlines(): + parts = line.split() + if parts and parts[0] == 'NEEDED': + dep = parts[1] + m = lib_pat.match(dep) + if len(parts) != 2 or not m: + self.fail("invalid NEEDED line: %r" % (line,)) + deps.add(m.group(1)) + # Sanity check that our dependencies were parsed ok + if 'libc' not in deps or 'libpthread' not in deps: + self.fail("failed parsing dependencies? got %r" % (deps,)) + # Ensure all dependencies are expected + allowed = set(['librt', 'libdl', 'libpthread', 'libz', 'libm', + 'libgcc_s', 'libc', 'ld-linux', 'ld64']) + if platform.python_implementation() == 'PyPy': + allowed.add('libtinfo') + + for dep in deps: + if not dep.startswith('ld-linux-') and dep not in allowed: + self.fail("unexpected dependency %r in %r" % (dep, deps)) + + +class TestRISCVABI(BaseTest): + """ + Test calling convention of floating point arguments of RISC-V + using different ABI. 
+ """ + triple = "riscv32-unknown-linux" + + def setUp(self): + super().setUp() + llvm.initialize_all_targets() + llvm.initialize_all_asmprinters() + + def check_riscv_target(self): + try: + llvm.Target.from_triple(self.triple) + except RuntimeError as e: + if "No available targets are compatible with triple" in str(e): + self.skipTest("RISCV target unsupported by linked LLVM.") + else: + raise e + + def riscv_target_machine(self, **kwarg): + lltarget = llvm.Target.from_triple(self.triple) + return lltarget.create_target_machine(**kwarg) + + def fpadd_ll_module(self): + f64 = ir.DoubleType() + f32 = ir.FloatType() + fnty = ir.FunctionType(f64, (f32, f64)) + module = ir.Module() + func = ir.Function(module, fnty, name="fpadd") + block = func.append_basic_block() + builder = ir.IRBuilder(block) + a, b = func.args + arg0 = builder.fpext(a, f64) + result = builder.fadd(arg0, b) + builder.ret(result) + + llmod = llvm.parse_assembly(str(module)) + llmod.verify() + return llmod + + def break_up_asm(self, asm): + asm_list = [] + for line in asm.splitlines(): + s_line = line.strip() + if not (s_line.startswith(".") or s_line.startswith("fpadd") + or s_line == ""): + asm_list.append(s_line) + return asm_list + + def test_rv32d_ilp32(self): + self.check_riscv_target() + llmod = self.fpadd_ll_module() + target = self.riscv_target_machine(features="+f,+d", abiname="ilp32") + self.assertEqual(self.break_up_asm(target.emit_assembly(llmod)), + riscv_asm_ilp32) + + def test_rv32d_ilp32f(self): + self.check_riscv_target() + llmod = self.fpadd_ll_module() + target = self.riscv_target_machine(features="+f,+d", abiname="ilp32f") + self.assertEqual(self.break_up_asm(target.emit_assembly(llmod)), + riscv_asm_ilp32f) + + def test_rv32d_ilp32d(self): + self.check_riscv_target() + llmod = self.fpadd_ll_module() + target = self.riscv_target_machine(features="+f,+d", abiname="ilp32d") + self.assertEqual(self.break_up_asm(target.emit_assembly(llmod)), + riscv_asm_ilp32d) + + +class 
TestMisc(BaseTest): + """ + Test miscellaneous functions in llvm.binding. + """ + + def test_parse_assembly(self): + self.module(asm_sum) + + def test_parse_assembly_error(self): + with self.assertRaises(RuntimeError) as cm: + self.module(asm_parse_error) + s = str(cm.exception) + self.assertIn("parsing error", s) + self.assertIn("invalid operand type", s) + + def test_nonalphanum_block_name(self): + mod = ir.Module() + ft = ir.FunctionType(ir.IntType(32), []) + fn = ir.Function(mod, ft, "foo") + bd = ir.IRBuilder(fn.append_basic_block(name="<>!*''#")) + bd.ret(ir.Constant(ir.IntType(32), 12345)) + asm = str(mod) + self.assertEqual(asm, asm_nonalphanum_blocklabel) + + def test_global_context(self): + gcontext1 = llvm.context.get_global_context() + gcontext2 = llvm.context.get_global_context() + assert gcontext1 == gcontext2 + + def test_dylib_symbols(self): + llvm.add_symbol("__xyzzy", 1234) + llvm.add_symbol("__xyzzy", 5678) + addr = llvm.address_of_symbol("__xyzzy") + self.assertEqual(addr, 5678) + addr = llvm.address_of_symbol("__foobar") + self.assertIs(addr, None) + + def test_get_default_triple(self): + triple = llvm.get_default_triple() + self.assertIsInstance(triple, str) + self.assertTrue(triple) + + def test_get_process_triple(self): + # Sometimes we get synonyms for PPC + def normalize_ppc(arch): + if arch == 'powerpc64le': + return 'ppc64le' + else: + return arch + + triple = llvm.get_process_triple() + default = llvm.get_default_triple() + self.assertIsInstance(triple, str) + self.assertTrue(triple) + + default_arch = normalize_ppc(default.split('-')[0]) + triple_arch = normalize_ppc(triple.split('-')[0]) + # Arch must be equal + self.assertEqual(default_arch, triple_arch) + + def test_get_host_cpu_features(self): + features = llvm.get_host_cpu_features() + # Check the content of `features` + self.assertIsInstance(features, dict) + self.assertIsInstance(features, llvm.FeatureMap) + for k, v in features.items(): + self.assertIsInstance(k, str) + 
self.assertTrue(k) # single feature string cannot be empty + self.assertIsInstance(v, bool) + self.assertIsInstance(features.flatten(), str) + + re_term = r"[+\-][a-zA-Z0-9\._-]+" + regex = r"^({0}|{0}(,{0})*)?$".format(re_term) + # quick check for our regex + self.assertIsNotNone(re.match(regex, "")) + self.assertIsNotNone(re.match(regex, "+aa")) + self.assertIsNotNone(re.match(regex, "+a,-bb")) + # check CpuFeature.flatten() + if len(features) == 0: + self.assertEqual(features.flatten(), "") + else: + self.assertIsNotNone(re.match(regex, features.flatten())) + + def test_get_host_cpu_name(self): + cpu = llvm.get_host_cpu_name() + self.assertIsInstance(cpu, str) + self.assertTrue(cpu) + + def test_initfini(self): + code = """if 1: + from llvmlite import binding as llvm + + llvm.initialize() + llvm.initialize_native_target() + llvm.initialize_native_asmprinter() + llvm.initialize_all_targets() + llvm.initialize_all_asmprinters() + llvm.shutdown() + """ + subprocess.check_call([sys.executable, "-c", code]) + + def test_set_option(self): + # We cannot set an option multiple times (LLVM would exit() the + # process), so run the code in a subprocess. 
+ code = """if 1: + from llvmlite import binding as llvm + + llvm.set_option("progname", "-debug-pass=Disabled") + """ + subprocess.check_call([sys.executable, "-c", code]) + + def test_version(self): + major, minor, patch = llvm.llvm_version_info + # one of these can be valid + valid = (14, 15) + self.assertIn(major, valid) + self.assertIn(patch, range(8)) + + def test_check_jit_execution(self): + llvm.check_jit_execution() + + @unittest.skipIf(no_de_locale(), "Locale not available") + def test_print_double_locale(self): + m = self.module(asm_double_locale) + expect = str(m) + # Change the locale so that comma is used as decimal-point + # to trigger the LLVM bug (llvmlite issue #80) + locale.setlocale(locale.LC_ALL, 'de_DE') + # The LLVM bug is trigged by print the module with double constant + got = str(m) + # Changing the locale should not affect the LLVM IR + self.assertEqual(expect, got) + + def test_no_accidental_warnings(self): + code = "from llvmlite import binding" + flags = "-Werror" + cmdargs = [sys.executable, flags, "-c", code] + subprocess.check_call(cmdargs) + + +class TestModuleRef(BaseTest): + + def test_str(self): + mod = self.module() + s = str(mod).strip() + self.assertTrue(s.startswith('; ModuleID ='), s) + + def test_close(self): + mod = self.module() + str(mod) + mod.close() + with self.assertRaises(ctypes.ArgumentError): + str(mod) + mod.close() + + def test_with(self): + mod = self.module() + str(mod) + with mod: + str(mod) + with self.assertRaises(ctypes.ArgumentError): + str(mod) + with self.assertRaises(RuntimeError): + with mod: + pass + + def test_name(self): + mod = self.module() + mod.name = "foo" + self.assertEqual(mod.name, "foo") + mod.name = "bar" + self.assertEqual(mod.name, "bar") + + def test_source_file(self): + mod = self.module() + self.assertEqual(mod.source_file, "asm_sum.c") + + def test_data_layout(self): + mod = self.module() + s = mod.data_layout + self.assertIsInstance(s, str) + mod.data_layout = s + 
self.assertEqual(s, mod.data_layout) + + def test_triple(self): + mod = self.module() + s = mod.triple + self.assertEqual(s, llvm.get_default_triple()) + mod.triple = '' + self.assertEqual(mod.triple, '') + + def test_verify(self): + # Verify successful + mod = self.module() + self.assertIs(mod.verify(), None) + # Verify failed + mod = self.module(asm_verification_fail) + with self.assertRaises(RuntimeError) as cm: + mod.verify() + s = str(cm.exception) + self.assertIn("%.bug = add i32 1, %.bug", s) + + def test_get_function(self): + mod = self.module() + fn = mod.get_function("sum") + self.assertIsInstance(fn, llvm.ValueRef) + self.assertEqual(fn.name, "sum") + + with self.assertRaises(NameError): + mod.get_function("foo") + + # Check that fn keeps the module instance alive + del mod + str(fn.module) + + def test_get_struct_type(self): + mod = self.module() + st_ty = mod.get_struct_type("struct.glob_type") + self.assertEqual(st_ty.name, "struct.glob_type") + # also match struct names of form "%struct.glob_type.{some_index}" + self.assertIsNotNone(re.match( + r'%struct\.glob_type(\.[\d]+)? 
= type { i64, \[2 x i64\] }', + str(st_ty))) + + with self.assertRaises(NameError): + mod.get_struct_type("struct.doesnt_exist") + + def test_get_global_variable(self): + mod = self.module() + gv = mod.get_global_variable("glob") + self.assertIsInstance(gv, llvm.ValueRef) + self.assertEqual(gv.name, "glob") + + with self.assertRaises(NameError): + mod.get_global_variable("bar") + + # Check that gv keeps the module instance alive + del mod + str(gv.module) + + def test_global_variables(self): + mod = self.module() + it = mod.global_variables + del mod + globs = sorted(it, key=lambda value: value.name) + self.assertEqual(len(globs), 4) + self.assertEqual([g.name for g in globs], + ["glob", "glob_b", "glob_f", "glob_struct"]) + + def test_functions(self): + mod = self.module() + it = mod.functions + del mod + funcs = list(it) + self.assertEqual(len(funcs), 1) + self.assertEqual(funcs[0].name, "sum") + + def test_structs(self): + mod = self.module() + it = mod.struct_types + del mod + structs = list(it) + self.assertEqual(len(structs), 1) + self.assertIsNotNone(re.match(r'struct\.glob_type(\.[\d]+)?', + structs[0].name)) + self.assertIsNotNone(re.match( + r'%struct\.glob_type(\.[\d]+)? = type { i64, \[2 x i64\] }', + str(structs[0]))) + + def test_link_in(self): + dest = self.module() + src = self.module(asm_mul) + dest.link_in(src) + self.assertEqual( + sorted(f.name for f in dest.functions), ["mul", "sum"]) + dest.get_function("mul") + dest.close() + with self.assertRaises(ctypes.ArgumentError): + src.get_function("mul") + + def test_link_in_preserve(self): + dest = self.module() + src2 = self.module(asm_mul) + dest.link_in(src2, preserve=True) + self.assertEqual( + sorted(f.name for f in dest.functions), ["mul", "sum"]) + dest.close() + self.assertEqual(sorted(f.name for f in src2.functions), ["mul"]) + src2.get_function("mul") + + def test_link_in_error(self): + # Raise an error by trying to link two modules with the same global + # definition "sum". 
+ dest = self.module() + src = self.module(asm_sum2) + with self.assertRaises(RuntimeError) as cm: + dest.link_in(src) + self.assertIn("symbol multiply defined", str(cm.exception)) + + def test_as_bitcode(self): + mod = self.module() + bc = mod.as_bitcode() + # Refer to http://llvm.org/docs/doxygen/html/ReaderWriter_8h_source.html#l00064 # noqa E501 + # and http://llvm.org/docs/doxygen/html/ReaderWriter_8h_source.html#l00092 # noqa E501 + bitcode_wrapper_magic = b'\xde\xc0\x17\x0b' + bitcode_magic = b'BC' + self.assertTrue(bc.startswith(bitcode_magic) or + bc.startswith(bitcode_wrapper_magic)) + + def test_parse_bitcode_error(self): + with self.assertRaises(RuntimeError) as cm: + llvm.parse_bitcode(b"") + self.assertIn("LLVM bitcode parsing error", str(cm.exception)) + # for llvm < 9 + if llvm.llvm_version_info[0] < 9: + self.assertIn("Invalid bitcode signature", str(cm.exception)) + else: + self.assertIn( + "file too small to contain bitcode header", str(cm.exception), + ) + + def test_bitcode_roundtrip(self): + # create a new context to avoid struct renaming + context1 = llvm.create_context() + bc = self.module(context=context1).as_bitcode() + context2 = llvm.create_context() + mod = llvm.parse_bitcode(bc, context2) + self.assertEqual(mod.as_bitcode(), bc) + + mod.get_function("sum") + mod.get_global_variable("glob") + + def test_cloning(self): + m = self.module() + cloned = m.clone() + self.assertIsNot(cloned, m) + self.assertEqual(cloned.as_bitcode(), m.as_bitcode()) + + +class JITTestMixin(object): + """ + Mixin for ExecutionEngine tests. 
+ """ + + def get_sum(self, ee, func_name="sum"): + ee.finalize_object() + cfptr = ee.get_function_address(func_name) + self.assertTrue(cfptr) + return CFUNCTYPE(c_int, c_int, c_int)(cfptr) + + def test_run_code(self): + mod = self.module() + with self.jit(mod) as ee: + cfunc = self.get_sum(ee) + res = cfunc(2, -5) + self.assertEqual(-3, res) + + def test_close(self): + ee = self.jit(self.module()) + ee.close() + ee.close() + with self.assertRaises(ctypes.ArgumentError): + ee.finalize_object() + + def test_with(self): + ee = self.jit(self.module()) + with ee: + pass + with self.assertRaises(RuntimeError): + with ee: + pass + with self.assertRaises(ctypes.ArgumentError): + ee.finalize_object() + + def test_module_lifetime(self): + mod = self.module() + ee = self.jit(mod) + ee.close() + mod.close() + + def test_module_lifetime2(self): + mod = self.module() + ee = self.jit(mod) + mod.close() + ee.close() + + def test_add_module(self): + ee = self.jit(self.module()) + mod = self.module(asm_mul) + ee.add_module(mod) + with self.assertRaises(KeyError): + ee.add_module(mod) + self.assertFalse(mod.closed) + ee.close() + self.assertTrue(mod.closed) + + def test_add_module_lifetime(self): + ee = self.jit(self.module()) + mod = self.module(asm_mul) + ee.add_module(mod) + mod.close() + ee.close() + + def test_add_module_lifetime2(self): + ee = self.jit(self.module()) + mod = self.module(asm_mul) + ee.add_module(mod) + ee.close() + mod.close() + + def test_remove_module(self): + ee = self.jit(self.module()) + mod = self.module(asm_mul) + ee.add_module(mod) + ee.remove_module(mod) + with self.assertRaises(KeyError): + ee.remove_module(mod) + self.assertFalse(mod.closed) + ee.close() + self.assertFalse(mod.closed) + + def test_target_data(self): + mod = self.module() + ee = self.jit(mod) + td = ee.target_data + # A singleton is returned + self.assertIs(ee.target_data, td) + str(td) + del mod, ee + str(td) + + def test_target_data_abi_enquiries(self): + mod = self.module() + ee = 
self.jit(mod) + td = ee.target_data + gv_i32 = mod.get_global_variable("glob") + gv_i8 = mod.get_global_variable("glob_b") + gv_struct = mod.get_global_variable("glob_struct") + # A global is a pointer, it has the ABI size of a pointer + pointer_size = 4 if sys.maxsize < 2 ** 32 else 8 + for g in (gv_i32, gv_i8, gv_struct): + self.assertEqual(td.get_abi_size(g.type), pointer_size) + + self.assertEqual(td.get_pointee_abi_size(gv_i32.type), 4) + self.assertEqual(td.get_pointee_abi_alignment(gv_i32.type), 4) + + self.assertEqual(td.get_pointee_abi_size(gv_i8.type), 1) + self.assertIn(td.get_pointee_abi_alignment(gv_i8.type), (1, 2, 4)) + + self.assertEqual(td.get_pointee_abi_size(gv_struct.type), 24) + self.assertIn(td.get_pointee_abi_alignment(gv_struct.type), (4, 8)) + + def test_object_cache_notify(self): + notifies = [] + + def notify(mod, buf): + notifies.append((mod, buf)) + + mod = self.module() + ee = self.jit(mod) + ee.set_object_cache(notify) + + self.assertEqual(len(notifies), 0) + cfunc = self.get_sum(ee) + cfunc(2, -5) + self.assertEqual(len(notifies), 1) + # The right module object was found + self.assertIs(notifies[0][0], mod) + self.assertIsInstance(notifies[0][1], bytes) + + notifies[:] = [] + mod2 = self.module(asm_mul) + ee.add_module(mod2) + cfunc = self.get_sum(ee, "mul") + self.assertEqual(len(notifies), 1) + # The right module object was found + self.assertIs(notifies[0][0], mod2) + self.assertIsInstance(notifies[0][1], bytes) + + def test_object_cache_getbuffer(self): + notifies = [] + getbuffers = [] + + def notify(mod, buf): + notifies.append((mod, buf)) + + def getbuffer(mod): + getbuffers.append(mod) + + mod = self.module() + ee = self.jit(mod) + ee.set_object_cache(notify, getbuffer) + + # First return None from getbuffer(): the object is compiled normally + self.assertEqual(len(notifies), 0) + self.assertEqual(len(getbuffers), 0) + cfunc = self.get_sum(ee) + self.assertEqual(len(notifies), 1) + self.assertEqual(len(getbuffers), 1) + 
self.assertIs(getbuffers[0], mod) + sum_buffer = notifies[0][1] + + # Recreate a new EE, and use getbuffer() to return the previously + # compiled object. + + def getbuffer_successful(mod): + getbuffers.append(mod) + return sum_buffer + + notifies[:] = [] + getbuffers[:] = [] + # Use another source module to make sure it is ignored + mod = self.module(asm_mul) + ee = self.jit(mod) + ee.set_object_cache(notify, getbuffer_successful) + + self.assertEqual(len(notifies), 0) + self.assertEqual(len(getbuffers), 0) + cfunc = self.get_sum(ee) + self.assertEqual(cfunc(2, -5), -3) + self.assertEqual(len(notifies), 0) + self.assertEqual(len(getbuffers), 1) + + +class JITWithTMTestMixin(JITTestMixin): + + def test_emit_assembly(self): + """Test TargetMachineRef.emit_assembly()""" + target_machine = self.target_machine(jit=True) + mod = self.module() + ee = self.jit(mod, target_machine) # noqa F841 # Keeps pointers alive + raw_asm = target_machine.emit_assembly(mod) + self.assertIn("sum", raw_asm) + target_machine.set_asm_verbosity(True) + raw_asm_verbose = target_machine.emit_assembly(mod) + self.assertIn("sum", raw_asm) + self.assertNotEqual(raw_asm, raw_asm_verbose) + + def test_emit_object(self): + """Test TargetMachineRef.emit_object()""" + target_machine = self.target_machine(jit=True) + mod = self.module() + ee = self.jit(mod, target_machine) # noqa F841 # Keeps pointers alive + code_object = target_machine.emit_object(mod) + self.assertIsInstance(code_object, bytes) + if sys.platform.startswith('linux'): + # Sanity check + self.assertIn(b"ELF", code_object[:10]) + + +class TestMCJit(BaseTest, JITWithTMTestMixin): + """ + Test JIT engines created with create_mcjit_compiler(). + """ + + def jit(self, mod, target_machine=None): + if target_machine is None: + target_machine = self.target_machine(jit=True) + return llvm.create_mcjit_compiler(mod, target_machine) + + +# There are some memory corruption issues with OrcJIT on AArch64 - see Issue +# #1000. 
Since OrcJIT is experimental, and we don't test regularly during +# llvmlite development on non-x86 platforms, it seems safest to skip these +# tests on non-x86 platforms. +@unittest.skipUnless(platform.machine().startswith("x86"), "x86 only") +class TestOrcLLJIT(BaseTest): + + def jit(self, asm=asm_sum, func_name="sum", target_machine=None, + add_process=False, func_type=CFUNCTYPE(c_int, c_int, c_int), + suppress_errors=False): + lljit = llvm.create_lljit_compiler(target_machine, + use_jit_link=False, + suppress_errors=suppress_errors) + builder = llvm.JITLibraryBuilder() + if add_process: + builder.add_current_process() + rt = builder\ + .add_ir(asm.format(triple=llvm.get_default_triple()))\ + .export_symbol(func_name)\ + .link(lljit, func_name) + cfptr = rt[func_name] + self.assertTrue(cfptr) + self.assertEqual(func_name, rt.name) + return lljit, rt, func_type(cfptr) + + # From test_dylib_symbols + def test_define_symbol(self): + lljit = llvm.create_lljit_compiler() + rt = llvm.JITLibraryBuilder().import_symbol("__xyzzy", 1234)\ + .export_symbol("__xyzzy").link(lljit, "foo") + self.assertEqual(rt["__xyzzy"], 1234) + + def test_lookup_undefined_symbol_fails(self): + lljit = llvm.create_lljit_compiler() + with self.assertRaisesRegex(RuntimeError, 'No such library'): + lljit.lookup("foo", "__foobar") + rt = llvm.JITLibraryBuilder().import_symbol("__xyzzy", 1234)\ + .export_symbol("__xyzzy").link(lljit, "foo") + self.assertNotEqual(rt["__xyzzy"], 0) + with self.assertRaisesRegex(RuntimeError, + 'Symbols not found.*__foobar'): + lljit.lookup("foo", "__foobar") + + def test_jit_link(self): + if sys.platform == "win32": + with self.assertRaisesRegex(RuntimeError, + 'JITLink .* Windows'): + llvm.create_lljit_compiler(use_jit_link=True) + else: + self.assertIsNotNone(llvm.create_lljit_compiler(use_jit_link=True)) + + def test_run_code(self): + (lljit, rt, cfunc) = self.jit() + with lljit: + res = cfunc(2, -5) + self.assertEqual(-3, res) + + def test_close(self): + 
(lljit, rt, cfunc) = self.jit() + lljit.close() + lljit.close() + with self.assertRaises(AssertionError): + lljit.lookup("foo", "fn") + + def test_with(self): + (lljit, rt, cfunc) = self.jit() + with lljit: + pass + with self.assertRaises(RuntimeError): + with lljit: + pass + with self.assertRaises(AssertionError): + lljit.lookup("foo", "fn") + + def test_add_ir_module(self): + (lljit, rt_sum, cfunc_sum) = self.jit() + rt_mul = llvm.JITLibraryBuilder() \ + .add_ir(asm_mul.format(triple=llvm.get_default_triple())) \ + .export_symbol("mul") \ + .link(lljit, "mul") + res = CFUNCTYPE(c_int, c_int, c_int)(rt_mul["mul"])(2, -5) + self.assertEqual(-10, res) + self.assertNotEqual(lljit.lookup("sum", "sum")["sum"], 0) + self.assertNotEqual(lljit.lookup("mul", "mul")["mul"], 0) + with self.assertRaises(RuntimeError): + lljit.lookup("sum", "mul") + with self.assertRaises(RuntimeError): + lljit.lookup("mul", "sum") + + def test_remove_module(self): + (lljit, rt_sum, _) = self.jit() + del rt_sum + gc.collect() + with self.assertRaises(RuntimeError): + lljit.lookup("sum", "sum") + lljit.close() + + def test_lib_depends(self): + (lljit, rt_sum, cfunc_sum) = self.jit() + rt_mul = llvm.JITLibraryBuilder() \ + .add_ir(asm_square_sum.format(triple=llvm.get_default_triple())) \ + .export_symbol("square_sum") \ + .add_jit_library("sum") \ + .link(lljit, "square_sum") + res = CFUNCTYPE(c_int, c_int, c_int)(rt_mul["square_sum"])(2, -5) + self.assertEqual(9, res) + + def test_target_data(self): + (lljit, rt, _) = self.jit() + td = lljit.target_data + # A singleton is returned + self.assertIs(lljit.target_data, td) + str(td) + del lljit + str(td) + + def test_global_ctors_dtors(self): + # test issue #303 + # (https://github.com/numba/llvmlite/issues/303) + shared_value = c_int32(0) + lljit = llvm.create_lljit_compiler() + builder = llvm.JITLibraryBuilder() + rt = builder \ + .add_ir(asm_ext_ctors.format(triple=llvm.get_default_triple())) \ + .import_symbol("A", 
ctypes.addressof(shared_value)) \ + .export_symbol("foo") \ + .link(lljit, "foo") + foo = rt["foo"] + self.assertTrue(foo) + self.assertEqual(CFUNCTYPE(c_int)(foo)(), 12) + del rt + self.assertNotEqual(shared_value.value, 20) + + def test_lookup_current_process_symbol_fails(self): + # An attempt to lookup a symbol in the current process (Py_GetVersion, + # in this case) should fail with an appropriate error if we have not + # enabled searching the current process for symbols. + msg = 'Failed to materialize symbols:.*getversion' + with self.assertRaisesRegex(RuntimeError, msg): + self.jit(asm_getversion, "getversion", suppress_errors=True) + + def test_lookup_current_process_symbol(self): + self.jit(asm_getversion, "getversion", None, True) + + def test_thread_safe(self): + lljit = llvm.create_lljit_compiler() + llvm_ir = asm_sum.format(triple=llvm.get_default_triple()) + + def compile_many(i): + def do_work(): + tracking = [] + for c in range(50): + tracking.append(llvm.JITLibraryBuilder() + .add_ir(llvm_ir) + .export_symbol("sum") + .link(lljit, f"sum_{i}_{c}")) + + return do_work + + ths = [threading.Thread(target=compile_many(i)) + for i in range(os.cpu_count())] + for th in ths: + th.start() + for th in ths: + th.join() + + def test_add_object_file(self): + target_machine = self.target_machine(jit=False) + mod = self.module() + lljit = llvm.create_lljit_compiler(target_machine) + rt = llvm.JITLibraryBuilder()\ + .add_object_img(target_machine.emit_object(mod))\ + .export_symbol("sum")\ + .link(lljit, "sum") + sum = CFUNCTYPE(c_int, c_int, c_int)(rt["sum"]) + self.assertEqual(sum(2, 3), 5) + + def test_add_object_file_from_filesystem(self): + target_machine = self.target_machine(jit=False) + mod = self.module() + obj_bin = target_machine.emit_object(mod) + temp_desc, temp_path = mkstemp() + + try: + with os.fdopen(temp_desc, "wb") as f: + f.write(obj_bin) + lljit = llvm.create_lljit_compiler(target_machine) + rt = llvm.JITLibraryBuilder() \ + 
.add_object_file(temp_path) \ + .export_symbol("sum") \ + .link(lljit, "sum") + sum = CFUNCTYPE(c_int, c_int, c_int)(rt["sum"]) + self.assertEqual(sum(2, 3), 5) + finally: + os.unlink(temp_path) + + +class TestValueRef(BaseTest): + + def test_str(self): + mod = self.module() + glob = mod.get_global_variable("glob") + self.assertEqual(str(glob), "@glob = global i32 0") + + def test_name(self): + mod = self.module() + glob = mod.get_global_variable("glob") + self.assertEqual(glob.name, "glob") + glob.name = "foobar" + self.assertEqual(glob.name, "foobar") + + def test_linkage(self): + mod = self.module() + glob = mod.get_global_variable("glob") + linkage = glob.linkage + self.assertIsInstance(glob.linkage, llvm.Linkage) + glob.linkage = linkage + self.assertEqual(glob.linkage, linkage) + for linkage in ("internal", "external"): + glob.linkage = linkage + self.assertIsInstance(glob.linkage, llvm.Linkage) + self.assertEqual(glob.linkage.name, linkage) + + def test_visibility(self): + mod = self.module() + glob = mod.get_global_variable("glob") + visibility = glob.visibility + self.assertIsInstance(glob.visibility, llvm.Visibility) + glob.visibility = visibility + self.assertEqual(glob.visibility, visibility) + for visibility in ("hidden", "protected", "default"): + glob.visibility = visibility + self.assertIsInstance(glob.visibility, llvm.Visibility) + self.assertEqual(glob.visibility.name, visibility) + + def test_storage_class(self): + mod = self.module() + glob = mod.get_global_variable("glob") + storage_class = glob.storage_class + self.assertIsInstance(glob.storage_class, llvm.StorageClass) + glob.storage_class = storage_class + self.assertEqual(glob.storage_class, storage_class) + for storage_class in ("dllimport", "dllexport", "default"): + glob.storage_class = storage_class + self.assertIsInstance(glob.storage_class, llvm.StorageClass) + self.assertEqual(glob.storage_class.name, storage_class) + + def test_add_function_attribute(self): + mod = self.module() + 
fn = mod.get_function("sum") + fn.add_function_attribute("nocapture") + with self.assertRaises(ValueError) as raises: + fn.add_function_attribute("zext") + self.assertEqual(str(raises.exception), "no such attribute 'zext'") + + def test_module(self): + mod = self.module() + glob = mod.get_global_variable("glob") + self.assertIs(glob.module, mod) + + def test_type(self): + mod = self.module() + glob = mod.get_global_variable("glob") + tp = glob.type + self.assertIsInstance(tp, llvm.TypeRef) + + def test_type_name(self): + mod = self.module() + glob = mod.get_global_variable("glob") + tp = glob.type + self.assertEqual(tp.name, "") + st = mod.get_global_variable("glob_struct") + self.assertIsNotNone(re.match(r"struct\.glob_type(\.[\d]+)?", + st.type.element_type.name)) + + def test_type_printing_variable(self): + mod = self.module() + glob = mod.get_global_variable("glob") + tp = glob.type + self.assertEqual(str(tp), 'i32*') + + def test_type_printing_function(self): + mod = self.module() + fn = mod.get_function("sum") + self.assertEqual(str(fn.type), "i32 (i32, i32)*") + + def test_type_printing_struct(self): + mod = self.module() + st = mod.get_global_variable("glob_struct") + self.assertTrue(st.type.is_pointer) + self.assertIsNotNone(re.match(r'%struct\.glob_type(\.[\d]+)?\*', + str(st.type))) + self.assertIsNotNone(re.match( + r"%struct\.glob_type(\.[\d]+)? 
= type { i64, \[2 x i64\] }", + str(st.type.element_type))) + + def test_close(self): + glob = self.glob() + glob.close() + glob.close() + + def test_is_declaration(self): + defined = self.module().get_function('sum') + declared = self.module(asm_sum_declare).get_function('sum') + self.assertFalse(defined.is_declaration) + self.assertTrue(declared.is_declaration) + + def test_module_global_variables(self): + mod = self.module(asm_sum) + gvars = list(mod.global_variables) + self.assertEqual(len(gvars), 4) + for v in gvars: + self.assertTrue(v.is_global) + + def test_module_functions(self): + mod = self.module() + funcs = list(mod.functions) + self.assertEqual(len(funcs), 1) + func = funcs[0] + self.assertTrue(func.is_function) + self.assertEqual(func.name, 'sum') + + with self.assertRaises(ValueError): + func.instructions + with self.assertRaises(ValueError): + func.operands + with self.assertRaises(ValueError): + func.opcode + + def test_function_arguments(self): + mod = self.module() + func = mod.get_function('sum') + self.assertTrue(func.is_function) + args = list(func.arguments) + self.assertEqual(len(args), 2) + self.assertTrue(args[0].is_argument) + self.assertTrue(args[1].is_argument) + self.assertEqual(args[0].name, '.1') + self.assertEqual(str(args[0].type), 'i32') + self.assertEqual(args[1].name, '.2') + self.assertEqual(str(args[1].type), 'i32') + + with self.assertRaises(ValueError): + args[0].blocks + with self.assertRaises(ValueError): + args[0].arguments + + def test_function_blocks(self): + func = self.module().get_function('sum') + blocks = list(func.blocks) + self.assertEqual(len(blocks), 1) + block = blocks[0] + self.assertTrue(block.is_block) + + def test_block_instructions(self): + func = self.module().get_function('sum') + insts = list(list(func.blocks)[0].instructions) + self.assertEqual(len(insts), 3) + self.assertTrue(insts[0].is_instruction) + self.assertTrue(insts[1].is_instruction) + self.assertTrue(insts[2].is_instruction) + 
self.assertEqual(insts[0].opcode, 'add') + self.assertEqual(insts[1].opcode, 'add') + self.assertEqual(insts[2].opcode, 'ret') + + def test_instruction_operands(self): + func = self.module().get_function('sum') + add = list(list(func.blocks)[0].instructions)[0] + self.assertEqual(add.opcode, 'add') + operands = list(add.operands) + self.assertEqual(len(operands), 2) + self.assertTrue(operands[0].is_operand) + self.assertTrue(operands[1].is_operand) + self.assertEqual(operands[0].name, '.1') + self.assertEqual(str(operands[0].type), 'i32') + self.assertEqual(operands[1].name, '.2') + self.assertEqual(str(operands[1].type), 'i32') + + def test_function_attributes(self): + mod = self.module(asm_attributes) + for func in mod.functions: + attrs = list(func.attributes) + if func.name == 'a_readonly_func': + self.assertEqual(attrs, [b'readonly']) + elif func.name == 'a_arg0_return_func': + self.assertEqual(attrs, []) + args = list(func.arguments) + self.assertEqual(list(args[0].attributes), [b'returned']) + self.assertEqual(list(args[1].attributes), []) + + def test_value_kind(self): + mod = self.module() + self.assertEqual(mod.get_global_variable('glob').value_kind, + llvm.ValueKind.global_variable) + func = mod.get_function('sum') + self.assertEqual(func.value_kind, llvm.ValueKind.function) + block = list(func.blocks)[0] + self.assertEqual(block.value_kind, llvm.ValueKind.basic_block) + inst = list(block.instructions)[1] + self.assertEqual(inst.value_kind, llvm.ValueKind.instruction) + self.assertEqual(list(inst.operands)[0].value_kind, + llvm.ValueKind.constant_int) + self.assertEqual(list(inst.operands)[1].value_kind, + llvm.ValueKind.instruction) + + iasm_func = self.module(asm_inlineasm).get_function('foo') + iasm_inst = list(list(iasm_func.blocks)[0].instructions)[0] + self.assertEqual(list(iasm_inst.operands)[0].value_kind, + llvm.ValueKind.inline_asm) + + def test_is_constant(self): + mod = self.module() + 
self.assertTrue(mod.get_global_variable('glob').is_constant) + constant_operands = 0 + for func in mod.functions: + self.assertTrue(func.is_constant) + for block in func.blocks: + self.assertFalse(block.is_constant) + for inst in block.instructions: + self.assertFalse(inst.is_constant) + for op in inst.operands: + if op.is_constant: + constant_operands += 1 + + self.assertEqual(constant_operands, 1) + + def test_constant_int(self): + mod = self.module() + func = mod.get_function('sum') + insts = list(list(func.blocks)[0].instructions) + self.assertEqual(insts[1].opcode, 'add') + operands = list(insts[1].operands) + self.assertTrue(operands[0].is_constant) + self.assertFalse(operands[1].is_constant) + self.assertEqual(operands[0].get_constant_value(), 0) + with self.assertRaises(ValueError): + operands[1].get_constant_value() + + mod = self.module(asm_sum3) + func = mod.get_function('sum') + insts = list(list(func.blocks)[0].instructions) + posint64 = list(insts[1].operands)[0] + negint64 = list(insts[2].operands)[0] + self.assertEqual(posint64.get_constant_value(), 5) + self.assertEqual(negint64.get_constant_value(signed_int=True), -5) + + # Convert from unsigned arbitrary-precision integer to signed i64 + as_u64 = negint64.get_constant_value(signed_int=False) + as_i64 = int.from_bytes(as_u64.to_bytes(8, 'little'), 'little', + signed=True) + self.assertEqual(as_i64, -5) + + def test_constant_fp(self): + mod = self.module(asm_double_locale) + func = mod.get_function('foo') + insts = list(list(func.blocks)[0].instructions) + self.assertEqual(len(insts), 2) + self.assertEqual(insts[0].opcode, 'fadd') + operands = list(insts[0].operands) + self.assertTrue(operands[0].is_constant) + self.assertAlmostEqual(operands[0].get_constant_value(), 0.0) + self.assertTrue(operands[1].is_constant) + self.assertAlmostEqual(operands[1].get_constant_value(), 3.14) + + mod = self.module(asm_double_inaccurate) + func = mod.get_function('foo') + inst = 
list(list(func.blocks)[0].instructions)[0] + operands = list(inst.operands) + with self.assertRaises(ValueError): + operands[0].get_constant_value() + self.assertAlmostEqual(operands[1].get_constant_value(round_fp=True), 0) + + def test_constant_as_string(self): + mod = self.module(asm_null_constant) + func = mod.get_function('bar') + inst = list(list(func.blocks)[0].instructions)[0] + arg = list(inst.operands)[0] + self.assertTrue(arg.is_constant) + self.assertEqual(arg.get_constant_value(), 'i64* null') + + def test_incoming_phi_blocks(self): + mod = self.module(asm_phi_blocks) + func = mod.get_function('foo') + blocks = list(func.blocks) + instructions = list(blocks[-1].instructions) + self.assertTrue(instructions[0].is_instruction) + self.assertEqual(instructions[0].opcode, 'phi') + + incoming_blocks = list(instructions[0].incoming_blocks) + self.assertEqual(len(incoming_blocks), 2) + self.assertTrue(incoming_blocks[0].is_block) + self.assertTrue(incoming_blocks[1].is_block) + # Test reference to blocks (named or unnamed) + self.assertEqual(incoming_blocks[0], blocks[-1]) + self.assertEqual(incoming_blocks[1], blocks[0]) + + # Test case that should fail + self.assertNotEqual(instructions[1].opcode, 'phi') + with self.assertRaises(ValueError): + instructions[1].incoming_blocks + + +class TestTypeRef(BaseTest): + + def test_str(self): + mod = self.module() + glob = mod.get_global_variable("glob") + self.assertEqual(str(glob.type), "i32*") + glob_struct_type = mod.get_struct_type("struct.glob_type") + self.assertEqual(str(glob_struct_type), + "%struct.glob_type = type { i64, [2 x i64] }") + + elements = list(glob_struct_type.elements) + self.assertEqual(len(elements), 2) + self.assertEqual(str(elements[0]), "i64") + self.assertEqual(str(elements[1]), "[2 x i64]") + + def test_type_kind(self): + mod = self.module() + glob = mod.get_global_variable("glob") + self.assertEqual(glob.type.type_kind, llvm.TypeKind.pointer) + self.assertTrue(glob.type.is_pointer) + + 
glob_struct = mod.get_global_variable("glob_struct") + self.assertEqual(glob_struct.type.type_kind, llvm.TypeKind.pointer) + self.assertTrue(glob_struct.type.is_pointer) + + stype = next(iter(glob_struct.type.elements)) + self.assertEqual(stype.type_kind, llvm.TypeKind.struct) + self.assertTrue(stype.is_struct) + + stype_a, stype_b = stype.elements + self.assertEqual(stype_a.type_kind, llvm.TypeKind.integer) + self.assertEqual(stype_b.type_kind, llvm.TypeKind.array) + self.assertTrue(stype_b.is_array) + + glob_vec_struct_type = mod.get_struct_type("struct.glob_type_vec") + _, vector_type = glob_vec_struct_type.elements + self.assertEqual(vector_type.type_kind, llvm.TypeKind.vector) + self.assertTrue(vector_type.is_vector) + + funcptr = mod.get_function("sum").type + self.assertEqual(funcptr.type_kind, llvm.TypeKind.pointer) + functype, = funcptr.elements + self.assertEqual(functype.type_kind, llvm.TypeKind.function) + + def test_element_count(self): + mod = self.module() + glob_struct_type = mod.get_struct_type("struct.glob_type") + _, array_type = glob_struct_type.elements + self.assertEqual(array_type.element_count, 2) + with self.assertRaises(ValueError): + glob_struct_type.element_count + + def test_type_width(self): + mod = self.module() + glob_struct_type = mod.get_struct_type("struct.glob_type") + glob_vec_struct_type = mod.get_struct_type("struct.glob_type_vec") + integer_type, array_type = glob_struct_type.elements + _, vector_type = glob_vec_struct_type.elements + self.assertEqual(integer_type.type_width, 64) + self.assertEqual(vector_type.type_width, 64 * 2) + + # Structs and arrays are not primitive types + self.assertEqual(glob_struct_type.type_width, 0) + self.assertEqual(array_type.type_width, 0) + + def test_vararg_function(self): + # Variadic function + mod = self.module(asm_vararg_declare) + func = mod.get_function('vararg') + decltype = func.type.element_type + self.assertTrue(decltype.is_function_vararg) + + mod = self.module(asm_sum_declare) + 
func = mod.get_function('sum') + decltype = func.type.element_type + self.assertFalse(decltype.is_function_vararg) + + # test that the function pointer type cannot use is_function_vararg + self.assertTrue(func.type.is_pointer) + with self.assertRaises(ValueError) as raises: + func.type.is_function_vararg + self.assertIn("Type i32 (i32, i32)* is not a function", + str(raises.exception)) + + +class TestTarget(BaseTest): + + def test_from_triple(self): + f = llvm.Target.from_triple + with self.assertRaises(RuntimeError) as cm: + f("foobar") + self.assertIn("No available targets are compatible with", + str(cm.exception)) + triple = llvm.get_default_triple() + target = f(triple) + self.assertEqual(target.triple, triple) + target.close() + + def test_create_target_machine(self): + target = llvm.Target.from_triple(llvm.get_default_triple()) + # With the default settings + target.create_target_machine('', '', 1, 'default', 'default') + # With the host's CPU + cpu = llvm.get_host_cpu_name() + target.create_target_machine(cpu, '', 1, 'default', 'default') + + def test_name(self): + t = llvm.Target.from_triple(llvm.get_default_triple()) + u = llvm.Target.from_default_triple() + self.assertIsInstance(t.name, str) + self.assertEqual(t.name, u.name) + + def test_description(self): + t = llvm.Target.from_triple(llvm.get_default_triple()) + u = llvm.Target.from_default_triple() + self.assertIsInstance(t.description, str) + self.assertEqual(t.description, u.description) + + def test_str(self): + target = llvm.Target.from_triple(llvm.get_default_triple()) + s = str(target) + self.assertIn(target.name, s) + self.assertIn(target.description, s) + + +class TestTargetData(BaseTest): + + def target_data(self): + return llvm.create_target_data("e-m:e-i64:64-f80:128-n8:16:32:64-S128") + + def test_get_abi_size(self): + td = self.target_data() + glob = self.glob() + self.assertEqual(td.get_abi_size(glob.type), 8) + + def test_get_pointee_abi_size(self): + td = self.target_data() + + glob = 
self.glob() + self.assertEqual(td.get_pointee_abi_size(glob.type), 4) + + glob = self.glob("glob_struct") + self.assertEqual(td.get_pointee_abi_size(glob.type), 24) + + def test_get_struct_element_offset(self): + td = self.target_data() + glob = self.glob("glob_struct") + + with self.assertRaises(ValueError): + td.get_element_offset(glob.type, 0) + + struct_type = glob.type.element_type + self.assertEqual(td.get_element_offset(struct_type, 0), 0) + self.assertEqual(td.get_element_offset(struct_type, 1), 8) + + +class TestTargetMachine(BaseTest): + + def test_add_analysis_passes(self): + tm = self.target_machine(jit=False) + pm = llvm.create_module_pass_manager() + tm.add_analysis_passes(pm) + + def test_target_data_from_tm(self): + tm = self.target_machine(jit=False) + td = tm.target_data + mod = self.module() + gv_i32 = mod.get_global_variable("glob") + # A global is a pointer, it has the ABI size of a pointer + pointer_size = 4 if sys.maxsize < 2 ** 32 else 8 + self.assertEqual(td.get_abi_size(gv_i32.type), pointer_size) + + +class TestPassManagerBuilder(BaseTest): + + def pmb(self): + return llvm.PassManagerBuilder() + + def test_old_api(self): + # Test the create_pass_manager_builder() factory function + pmb = llvm.create_pass_manager_builder() + pmb.inlining_threshold = 2 + pmb.opt_level = 3 + + def test_close(self): + pmb = self.pmb() + pmb.close() + pmb.close() + + def test_opt_level(self): + pmb = self.pmb() + self.assertIsInstance(pmb.opt_level, int) + for i in range(4): + pmb.opt_level = i + self.assertEqual(pmb.opt_level, i) + + def test_size_level(self): + pmb = self.pmb() + self.assertIsInstance(pmb.size_level, int) + for i in range(4): + pmb.size_level = i + self.assertEqual(pmb.size_level, i) + + def test_inlining_threshold(self): + pmb = self.pmb() + with self.assertRaises(NotImplementedError): + pmb.inlining_threshold + for i in (25, 80, 350): + pmb.inlining_threshold = i + + def test_disable_unroll_loops(self): + pmb = self.pmb() + 
self.assertIsInstance(pmb.disable_unroll_loops, bool) + for b in (True, False): + pmb.disable_unroll_loops = b + self.assertEqual(pmb.disable_unroll_loops, b) + + def test_loop_vectorize(self): + pmb = self.pmb() + self.assertIsInstance(pmb.loop_vectorize, bool) + for b in (True, False): + pmb.loop_vectorize = b + self.assertEqual(pmb.loop_vectorize, b) + + def test_slp_vectorize(self): + pmb = self.pmb() + self.assertIsInstance(pmb.slp_vectorize, bool) + for b in (True, False): + pmb.slp_vectorize = b + self.assertEqual(pmb.slp_vectorize, b) + + def test_populate_module_pass_manager(self): + pmb = self.pmb() + pm = llvm.create_module_pass_manager() + pmb.populate(pm) + pmb.close() + pm.close() + + def test_populate_function_pass_manager(self): + mod = self.module() + pmb = self.pmb() + pm = llvm.create_function_pass_manager(mod) + pmb.populate(pm) + pmb.close() + pm.close() + + +class PassManagerTestMixin(object): + + def pmb(self): + pmb = llvm.create_pass_manager_builder() + pmb.opt_level = 2 + pmb.inlining_threshold = 300 + return pmb + + def test_close(self): + pm = self.pm() + pm.close() + pm.close() + + +class TestModulePassManager(BaseTest, PassManagerTestMixin): + + def pm(self): + return llvm.create_module_pass_manager() + + def test_run(self): + pm = self.pm() + self.pmb().populate(pm) + mod = self.module() + orig_asm = str(mod) + pm.run(mod) + opt_asm = str(mod) + # Quick check that optimizations were run, should get: + # define i32 @sum(i32 %.1, i32 %.2) local_unnamed_addr #0 { + # %.X = add i32 %.2, %.1 + # ret i32 %.X + # } + # where X in %.X is 3 or 4 + opt_asm_split = opt_asm.splitlines() + for idx, l in enumerate(opt_asm_split): + if l.strip().startswith('ret i32'): + toks = {'%.3', '%.4'} + for t in toks: + if t in l: + break + else: + raise RuntimeError("expected tokens not found") + othertoken = (toks ^ {t}).pop() + + self.assertIn("%.3", orig_asm) + self.assertNotIn(othertoken, opt_asm) + break + else: + raise RuntimeError("expected IR not 
found") + + def test_run_with_remarks_successful_inline(self): + pm = self.pm() + pm.add_function_inlining_pass(70) + self.pmb().populate(pm) + mod = self.module(asm_inlineasm2) + (status, remarks) = pm.run_with_remarks(mod) + self.assertTrue(status) + # Inlining has happened? The remark will tell us. + self.assertIn("Passed", remarks) + self.assertIn("inlineme", remarks) + + def test_run_with_remarks_failed_inline(self): + pm = self.pm() + pm.add_function_inlining_pass(0) + self.pmb().populate(pm) + mod = self.module(asm_inlineasm3) + (status, remarks) = pm.run_with_remarks(mod) + self.assertTrue(status) + + # Inlining has not happened? The remark will tell us. + self.assertIn("Missed", remarks) + self.assertIn("inlineme", remarks) + self.assertIn("noinline function attribute", remarks) + + def test_run_with_remarks_inline_filter_out(self): + pm = self.pm() + pm.add_function_inlining_pass(70) + self.pmb().populate(pm) + mod = self.module(asm_inlineasm2) + (status, remarks) = pm.run_with_remarks(mod, remarks_filter="nothing") + self.assertTrue(status) + self.assertEqual("", remarks) + + def test_run_with_remarks_inline_filter_in(self): + pm = self.pm() + pm.add_function_inlining_pass(70) + self.pmb().populate(pm) + mod = self.module(asm_inlineasm2) + (status, remarks) = pm.run_with_remarks(mod, remarks_filter="inlin.*") + self.assertTrue(status) + self.assertIn("Passed", remarks) + self.assertIn("inlineme", remarks) + + +class TestFunctionPassManager(BaseTest, PassManagerTestMixin): + + def pm(self, mod=None): + mod = mod or self.module() + return llvm.create_function_pass_manager(mod) + + def test_initfini(self): + pm = self.pm() + pm.initialize() + pm.finalize() + + def test_run(self): + mod = self.module() + fn = mod.get_function("sum") + pm = self.pm(mod) + self.pmb().populate(pm) + mod.close() + orig_asm = str(fn) + pm.initialize() + pm.run(fn) + pm.finalize() + opt_asm = str(fn) + # Quick check that optimizations were run + self.assertIn("%.4", orig_asm) + 
self.assertNotIn("%.4", opt_asm) + + def test_run_with_remarks(self): + mod = self.module(licm_asm) + fn = mod.get_function("licm") + pm = self.pm(mod) + pm.add_licm_pass() + self.pmb().populate(pm) + mod.close() + + pm.initialize() + (ok, remarks) = pm.run_with_remarks(fn) + pm.finalize() + self.assertTrue(ok) + self.assertIn("Passed", remarks) + self.assertIn("licm", remarks) + + def test_run_with_remarks_filter_out(self): + mod = self.module(licm_asm) + fn = mod.get_function("licm") + pm = self.pm(mod) + pm.add_licm_pass() + self.pmb().populate(pm) + mod.close() + + pm.initialize() + (ok, remarks) = pm.run_with_remarks(fn, remarks_filter="nothing") + pm.finalize() + self.assertTrue(ok) + self.assertEqual("", remarks) + + def test_run_with_remarks_filter_in(self): + mod = self.module(licm_asm) + fn = mod.get_function("licm") + pm = self.pm(mod) + pm.add_licm_pass() + self.pmb().populate(pm) + mod.close() + + pm.initialize() + (ok, remarks) = pm.run_with_remarks(fn, remarks_filter="licm") + pm.finalize() + self.assertTrue(ok) + self.assertIn("Passed", remarks) + self.assertIn("licm", remarks) + + +class TestPasses(BaseTest, PassManagerTestMixin): + + def pm(self): + return llvm.create_module_pass_manager() + + def test_populate(self): + pm = self.pm() + pm.add_target_library_info("") # unspecified target triple + pm.add_constant_merge_pass() + pm.add_dead_arg_elimination_pass() + pm.add_function_attrs_pass() + pm.add_function_inlining_pass(225) + pm.add_global_dce_pass() + pm.add_global_optimizer_pass() + pm.add_ipsccp_pass() + pm.add_dead_code_elimination_pass() + pm.add_cfg_simplification_pass() + pm.add_gvn_pass() + pm.add_instruction_combining_pass() + pm.add_licm_pass() + pm.add_sccp_pass() + pm.add_sroa_pass() + pm.add_type_based_alias_analysis_pass() + pm.add_basic_alias_analysis_pass() + pm.add_loop_rotate_pass() + pm.add_region_info_pass() + pm.add_scalar_evolution_aa_pass() + pm.add_aggressive_dead_code_elimination_pass() + pm.add_aa_eval_pass() + 
pm.add_always_inliner_pass() + if llvm_version_major < 15: + pm.add_arg_promotion_pass(42) + pm.add_break_critical_edges_pass() + pm.add_dead_store_elimination_pass() + pm.add_reverse_post_order_function_attrs_pass() + pm.add_aggressive_instruction_combining_pass() + pm.add_internalize_pass() + pm.add_jump_threading_pass(7) + pm.add_lcssa_pass() + pm.add_loop_deletion_pass() + pm.add_loop_extractor_pass() + pm.add_single_loop_extractor_pass() + pm.add_loop_strength_reduce_pass() + pm.add_loop_simplification_pass() + pm.add_loop_unroll_pass() + pm.add_loop_unroll_and_jam_pass() + if llvm_version_major < 15: + pm.add_loop_unswitch_pass() + pm.add_lower_atomic_pass() + pm.add_lower_invoke_pass() + pm.add_lower_switch_pass() + pm.add_memcpy_optimization_pass() + pm.add_merge_functions_pass() + pm.add_merge_returns_pass() + pm.add_partial_inlining_pass() + pm.add_prune_exception_handling_pass() + pm.add_reassociate_expressions_pass() + pm.add_demote_register_to_memory_pass() + pm.add_sink_pass() + pm.add_strip_symbols_pass() + pm.add_strip_dead_debug_info_pass() + pm.add_strip_dead_prototypes_pass() + pm.add_strip_debug_declare_pass() + pm.add_strip_nondebug_symbols_pass() + pm.add_tail_call_elimination_pass() + pm.add_basic_aa_pass() + pm.add_dependence_analysis_pass() + pm.add_dot_call_graph_pass() + pm.add_dot_cfg_printer_pass() + pm.add_dot_dom_printer_pass() + pm.add_dot_postdom_printer_pass() + pm.add_globals_mod_ref_aa_pass() + pm.add_iv_users_pass() + pm.add_lazy_value_info_pass() + pm.add_lint_pass() + pm.add_module_debug_info_pass() + pm.add_refprune_pass() + pm.add_instruction_namer_pass() + + @unittest.skipUnless(platform.machine().startswith("x86"), "x86 only") + def test_target_library_info_behavior(self): + """Test a specific situation that demonstrate TLI is affecting + optimization. See https://github.com/numba/numba/issues/8898. 
+ """ + def run(use_tli): + mod = llvm.parse_assembly(asm_tli_exp2) + target = llvm.Target.from_triple(mod.triple) + tm = target.create_target_machine() + pm = llvm.ModulePassManager() + tm.add_analysis_passes(pm) + if use_tli: + pm.add_target_library_info(mod.triple) + pm.add_instruction_combining_pass() + pm.run(mod) + return mod + + # Run with TLI should suppress transformation of exp2 -> ldexpf + mod = run(use_tli=True) + self.assertIn("call float @llvm.exp2.f32", str(mod)) + + # Run without TLI will enable the transformation + mod = run(use_tli=False) + self.assertNotIn("call float @llvm.exp2.f32", str(mod)) + self.assertIn("call float @ldexpf", str(mod)) + + def test_instruction_namer_pass(self): + asm = asm_inlineasm3.format(triple=llvm.get_default_triple()) + mod = llvm.parse_assembly(asm) + + # Run instnamer pass + pm = llvm.ModulePassManager() + pm.add_instruction_namer_pass() + pm.run(mod) + + # Test that unnamed instructions are now named + func = mod.get_function('foo') + first_block = next(func.blocks) + instructions = list(first_block.instructions) + self.assertEqual(instructions[0].name, 'i') + self.assertEqual(instructions[1].name, 'i2') + + +class TestDylib(BaseTest): + + def test_bad_library(self): + with self.assertRaises(RuntimeError): + llvm.load_library_permanently("zzzasdkf;jasd;l") + + @unittest.skipUnless(platform.system() in ["Linux"], + "test only works on Linux") + def test_libm(self): + libm = find_library("m") + llvm.load_library_permanently(libm) + + +class TestAnalysis(BaseTest): + def build_ir_module(self): + m = ir.Module() + ft = ir.FunctionType(ir.IntType(32), [ir.IntType(32), ir.IntType(32)]) + fn = ir.Function(m, ft, "foo") + bd = ir.IRBuilder(fn.append_basic_block()) + x, y = fn.args + z = bd.add(x, y) + bd.ret(z) + return m + + def test_get_function_cfg_on_ir(self): + mod = self.build_ir_module() + foo = mod.get_global('foo') + dot_showing_inst = llvm.get_function_cfg(foo) + dot_without_inst = llvm.get_function_cfg(foo, 
show_inst=False) + inst = "%.5 = add i32 %.1, %.2" + self.assertIn(inst, dot_showing_inst) + self.assertNotIn(inst, dot_without_inst) + + def test_function_cfg_on_llvm_value(self): + defined = self.module().get_function('sum') + dot_showing_inst = llvm.get_function_cfg(defined, show_inst=True) + dot_without_inst = llvm.get_function_cfg(defined, show_inst=False) + # Check "digraph" + prefix = 'digraph' + self.assertIn(prefix, dot_showing_inst) + self.assertIn(prefix, dot_without_inst) + # Check function name + fname = "CFG for 'sum' function" + self.assertIn(fname, dot_showing_inst) + self.assertIn(fname, dot_without_inst) + # Check instruction + inst = "%.3 = add i32 %.1, %.2" + self.assertIn(inst, dot_showing_inst) + self.assertNotIn(inst, dot_without_inst) + + +class TestTypeParsing(BaseTest): + @contextmanager + def check_parsing(self): + mod = ir.Module() + # Yield to caller and provide the module for adding + # new GV. + yield mod + # Caller yield back and continue with testing + asm = str(mod) + llvm.parse_assembly(asm) + + def test_literal_struct(self): + # Natural layout + with self.check_parsing() as mod: + typ = ir.LiteralStructType([ir.IntType(32)]) + gv = ir.GlobalVariable(mod, typ, "foo") + # Also test constant text repr + gv.initializer = ir.Constant(typ, [1]) + + # Packed layout + with self.check_parsing() as mod: + typ = ir.LiteralStructType([ir.IntType(32)], + packed=True) + gv = ir.GlobalVariable(mod, typ, "foo") + # Also test constant text repr + gv.initializer = ir.Constant(typ, [1]) + + +class TestGlobalConstructors(TestMCJit): + def test_global_ctors_dtors(self): + # test issue #303 + # (https://github.com/numba/llvmlite/issues/303) + mod = self.module(asm_global_ctors) + ee = self.jit(mod) + ee.finalize_object() + + ee.run_static_constructors() + + # global variable should have been initialized + ptr_addr = ee.get_global_value_address("A") + ptr_t = ctypes.POINTER(ctypes.c_int32) + ptr = ctypes.cast(ptr_addr, ptr_t) + 
self.assertEqual(ptr.contents.value, 10) + + foo_addr = ee.get_function_address("foo") + foo = ctypes.CFUNCTYPE(ctypes.c_int32)(foo_addr) + self.assertEqual(foo(), 12) + + ee.run_static_destructors() + + # destructor should have run + self.assertEqual(ptr.contents.value, 20) + + +class TestGlobalVariables(BaseTest): + def check_global_variable_linkage(self, linkage, has_undef=True): + # This test default initializer on global variables with different + # linkages. Some linkages requires an initializer be present, while + # it is optional for others. This test uses ``parse_assembly()`` + # to verify that we are adding an `undef` automatically if user didn't + # specific one for certain linkages. It is a IR syntax error if the + # initializer is not present for certain linkages e.g. "external". + mod = ir.Module() + typ = ir.IntType(32) + gv = ir.GlobalVariable(mod, typ, "foo") + gv.linkage = linkage + asm = str(mod) + # check if 'undef' is present + if has_undef: + self.assertIn('undef', asm) + else: + self.assertNotIn('undef', asm) + # parse assembly to ensure correctness + self.module(asm) + + def test_internal_linkage(self): + self.check_global_variable_linkage('internal') + + def test_common_linkage(self): + self.check_global_variable_linkage('common') + + def test_external_linkage(self): + self.check_global_variable_linkage('external', has_undef=False) + + def test_available_externally_linkage(self): + self.check_global_variable_linkage('available_externally') + + def test_private_linkage(self): + self.check_global_variable_linkage('private') + + def test_linkonce_linkage(self): + self.check_global_variable_linkage('linkonce') + + def test_weak_linkage(self): + self.check_global_variable_linkage('weak') + + def test_appending_linkage(self): + self.check_global_variable_linkage('appending') + + def test_extern_weak_linkage(self): + self.check_global_variable_linkage('extern_weak', has_undef=False) + + def test_linkonce_odr_linkage(self): + 
self.check_global_variable_linkage('linkonce_odr') + + def test_weak_odr_linkage(self): + self.check_global_variable_linkage('weak_odr') + + +@unittest.skipUnless(platform.machine().startswith('x86'), "only on x86") +class TestInlineAsm(BaseTest): + def test_inlineasm(self): + llvm.initialize_native_asmparser() + m = self.module(asm=asm_inlineasm) + tm = self.target_machine(jit=False) + asm = tm.emit_assembly(m) + self.assertIn('nop', asm) + + +class TestObjectFile(BaseTest): + + mod_asm = """ + ;ModuleID = + target triple = "{triple}" + + declare i32 @sum(i32 %.1, i32 %.2) + + define i32 @sum_twice(i32 %.1, i32 %.2) {{ + %.3 = call i32 @sum(i32 %.1, i32 %.2) + %.4 = call i32 @sum(i32 %.3, i32 %.3) + ret i32 %.4 + }} + """ + + def test_object_file(self): + target_machine = self.target_machine(jit=False) + mod = self.module() + obj_bin = target_machine.emit_object(mod) + obj = llvm.ObjectFileRef.from_data(obj_bin) + # Check that we have a text section, and that she has a name and data + has_text = False + last_address = -1 + for s in obj.sections(): + if s.is_text(): + has_text = True + self.assertIsNotNone(s.name()) + self.assertTrue(s.size() > 0) + self.assertTrue(len(s.data()) > 0) + self.assertIsNotNone(s.address()) + self.assertTrue(last_address < s.address()) + last_address = s.address() + break + self.assertTrue(has_text) + + def test_add_object_file(self): + target_machine = self.target_machine(jit=False) + mod = self.module() + obj_bin = target_machine.emit_object(mod) + obj = llvm.ObjectFileRef.from_data(obj_bin) + + jit = llvm.create_mcjit_compiler(self.module(self.mod_asm), + target_machine) + + jit.add_object_file(obj) + + sum_twice = CFUNCTYPE(c_int, c_int, c_int)( + jit.get_function_address("sum_twice")) + + self.assertEqual(sum_twice(2, 3), 10) + + def test_add_object_file_from_filesystem(self): + target_machine = self.target_machine(jit=False) + mod = self.module() + obj_bin = target_machine.emit_object(mod) + temp_desc, temp_path = mkstemp() + + 
try: + try: + f = os.fdopen(temp_desc, "wb") + f.write(obj_bin) + f.flush() + finally: + f.close() + + jit = llvm.create_mcjit_compiler(self.module(self.mod_asm), + target_machine) + + jit.add_object_file(temp_path) + finally: + os.unlink(temp_path) + + sum_twice = CFUNCTYPE(c_int, c_int, c_int)( + jit.get_function_address("sum_twice")) + + self.assertEqual(sum_twice(2, 3), 10) + + def test_get_section_content(self): + # See Issue #632 - section contents were getting truncated at null + # bytes. + elf = bytes.fromhex(issue_632_elf) + obj = llvm.ObjectFileRef.from_data(elf) + for s in obj.sections(): + if s.is_text(): + self.assertEqual(len(s.data()), 31) + self.assertEqual(s.data().hex(), issue_632_text) + + +class TestTimePasses(BaseTest): + def test_reporting(self): + mp = llvm.create_module_pass_manager() + + pmb = llvm.create_pass_manager_builder() + pmb.opt_level = 3 + pmb.populate(mp) + + try: + llvm.set_time_passes(True) + mp.run(self.module()) + mp.run(self.module()) + mp.run(self.module()) + finally: + report = llvm.report_and_reset_timings() + llvm.set_time_passes(False) + + self.assertIsInstance(report, str) + self.assertEqual(report.count("Pass execution timing report"), 1) + + def test_empty_report(self): + # Returns empty str if no data is collected + self.assertFalse(llvm.report_and_reset_timings()) + + +class TestLLVMLockCallbacks(BaseTest): + def test_lock_callbacks(self): + events = [] + + def acq(): + events.append('acq') + + def rel(): + events.append('rel') + + # register callback + llvm.ffi.register_lock_callback(acq, rel) + + # Check: events are initially empty + self.assertFalse(events) + # Call LLVM functions + llvm.create_module_pass_manager() + # Check: there must be at least one acq and one rel + self.assertIn("acq", events) + self.assertIn("rel", events) + + # unregister callback + llvm.ffi.unregister_lock_callback(acq, rel) + + # Check: removing non-existent callbacks will trigger a ValueError + with self.assertRaises(ValueError): + 
llvm.ffi.unregister_lock_callback(acq, rel) + + +if __name__ == "__main__": + unittest.main() diff --git a/vllm/lib/python3.10/site-packages/llvmlite/tests/test_ir.py b/vllm/lib/python3.10/site-packages/llvmlite/tests/test_ir.py new file mode 100644 index 0000000000000000000000000000000000000000..28412967900081e02585412842f4da30dd4867b4 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/llvmlite/tests/test_ir.py @@ -0,0 +1,2729 @@ +""" +IR Construction Tests +""" + +import copy +import itertools +import pickle +import re +import textwrap +import unittest + +from . import TestCase +from llvmlite import ir +from llvmlite import binding as llvm + + +int1 = ir.IntType(1) +int8 = ir.IntType(8) +int16 = ir.IntType(16) +int32 = ir.IntType(32) +int64 = ir.IntType(64) +hlf = ir.HalfType() +flt = ir.FloatType() +dbl = ir.DoubleType() + + +class TestBase(TestCase): + """ + Utilities for IR tests. + """ + + def assertInText(self, pattern, text): + """ + Assert *pattern* is in *text*, ignoring any whitespace differences + (including newlines). + """ + + def escape(c): + if not c.isalnum() and not c.isspace(): + return '\\' + c + return c + + pattern = ''.join(map(escape, pattern)) + regex = re.sub(r'\s+', r'\\s*', pattern) + self.assertRegex(text, regex) + + def assert_ir_line(self, line, mod): + lines = [line.strip() for line in str(mod).splitlines()] + self.assertIn(line, lines) + + def assert_valid_ir(self, mod): + llvm.parse_assembly(str(mod)) + + def assert_pickle_correctly(self, irobject): + """Assert that the IR object pickles and unpickles correctly. 
+ The IR string is equal and that their type is equal + """ + newobject = pickle.loads(pickle.dumps(irobject, protocol=-1)) + self.assertIs(irobject.__class__, newobject.__class__) + self.assertEqual(str(irobject), str(newobject)) + return newobject + + def module(self): + return ir.Module() + + def function(self, module=None, name='my_func'): + module = module or self.module() + fnty = ir.FunctionType(int32, (int32, int32, dbl, + ir.PointerType(int32))) + return ir.Function(module, fnty, name) + + def block(self, func=None, name=''): + func = func or self.function() + return func.append_basic_block(name) + + def descr(self, thing): + buf = [] + thing.descr(buf) + return "".join(buf) + + def _normalize_asm(self, asm): + asm = textwrap.dedent(asm) + # Normalize indent + asm = asm.replace("\n ", "\n ") + return asm + + def check_descr_regex(self, descr, asm): + expected = self._normalize_asm(asm) + self.assertRegex(descr, expected) + + def check_descr(self, descr, asm): + expected = self._normalize_asm(asm) + self.assertEqual(descr, expected) + + def check_block(self, block, asm): + self.check_descr(self.descr(block), asm) + + def check_block_regex(self, block, asm): + self.check_descr_regex(self.descr(block), asm) + + def check_module_body(self, module, asm): + expected = self._normalize_asm(asm) + actual = module._stringify_body() + self.assertEqual(actual.strip(), expected.strip()) + + def check_metadata(self, module, asm): + """ + Check module metadata against *asm*. 
+ """ + expected = self._normalize_asm(asm) + actual = module._stringify_metadata() + self.assertEqual(actual.strip(), expected.strip()) + + def check_func_body(self, func, asm): + expected = self._normalize_asm(asm) + actual = self.descr(func) + actual = actual.partition('{')[2].rpartition('}')[0] + self.assertEqual(actual.strip(), expected.strip()) + + +class TestFunction(TestBase): + + proto = """i32 @"my_func"(i32 %".1", i32 %".2", double %".3", i32* %".4")""" + + def test_declare(self): + # A simple declaration + func = self.function() + asm = self.descr(func).strip() + self.assertEqual(asm.strip(), "declare %s" % self.proto) + + def test_declare_attributes(self): + # Now with function attributes + func = self.function() + func.attributes.add("optsize") + func.attributes.add("alwaysinline") + func.attributes.add("convergent") + func.attributes.alignstack = 16 + tp_pers = ir.FunctionType(int8, (), var_arg=True) + pers = ir.Function(self.module(), tp_pers, '__gxx_personality_v0') + func.attributes.personality = pers + asm = self.descr(func).strip() + self.assertEqual(asm, + ("declare %s alwaysinline convergent optsize " + "alignstack(16) " + "personality i8 (...)* @\"__gxx_personality_v0\"") % + self.proto) + # Check pickling + self.assert_pickle_correctly(func) + + def test_function_attributes(self): + # Now with parameter attributes + func = self.function() + func.args[0].add_attribute("zeroext") + func.args[1].attributes.dereferenceable = 5 + func.args[1].attributes.dereferenceable_or_null = 10 + func.args[3].attributes.align = 4 + func.args[3].add_attribute("nonnull") + func.return_value.add_attribute("noalias") + asm = self.descr(func).strip() + self.assertEqual(asm, + """declare noalias i32 @"my_func"(i32 zeroext %".1", i32 dereferenceable(5) dereferenceable_or_null(10) %".2", double %".3", i32* nonnull align 4 %".4")""" # noqa E501 + ) + # Check pickling + self.assert_pickle_correctly(func) + + def test_function_metadata(self): + # Now with function 
metadata + module = self.module() + func = self.function(module) + func.set_metadata('dbg', module.add_metadata([])) + asm = self.descr(func).strip() + self.assertEqual(asm, + f'declare {self.proto} !dbg !0' + ) + # Check pickling + self.assert_pickle_correctly(func) + + def test_function_section(self): + # Test function with section + func = self.function() + func.section = "a_section" + asm = self.descr(func).strip() + self.assertEqual(asm, + f'declare {self.proto} section "a_section"' + ) + # Check pickling + self.assert_pickle_correctly(func) + + def test_function_section_meta(self): + # Test function with section and metadata + module = self.module() + func = self.function(module) + func.section = "a_section" + func.set_metadata('dbg', module.add_metadata([])) + asm = self.descr(func).strip() + self.assertEqual(asm, + f'declare {self.proto} section "a_section" !dbg !0' + ) + # Check pickling + self.assert_pickle_correctly(func) + + def test_function_attr_meta(self): + # Test function with attributes and metadata + module = self.module() + func = self.function(module) + func.attributes.add("alwaysinline") + func.set_metadata('dbg', module.add_metadata([])) + asm = self.descr(func).strip() + self.assertEqual(asm, + f'declare {self.proto} alwaysinline !dbg !0' + ) + # Check pickling + self.assert_pickle_correctly(func) + + def test_function_attr_section(self): + # Test function with attributes and section + func = self.function() + func.attributes.add("optsize") + func.section = "a_section" + asm = self.descr(func).strip() + self.assertEqual(asm, + f'declare {self.proto} optsize section "a_section"') + # Check pickling + self.assert_pickle_correctly(func) + + def test_function_attr_section_meta(self): + # Test function with attributes, section and metadata + module = self.module() + func = self.function(module) + func.attributes.add("alwaysinline") + func.section = "a_section" + func.set_metadata('dbg', module.add_metadata([])) + asm = self.descr(func).strip() + 
self.assertEqual(asm, + f'declare {self.proto} alwaysinline section "a_section" !dbg !0' # noqa E501 + ) + # Check pickling + self.assert_pickle_correctly(func) + + def test_define(self): + # A simple definition + func = self.function() + func.attributes.add("alwaysinline") + block = func.append_basic_block('my_block') + builder = ir.IRBuilder(block) + builder.ret_void() + asm = self.descr(func) + self.check_descr(asm, """\ + define {proto} alwaysinline + {{ + my_block: + ret void + }} + """.format(proto=self.proto)) + + def test_declare_intrinsics(self): + module = self.module() + pint8 = int8.as_pointer() + + powi = module.declare_intrinsic('llvm.powi', [dbl]) + memset = module.declare_intrinsic('llvm.memset', [pint8, int32]) + memcpy = module.declare_intrinsic('llvm.memcpy', [pint8, pint8, int32]) + assume = module.declare_intrinsic('llvm.assume') + self.check_descr(self.descr(powi).strip(), """\ + declare double @"llvm.powi.f64"(double %".1", i32 %".2")""") + self.check_descr(self.descr(memset).strip(), """\ + declare void @"llvm.memset.p0i8.i32"(i8* %".1", i8 %".2", i32 %".3", i1 %".4")""") # noqa E501 + self.check_descr(self.descr(memcpy).strip(), """\ + declare void @"llvm.memcpy.p0i8.p0i8.i32"(i8* %".1", i8* %".2", i32 %".3", i1 %".4")""") # noqa E501 + self.check_descr(self.descr(assume).strip(), """\ + declare void @"llvm.assume"(i1 %".1")""") + + def test_redeclare_intrinsic(self): + module = self.module() + powi = module.declare_intrinsic('llvm.powi', [dbl]) + powi2 = module.declare_intrinsic('llvm.powi', [dbl]) + self.assertIs(powi, powi2) + + def test_pickling(self): + fn = self.function() + self.assert_pickle_correctly(fn) + + def test_alwaysinline_noinline_disallowed(self): + module = self.module() + func = self.function(module) + func.attributes.add('alwaysinline') + + msg = "Can't have alwaysinline and noinline" + with self.assertRaisesRegex(ValueError, msg): + func.attributes.add('noinline') + + def test_noinline_alwaysinline_disallowed(self): + 
module = self.module() + func = self.function(module) + func.attributes.add('noinline') + + msg = "Can't have alwaysinline and noinline" + with self.assertRaisesRegex(ValueError, msg): + func.attributes.add('alwaysinline') + + +class TestIR(TestBase): + + def test_unnamed_metadata(self): + # An unnamed metadata node + mod = self.module() + mod.add_metadata([int32(123), int8(42)]) + self.assert_ir_line("!0 = !{ i32 123, i8 42 }", mod) + self.assert_valid_ir(mod) + + def test_unnamed_metadata_2(self): + # Several unnamed metadata nodes + mod = self.module() + # First node has a literal metadata string + m0 = mod.add_metadata([int32(123), "kernel"]) + # Second node refers to the first one + m1 = mod.add_metadata([int64(456), m0]) + # Third node is the same as the second one + m2 = mod.add_metadata([int64(456), m0]) + self.assertIs(m2, m1) + # Fourth node refers to the first three + mod.add_metadata([m0, m1, m2]) + self.assert_ir_line('!0 = !{ i32 123, !"kernel" }', mod) + self.assert_ir_line('!1 = !{ i64 456, !0 }', mod) + self.assert_ir_line('!2 = !{ !0, !1, !1 }', mod) + + def test_unnamed_metadata_3(self): + # Passing nested metadata as a sequence + mod = self.module() + mod.add_metadata([int32(123), [int32(456)], [int32(789)], [int32(456)]]) + self.assert_ir_line('!0 = !{ i32 456 }', mod) + self.assert_ir_line('!1 = !{ i32 789 }', mod) + self.assert_ir_line('!2 = !{ i32 123, !0, !1, !0 }', mod) + + def test_metadata_string(self): + # Escaping contents of a metadata string + mod = self.module() + mod.add_metadata(["\"\\$"]) + self.assert_ir_line('!0 = !{ !"\\22\\5c$" }', mod) + + def test_named_metadata(self): + # Add a named metadata node and add metadata values to it + mod = self.module() + m0 = mod.add_metadata([int32(123)]) + m1 = mod.add_metadata([int64(456)]) + nmd = mod.add_named_metadata("foo") + nmd.add(m0) + nmd.add(m1) + nmd.add(m0) + self.assert_ir_line("!foo = !{ !0, !1, !0 }", mod) + self.assert_valid_ir(mod) + # Check get_named_metadata() + 
self.assertIs(nmd, mod.get_named_metadata("foo")) + with self.assertRaises(KeyError): + mod.get_named_metadata("bar") + + def test_named_metadata_2(self): + # Add and set named metadata through a single add_named_metadata() call + mod = self.module() + m0 = mod.add_metadata([int32(123)]) + mod.add_named_metadata("foo", m0) + mod.add_named_metadata("foo", [int64(456)]) + mod.add_named_metadata("foo", ["kernel"]) + mod.add_named_metadata("bar", []) + self.assert_ir_line("!foo = !{ !0, !1, !2 }", mod) + self.assert_ir_line("!0 = !{ i32 123 }", mod) + self.assert_ir_line("!1 = !{ i64 456 }", mod) + self.assert_ir_line('!2 = !{ !"kernel" }', mod) + self.assert_ir_line("!bar = !{ !3 }", mod) + self.assert_ir_line('!3 = !{ }', mod) + self.assert_valid_ir(mod) + + def test_metadata_null(self): + # A null metadata (typed) value + mod = self.module() + mod.add_metadata([int32.as_pointer()(None)]) + self.assert_ir_line("!0 = !{ i32* null }", mod) + self.assert_valid_ir(mod) + # A null metadata (untyped) value + mod = self.module() + mod.add_metadata([None, int32(123)]) + self.assert_ir_line("!0 = !{ null, i32 123 }", mod) + self.assert_valid_ir(mod) + + def test_debug_info(self): + # Add real world-looking debug information to a module + # (with various value types) + mod = self.module() + di_file = mod.add_debug_info("DIFile", { + "filename": "foo", + "directory": "bar", + }) + di_func_type = mod.add_debug_info("DISubroutineType", { + # None as `null` + "types": mod.add_metadata([None]), + }) + di_compileunit = mod.add_debug_info("DICompileUnit", { + "language": ir.DIToken("DW_LANG_Python"), + "file": di_file, + "producer": "ARTIQ", + "runtimeVersion": 0, + "isOptimized": True, + }, is_distinct=True) + mod.add_debug_info("DISubprogram", { + "name": "my_func", + "file": di_file, + "line": 11, + "type": di_func_type, + "isLocal": False, + "unit": di_compileunit, + }, is_distinct=True) + + # Check output + strmod = str(mod) + self.assert_ir_line('!0 = !DIFile(directory: "bar", 
filename: "foo")', + strmod) + self.assert_ir_line('!1 = !{ null }', strmod) + self.assert_ir_line('!2 = !DISubroutineType(types: !1)', strmod) + # self.assert_ir_line('!4 = !{ !3 }', strmod) + self.assert_ir_line('!3 = distinct !DICompileUnit(file: !0, ' + 'isOptimized: true, language: DW_LANG_Python, ' + 'producer: "ARTIQ", runtimeVersion: 0)', + strmod) + self.assert_ir_line('!4 = distinct !DISubprogram(file: !0, isLocal: ' + 'false, line: 11, name: "my_func", type: !2, unit: ' + '!3)', + strmod) + self.assert_valid_ir(mod) + + def test_debug_info_2(self): + # Identical debug info nodes should be merged + mod = self.module() + di1 = mod.add_debug_info("DIFile", + {"filename": "foo", + "directory": "bar", + }) + di2 = mod.add_debug_info("DIFile", + {"filename": "foo", + "directory": "bar", + }) + di3 = mod.add_debug_info("DIFile", + {"filename": "bar", + "directory": "foo", + }) + di4 = mod.add_debug_info("DIFile", + {"filename": "foo", + "directory": "bar", + }, is_distinct=True) + self.assertIs(di1, di2) + self.assertEqual(len({di1, di2, di3, di4}), 3) + # Check output + strmod = str(mod) + self.assert_ir_line('!0 = !DIFile(directory: "bar", filename: "foo")', + strmod) + self.assert_ir_line('!1 = !DIFile(directory: "foo", filename: "bar")', + strmod) + self.assert_ir_line('!2 = distinct !DIFile(directory: "bar", filename: ' + '"foo")', strmod) + self.assert_valid_ir(mod) + + def test_debug_info_gvar(self): + # This test defines a module with a global variable named 'gvar'. + # When the module is compiled and linked with a main function, gdb can + # be used to interpret and print the the value of 'gvar'. 
+ mod = self.module() + + gvar = ir.GlobalVariable(mod, ir.FloatType(), 'gvar') + gvar.initializer = ir.Constant(ir.FloatType(), 42) + + di_float = mod.add_debug_info("DIBasicType", { + "name": "float", + "size": 32, + "encoding": ir.DIToken("DW_ATE_float") + }) + di_gvar = mod.add_debug_info("DIGlobalVariableExpression", { + "expr": mod.add_debug_info("DIExpression", {}), + "var": mod.add_debug_info("DIGlobalVariable", { + "name": gvar.name, + "type": di_float, + "isDefinition": True + }, is_distinct=True) + }) + gvar.set_metadata('dbg', di_gvar) + + # Check output + strmod = str(mod) + self.assert_ir_line('!0 = !DIBasicType(encoding: DW_ATE_float, ' + 'name: "float", size: 32)', strmod) + self.assert_ir_line('!1 = !DIExpression()', strmod) + self.assert_ir_line('!2 = distinct !DIGlobalVariable(isDefinition: ' + 'true, name: "gvar", type: !0)', strmod) + self.assert_ir_line('!3 = !DIGlobalVariableExpression(expr: !1, ' + 'var: !2)', strmod) + self.assert_ir_line('@"gvar" = global float 0x4045000000000000, ' + '!dbg !3', strmod) + + # The remaining debug info is not part of the automated test, but + # can be used to produce an object file that can be loaded into a + # debugger to print the value of gvar. 
This can be done by printing the + # module then compiling it with clang and inspecting with gdb: + # + # clang test_debug_info_gvar.ll -c + # printf "file test_debug_info_gvar.o \n p gvar" | gdb + # + # Which should result in the output: + # + # (gdb) $1 = 42 + + dver = [ir.IntType(32)(2), 'Dwarf Version', ir.IntType(32)(4)] + diver = [ir.IntType(32)(2), 'Debug Info Version', ir.IntType(32)(3)] + dver = mod.add_metadata(dver) + diver = mod.add_metadata(diver) + flags = mod.add_named_metadata('llvm.module.flags') + flags.add(dver) + flags.add(diver) + + di_file = mod.add_debug_info("DIFile", { + "filename": "foo", + "directory": "bar", + }) + di_cu = mod.add_debug_info("DICompileUnit", { + "language": ir.DIToken("DW_LANG_Python"), + "file": di_file, + 'emissionKind': ir.DIToken('FullDebug'), + "globals": mod.add_metadata([di_gvar]) + }, is_distinct=True) + mod.add_named_metadata('llvm.dbg.cu', di_cu) + + def test_debug_info_unicode_string(self): + mod = self.module() + mod.add_debug_info("DILocalVariable", {"name": "a∆"}) + # Check output + strmod = str(mod) + # The unicode character is utf8 encoded with \XX format, where XX is hex + name = ''.join(map(lambda x: f"\\{x:02x}", "∆".encode())) + self.assert_ir_line(f'!0 = !DILocalVariable(name: "a{name}")', strmod) + + def test_inline_assembly(self): + mod = self.module() + foo = ir.Function(mod, ir.FunctionType(ir.VoidType(), []), 'foo') + builder = ir.IRBuilder(foo.append_basic_block('')) + asmty = ir.FunctionType(int32, [int32]) + asm = ir.InlineAsm(asmty, "mov $1, $2", "=r,r", side_effect=True) + builder.call(asm, [int32(123)]) + builder.ret_void() + pat = 'call i32 asm sideeffect "mov $1, $2", "=r,r" ( i32 123 )' + self.assertInText(pat, str(mod)) + self.assert_valid_ir(mod) + + def test_builder_asm(self): + mod = self.module() + foo = ir.Function(mod, ir.FunctionType(ir.VoidType(), []), 'foo') + builder = ir.IRBuilder(foo.append_basic_block('')) + asmty = ir.FunctionType(int32, [int32]) + builder.asm(asmty, "mov 
$1, $2", "=r,r", [int32(123)], side_effect=True) + builder.ret_void() + pat = 'call i32 asm sideeffect "mov $1, $2", "=r,r" ( i32 123 )' + self.assertInText(pat, str(mod)) + self.assert_valid_ir(mod) + + def test_builder_load_reg(self): + mod = self.module() + foo = ir.Function(mod, ir.FunctionType(ir.VoidType(), []), 'foo') + builder = ir.IRBuilder(foo.append_basic_block('')) + builder.load_reg(ir.IntType(64), "rax") + builder.ret_void() + pat = 'call i64 asm "", "={rax}"' + self.assertInText(pat, str(mod)) + self.assert_valid_ir(mod) + + def test_builder_store_reg(self): + mod = self.module() + foo = ir.Function(mod, ir.FunctionType(ir.VoidType(), []), 'foo') + builder = ir.IRBuilder(foo.append_basic_block('')) + builder.store_reg(int64(123), ir.IntType(64), "rax") + builder.ret_void() + pat = 'call void asm sideeffect "", "{rax}" ( i64 123 )' + self.assertInText(pat, str(mod)) + self.assert_valid_ir(mod) + + +class TestGlobalValues(TestBase): + + def test_globals_access(self): + mod = self.module() + foo = ir.Function(mod, ir.FunctionType(ir.VoidType(), []), 'foo') + ir.Function(mod, ir.FunctionType(ir.VoidType(), []), 'bar') + globdouble = ir.GlobalVariable(mod, ir.DoubleType(), 'globdouble') + self.assertEqual(mod.get_global('foo'), foo) + self.assertEqual(mod.get_global('globdouble'), globdouble) + with self.assertRaises(KeyError): + mod.get_global('kkk') + # Globals should have a useful repr() + self.assertEqual(repr(globdouble), + "") + + def test_functions_global_values_access(self): + """ + Accessing functions and global values through Module.functions + and Module.global_values. 
+ """ + mod = self.module() + fty = ir.FunctionType(ir.VoidType(), []) + foo = ir.Function(mod, fty, 'foo') + bar = ir.Function(mod, fty, 'bar') + globdouble = ir.GlobalVariable(mod, ir.DoubleType(), 'globdouble') + self.assertEqual(set(mod.functions), set((foo, bar))) + self.assertEqual(set(mod.global_values), set((foo, bar, globdouble))) + + def test_global_variables_ir(self): + """ + IR serialization of global variables. + """ + mod = self.module() + # the following have side effects and write to self.module() + a = ir.GlobalVariable(mod, int8, 'a') # noqa F841 + b = ir.GlobalVariable(mod, int8, 'b', addrspace=42) # noqa F841 + # Initialized global variable doesn't default to "external" + c = ir.GlobalVariable(mod, int32, 'c') + c.initializer = int32(123) + d = ir.GlobalVariable(mod, int32, 'd') + d.global_constant = True + # Non-external linkage implies default "undef" initializer + e = ir.GlobalVariable(mod, int32, 'e') + e.linkage = "internal" + f = ir.GlobalVariable(mod, int32, 'f', addrspace=456) + f.unnamed_addr = True + g = ir.GlobalVariable(mod, int32, 'g') + g.linkage = "internal" + g.initializer = int32(123) + g.align = 16 + h = ir.GlobalVariable(mod, int32, 'h') + h.linkage = "internal" + h.initializer = int32(123) + h.section = "h_section" + i = ir.GlobalVariable(mod, int32, 'i') + i.linkage = "internal" + i.initializer = int32(456) + i.align = 8 + i.section = "i_section" + self.check_module_body(mod, """\ + @"a" = external global i8 + @"b" = external addrspace(42) global i8 + @"c" = global i32 123 + @"d" = external constant i32 + @"e" = internal global i32 undef + @"f" = external unnamed_addr addrspace(456) global i32 + @"g" = internal global i32 123, align 16 + @"h" = internal global i32 123, section "h_section" + @"i" = internal global i32 456, section "i_section", align 8 + """) + + def test_pickle(self): + mod = self.module() + self.assert_pickle_correctly(mod) + + +class TestBlock(TestBase): + + def test_attributes(self): + func = 
self.function() + block = ir.Block(parent=func, name='start') + self.assertIs(block.parent, func) + self.assertFalse(block.is_terminated) + + def test_descr(self): + block = self.block(name='my_block') + self.assertEqual(self.descr(block), "my_block:\n") + block.instructions.extend(['a', 'b']) + self.assertEqual(self.descr(block), "my_block:\n a\n b\n") + + def test_replace(self): + block = self.block(name='my_block') + builder = ir.IRBuilder(block) + a, b = builder.function.args[:2] + c = builder.add(a, b, 'c') + d = builder.sub(a, b, 'd') + builder.mul(d, b, 'e') + f = ir.Instruction(block, a.type, 'sdiv', (c, b), 'f') + self.check_block(block, """\ + my_block: + %"c" = add i32 %".1", %".2" + %"d" = sub i32 %".1", %".2" + %"e" = mul i32 %"d", %".2" + """) + block.replace(d, f) + self.check_block(block, """\ + my_block: + %"c" = add i32 %".1", %".2" + %"f" = sdiv i32 %"c", %".2" + %"e" = mul i32 %"f", %".2" + """) + + def test_repr(self): + """ + Blocks should have a useful repr() + """ + func = self.function() + block = ir.Block(parent=func, name='start') + self.assertEqual(repr(block), "") + + +class TestBuildInstructions(TestBase): + """ + Test IR generation of LLVM instructions through the IRBuilder class. 
+ """ + + maxDiff = 4000 + + def test_simple(self): + block = self.block(name='my_block') + builder = ir.IRBuilder(block) + a, b = builder.function.args[:2] + inst = builder.add(a, b, 'res') + self.check_block(block, """\ + my_block: + %"res" = add i32 %".1", %".2" + """) + # Instructions should have a useful repr() + self.assertEqual(repr(inst), + ", " + ")>") + + def test_binops(self): + block = self.block(name='my_block') + builder = ir.IRBuilder(block) + a, b, ff = builder.function.args[:3] + builder.add(a, b, 'c') + builder.fadd(a, b, 'd') + builder.sub(a, b, 'e') + builder.fsub(a, b, 'f') + builder.mul(a, b, 'g') + builder.fmul(a, b, 'h') + builder.udiv(a, b, 'i') + builder.sdiv(a, b, 'j') + builder.fdiv(a, b, 'k') + builder.urem(a, b, 'l') + builder.srem(a, b, 'm') + builder.frem(a, b, 'n') + builder.or_(a, b, 'o') + builder.and_(a, b, 'p') + builder.xor(a, b, 'q') + builder.shl(a, b, 'r') + builder.ashr(a, b, 's') + builder.lshr(a, b, 't') + with self.assertRaises(ValueError) as cm: + builder.add(a, ff) + self.assertEqual(str(cm.exception), + "Operands must be the same type, got (i32, double)") + self.assertFalse(block.is_terminated) + self.check_block(block, """\ + my_block: + %"c" = add i32 %".1", %".2" + %"d" = fadd i32 %".1", %".2" + %"e" = sub i32 %".1", %".2" + %"f" = fsub i32 %".1", %".2" + %"g" = mul i32 %".1", %".2" + %"h" = fmul i32 %".1", %".2" + %"i" = udiv i32 %".1", %".2" + %"j" = sdiv i32 %".1", %".2" + %"k" = fdiv i32 %".1", %".2" + %"l" = urem i32 %".1", %".2" + %"m" = srem i32 %".1", %".2" + %"n" = frem i32 %".1", %".2" + %"o" = or i32 %".1", %".2" + %"p" = and i32 %".1", %".2" + %"q" = xor i32 %".1", %".2" + %"r" = shl i32 %".1", %".2" + %"s" = ashr i32 %".1", %".2" + %"t" = lshr i32 %".1", %".2" + """) + + def test_binop_flags(self): + block = self.block(name='my_block') + builder = ir.IRBuilder(block) + a, b = builder.function.args[:2] + # As tuple + builder.add(a, b, 'c', flags=('nuw',)) + # and as list + builder.sub(a, b, 'd', 
flags=['nuw', 'nsw']) + self.check_block(block, """\ + my_block: + %"c" = add nuw i32 %".1", %".2" + %"d" = sub nuw nsw i32 %".1", %".2" + """) + + def test_binop_fastmath_flags(self): + block = self.block(name='my_block') + builder = ir.IRBuilder(block) + a, b = builder.function.args[:2] + # As tuple + builder.fadd(a, b, 'c', flags=('fast',)) + # and as list + builder.fsub(a, b, 'd', flags=['ninf', 'nsz']) + self.check_block(block, """\ + my_block: + %"c" = fadd fast i32 %".1", %".2" + %"d" = fsub ninf nsz i32 %".1", %".2" + """) + + def test_binops_with_overflow(self): + block = self.block(name='my_block') + builder = ir.IRBuilder(block) + a, b = builder.function.args[:2] + builder.sadd_with_overflow(a, b, 'c') + builder.smul_with_overflow(a, b, 'd') + builder.ssub_with_overflow(a, b, 'e') + builder.uadd_with_overflow(a, b, 'f') + builder.umul_with_overflow(a, b, 'g') + builder.usub_with_overflow(a, b, 'h') + self.check_block(block, """\ +my_block: + %"c" = call {i32, i1} @"llvm.sadd.with.overflow.i32"(i32 %".1", i32 %".2") + %"d" = call {i32, i1} @"llvm.smul.with.overflow.i32"(i32 %".1", i32 %".2") + %"e" = call {i32, i1} @"llvm.ssub.with.overflow.i32"(i32 %".1", i32 %".2") + %"f" = call {i32, i1} @"llvm.uadd.with.overflow.i32"(i32 %".1", i32 %".2") + %"g" = call {i32, i1} @"llvm.umul.with.overflow.i32"(i32 %".1", i32 %".2") + %"h" = call {i32, i1} @"llvm.usub.with.overflow.i32"(i32 %".1", i32 %".2") + """) + + def test_unary_ops(self): + block = self.block(name='my_block') + builder = ir.IRBuilder(block) + a, b, c = builder.function.args[:3] + builder.neg(a, 'd') + builder.not_(b, 'e') + builder.fneg(c, 'f') + self.assertFalse(block.is_terminated) + self.check_block(block, """\ + my_block: + %"d" = sub i32 0, %".1" + %"e" = xor i32 %".2", -1 + %"f" = fneg double %".3" + """) + + def test_replace_operand(self): + block = self.block(name='my_block') + builder = ir.IRBuilder(block) + a, b = builder.function.args[:2] + undef1 = ir.Constant(ir.IntType(32), 
ir.Undefined) + undef2 = ir.Constant(ir.IntType(32), ir.Undefined) + c = builder.add(undef1, undef2, 'c') + self.check_block(block, """\ + my_block: + %"c" = add i32 undef, undef + """) + c.replace_usage(undef1, a) + c.replace_usage(undef2, b) + self.check_block(block, """\ + my_block: + %"c" = add i32 %".1", %".2" + """) + + def test_integer_comparisons(self): + block = self.block(name='my_block') + builder = ir.IRBuilder(block) + a, b = builder.function.args[:2] + builder.icmp_unsigned('==', a, b, 'c') + builder.icmp_unsigned('!=', a, b, 'd') + builder.icmp_unsigned('<', a, b, 'e') + builder.icmp_unsigned('<=', a, b, 'f') + builder.icmp_unsigned('>', a, b, 'g') + builder.icmp_unsigned('>=', a, b, 'h') + builder.icmp_signed('==', a, b, 'i') + builder.icmp_signed('!=', a, b, 'j') + builder.icmp_signed('<', a, b, 'k') + builder.icmp_signed('<=', a, b, 'l') + builder.icmp_signed('>', a, b, 'm') + builder.icmp_signed('>=', a, b, 'n') + with self.assertRaises(ValueError): + builder.icmp_signed('uno', a, b, 'zz') + with self.assertRaises(ValueError): + builder.icmp_signed('foo', a, b, 'zz') + self.assertFalse(block.is_terminated) + self.check_block(block, """\ + my_block: + %"c" = icmp eq i32 %".1", %".2" + %"d" = icmp ne i32 %".1", %".2" + %"e" = icmp ult i32 %".1", %".2" + %"f" = icmp ule i32 %".1", %".2" + %"g" = icmp ugt i32 %".1", %".2" + %"h" = icmp uge i32 %".1", %".2" + %"i" = icmp eq i32 %".1", %".2" + %"j" = icmp ne i32 %".1", %".2" + %"k" = icmp slt i32 %".1", %".2" + %"l" = icmp sle i32 %".1", %".2" + %"m" = icmp sgt i32 %".1", %".2" + %"n" = icmp sge i32 %".1", %".2" + """) + + def test_float_comparisons(self): + block = self.block(name='my_block') + builder = ir.IRBuilder(block) + a, b = builder.function.args[:2] + builder.fcmp_ordered('==', a, b, 'c') + builder.fcmp_ordered('!=', a, b, 'd') + builder.fcmp_ordered('<', a, b, 'e') + builder.fcmp_ordered('<=', a, b, 'f') + builder.fcmp_ordered('>', a, b, 'g') + builder.fcmp_ordered('>=', a, b, 'h') + 
builder.fcmp_unordered('==', a, b, 'i') + builder.fcmp_unordered('!=', a, b, 'j') + builder.fcmp_unordered('<', a, b, 'k') + builder.fcmp_unordered('<=', a, b, 'l') + builder.fcmp_unordered('>', a, b, 'm') + builder.fcmp_unordered('>=', a, b, 'n') + # fcmp_ordered and fcmp_unordered are the same for these cases + builder.fcmp_ordered('ord', a, b, 'u') + builder.fcmp_ordered('uno', a, b, 'v') + builder.fcmp_unordered('ord', a, b, 'w') + builder.fcmp_unordered('uno', a, b, 'x') + builder.fcmp_unordered('olt', a, b, 'y', + flags=['nnan', 'ninf', 'nsz', 'arcp', 'fast']) + self.assertFalse(block.is_terminated) + self.check_block(block, """\ + my_block: + %"c" = fcmp oeq i32 %".1", %".2" + %"d" = fcmp one i32 %".1", %".2" + %"e" = fcmp olt i32 %".1", %".2" + %"f" = fcmp ole i32 %".1", %".2" + %"g" = fcmp ogt i32 %".1", %".2" + %"h" = fcmp oge i32 %".1", %".2" + %"i" = fcmp ueq i32 %".1", %".2" + %"j" = fcmp une i32 %".1", %".2" + %"k" = fcmp ult i32 %".1", %".2" + %"l" = fcmp ule i32 %".1", %".2" + %"m" = fcmp ugt i32 %".1", %".2" + %"n" = fcmp uge i32 %".1", %".2" + %"u" = fcmp ord i32 %".1", %".2" + %"v" = fcmp uno i32 %".1", %".2" + %"w" = fcmp ord i32 %".1", %".2" + %"x" = fcmp uno i32 %".1", %".2" + %"y" = fcmp nnan ninf nsz arcp fast olt i32 %".1", %".2" + """) + + def test_misc_ops(self): + block = self.block(name='my_block') + t = ir.Constant(int1, True) + builder = ir.IRBuilder(block) + a, b = builder.function.args[:2] + builder.select(t, a, b, 'c', flags=('arcp', 'nnan')) + self.assertFalse(block.is_terminated) + builder.unreachable() + self.assertTrue(block.is_terminated) + self.check_block(block, """\ + my_block: + %"c" = select arcp nnan i1 true, i32 %".1", i32 %".2" + unreachable + """) + + def test_phi(self): + block = self.block(name='my_block') + builder = ir.IRBuilder(block) + a, b = builder.function.args[:2] + bb2 = builder.function.append_basic_block('b2') + bb3 = builder.function.append_basic_block('b3') + phi = builder.phi(int32, 'my_phi', 
flags=('fast',)) + phi.add_incoming(a, bb2) + phi.add_incoming(b, bb3) + self.assertFalse(block.is_terminated) + self.check_block(block, """\ + my_block: + %"my_phi" = phi fast i32 [%".1", %"b2"], [%".2", %"b3"] + """) + + def test_mem_ops(self): + block = self.block(name='my_block') + builder = ir.IRBuilder(block) + a, b, z = builder.function.args[:3] + c = builder.alloca(int32, name='c') + d = builder.alloca(int32, size=42, name='d') # noqa F841 + e = builder.alloca(dbl, size=a, name='e') + e.align = 8 + self.assertEqual(e.type, ir.PointerType(dbl)) + ee = builder.store(z, e) + self.assertEqual(ee.type, ir.VoidType()) + f = builder.store(b, c) + self.assertEqual(f.type, ir.VoidType()) + g = builder.load(c, 'g') + self.assertEqual(g.type, int32) + # With alignment + h = builder.store(b, c, align=1) + self.assertEqual(h.type, ir.VoidType()) + i = builder.load(c, 'i', align=1) + self.assertEqual(i.type, int32) + # Atomics + j = builder.store_atomic(b, c, ordering="seq_cst", align=4) + self.assertEqual(j.type, ir.VoidType()) + k = builder.load_atomic(c, ordering="seq_cst", align=4, name='k') + self.assertEqual(k.type, int32) + # Not pointer types + with self.assertRaises(TypeError): + builder.store(b, a) + with self.assertRaises(TypeError): + builder.load(b) + # Mismatching pointer type + with self.assertRaises(TypeError) as cm: + builder.store(b, e) + self.assertEqual(str(cm.exception), + "cannot store i32 to double*: mismatching types") + self.check_block(block, """\ + my_block: + %"c" = alloca i32 + %"d" = alloca i32, i32 42 + %"e" = alloca double, i32 %".1", align 8 + store double %".3", double* %"e" + store i32 %".2", i32* %"c" + %"g" = load i32, i32* %"c" + store i32 %".2", i32* %"c", align 1 + %"i" = load i32, i32* %"c", align 1 + store atomic i32 %".2", i32* %"c" seq_cst, align 4 + %"k" = load atomic i32, i32* %"c" seq_cst, align 4 + """) + + def test_gep(self): + block = self.block(name='my_block') + builder = ir.IRBuilder(block) + a, b = 
builder.function.args[:2] + c = builder.alloca(ir.PointerType(int32), name='c') + d = builder.gep(c, [ir.Constant(int32, 5), a], name='d') + self.assertEqual(d.type, ir.PointerType(int32)) + self.check_block(block, """\ + my_block: + %"c" = alloca i32* + %"d" = getelementptr i32*, i32** %"c", i32 5, i32 %".1" + """) + # XXX test with more complex types + + def test_gep_castinstr(self): + # similar to: + # numba::runtime::nrtdynmod.py_define_nrt_meminfo_data() + block = self.block(name='my_block') + builder = ir.IRBuilder(block) + a, b = builder.function.args[:2] + int8ptr = int8.as_pointer() + ls = ir.LiteralStructType([int64, int8ptr, int8ptr, int8ptr, int64]) + d = builder.bitcast(a, ls.as_pointer(), name='d') + e = builder.gep(d, [ir.Constant(int32, x) for x in [0, 3]], name='e') + self.assertEqual(e.type, ir.PointerType(int8ptr)) + self.check_block(block, """\ + my_block: + %"d" = bitcast i32 %".1" to {i64, i8*, i8*, i8*, i64}* + %"e" = getelementptr {i64, i8*, i8*, i8*, i64}, {i64, i8*, i8*, i8*, i64}* %"d", i32 0, i32 3 + """) # noqa E501 + + def test_gep_castinstr_addrspace(self): + # similar to: + # numba::runtime::nrtdynmod.py_define_nrt_meminfo_data() + block = self.block(name='my_block') + builder = ir.IRBuilder(block) + a, b = builder.function.args[:2] + addrspace = 4 + int8ptr = int8.as_pointer() + ls = ir.LiteralStructType([int64, int8ptr, int8ptr, int8ptr, int64]) + d = builder.bitcast(a, ls.as_pointer(addrspace=addrspace), name='d') + e = builder.gep(d, [ir.Constant(int32, x) for x in [0, 3]], name='e') + self.assertEqual(e.type.addrspace, addrspace) + self.assertEqual(e.type, ir.PointerType(int8ptr, addrspace=addrspace)) + self.check_block(block, """\ + my_block: + %"d" = bitcast i32 %".1" to {i64, i8*, i8*, i8*, i64} addrspace(4)* + %"e" = getelementptr {i64, i8*, i8*, i8*, i64}, {i64, i8*, i8*, i8*, i64} addrspace(4)* %"d", i32 0, i32 3 + """) # noqa E501 + + def test_gep_addrspace(self): + block = self.block(name='my_block') + builder = 
ir.IRBuilder(block) + a, b = builder.function.args[:2] + addrspace = 4 + c = builder.alloca(ir.PointerType(int32, addrspace=addrspace), name='c') + self.assertEqual(str(c.type), 'i32 addrspace(4)**') + self.assertEqual(c.type.pointee.addrspace, addrspace) + d = builder.gep(c, [ir.Constant(int32, 5), a], name='d') + self.assertEqual(d.type.addrspace, addrspace) + e = builder.gep(d, [ir.Constant(int32, 10)], name='e') + self.assertEqual(e.type.addrspace, addrspace) + self.check_block(block, """\ + my_block: + %"c" = alloca i32 addrspace(4)* + %"d" = getelementptr i32 addrspace(4)*, i32 addrspace(4)** %"c", i32 5, i32 %".1" + %"e" = getelementptr i32, i32 addrspace(4)* %"d", i32 10 + """) # noqa E501 + + def test_extract_insert_value(self): + block = self.block(name='my_block') + builder = ir.IRBuilder(block) + a, b = builder.function.args[:2] + tp_inner = ir.LiteralStructType([int32, int1]) + tp_outer = ir.LiteralStructType([int8, tp_inner]) + c_inner = ir.Constant(tp_inner, (ir.Constant(int32, 4), + ir.Constant(int1, True))) + # Flat structure + c = builder.extract_value(c_inner, 0, name='c') # noqa F841 + d = builder.insert_value(c_inner, a, 0, name='d') # noqa F841 + e = builder.insert_value(d, ir.Constant(int1, False), 1, name='e') # noqa F841 E501 + self.assertEqual(d.type, tp_inner) + self.assertEqual(e.type, tp_inner) + # Nested structure + p_outer = builder.alloca(tp_outer, name='ptr') + j = builder.load(p_outer, name='j') + k = builder.extract_value(j, 0, name='k') + l = builder.extract_value(j, 1, name='l') + m = builder.extract_value(j, (1, 0), name='m') + n = builder.extract_value(j, (1, 1), name='n') + o = builder.insert_value(j, l, 1, name='o') + p = builder.insert_value(j, a, (1, 0), name='p') + self.assertEqual(k.type, int8) + self.assertEqual(l.type, tp_inner) + self.assertEqual(m.type, int32) + self.assertEqual(n.type, int1) + self.assertEqual(o.type, tp_outer) + self.assertEqual(p.type, tp_outer) + + with self.assertRaises(TypeError): + # Not an 
aggregate + builder.extract_value(p_outer, 0) + with self.assertRaises(TypeError): + # Indexing too deep + builder.extract_value(c_inner, (0, 0)) + with self.assertRaises(TypeError): + # Index out of structure bounds + builder.extract_value(c_inner, 5) + with self.assertRaises(TypeError): + # Not an aggregate + builder.insert_value(a, b, 0) + with self.assertRaises(TypeError): + # Replacement value has the wrong type + builder.insert_value(c_inner, a, 1) + + self.check_block(block, """\ + my_block: + %"c" = extractvalue {i32, i1} {i32 4, i1 true}, 0 + %"d" = insertvalue {i32, i1} {i32 4, i1 true}, i32 %".1", 0 + %"e" = insertvalue {i32, i1} %"d", i1 false, 1 + %"ptr" = alloca {i8, {i32, i1}} + %"j" = load {i8, {i32, i1}}, {i8, {i32, i1}}* %"ptr" + %"k" = extractvalue {i8, {i32, i1}} %"j", 0 + %"l" = extractvalue {i8, {i32, i1}} %"j", 1 + %"m" = extractvalue {i8, {i32, i1}} %"j", 1, 0 + %"n" = extractvalue {i8, {i32, i1}} %"j", 1, 1 + %"o" = insertvalue {i8, {i32, i1}} %"j", {i32, i1} %"l", 1 + %"p" = insertvalue {i8, {i32, i1}} %"j", i32 %".1", 1, 0 + """) + + def test_cast_ops(self): + block = self.block(name='my_block') + builder = ir.IRBuilder(block) + a, b, fa, ptr = builder.function.args[:4] + c = builder.trunc(a, int8, name='c') + d = builder.zext(c, int32, name='d') # noqa F841 + e = builder.sext(c, int32, name='e') # noqa F841 + fb = builder.fptrunc(fa, flt, 'fb') + fc = builder.fpext(fb, dbl, 'fc') # noqa F841 + g = builder.fptoui(fa, int32, 'g') + h = builder.fptosi(fa, int8, 'h') + fd = builder.uitofp(g, flt, 'fd') # noqa F841 + fe = builder.sitofp(h, dbl, 'fe') # noqa F841 + i = builder.ptrtoint(ptr, int32, 'i') + j = builder.inttoptr(i, ir.PointerType(int8), 'j') # noqa F841 + k = builder.bitcast(a, flt, "k") # noqa F841 + self.assertFalse(block.is_terminated) + self.check_block(block, """\ + my_block: + %"c" = trunc i32 %".1" to i8 + %"d" = zext i8 %"c" to i32 + %"e" = sext i8 %"c" to i32 + %"fb" = fptrunc double %".3" to float + %"fc" = fpext float 
%"fb" to double + %"g" = fptoui double %".3" to i32 + %"h" = fptosi double %".3" to i8 + %"fd" = uitofp i32 %"g" to float + %"fe" = sitofp i8 %"h" to double + %"i" = ptrtoint i32* %".4" to i32 + %"j" = inttoptr i32 %"i" to i8* + %"k" = bitcast i32 %".1" to float + """) + + def test_atomicrmw(self): + block = self.block(name='my_block') + builder = ir.IRBuilder(block) + a, b = builder.function.args[:2] + c = builder.alloca(int32, name='c') + d = builder.atomic_rmw('add', c, a, 'monotonic', 'd') + self.assertEqual(d.type, int32) + self.check_block(block, """\ + my_block: + %"c" = alloca i32 + %"d" = atomicrmw add i32* %"c", i32 %".1" monotonic + """) + + def test_branch(self): + block = self.block(name='my_block') + builder = ir.IRBuilder(block) + bb_target = builder.function.append_basic_block(name='target') + builder.branch(bb_target) + self.assertTrue(block.is_terminated) + self.check_block(block, """\ + my_block: + br label %"target" + """) + + def test_cbranch(self): + block = self.block(name='my_block') + builder = ir.IRBuilder(block) + bb_true = builder.function.append_basic_block(name='b_true') + bb_false = builder.function.append_basic_block(name='b_false') + builder.cbranch(ir.Constant(int1, False), bb_true, bb_false) + self.assertTrue(block.is_terminated) + self.check_block(block, """\ + my_block: + br i1 false, label %"b_true", label %"b_false" + """) + + def test_cbranch_weights(self): + block = self.block(name='my_block') + builder = ir.IRBuilder(block) + bb_true = builder.function.append_basic_block(name='b_true') + bb_false = builder.function.append_basic_block(name='b_false') + br = builder.cbranch(ir.Constant(int1, False), bb_true, bb_false) + br.set_weights([5, 42]) + self.assertTrue(block.is_terminated) + self.check_block(block, """\ + my_block: + br i1 false, label %"b_true", label %"b_false", !prof !0 + """) + self.check_metadata(builder.module, """\ + !0 = !{ !"branch_weights", i32 5, i32 42 } + """) + + def test_branch_indirect(self): + block 
= self.block(name='my_block') + builder = ir.IRBuilder(block) + bb_1 = builder.function.append_basic_block(name='b_1') + bb_2 = builder.function.append_basic_block(name='b_2') + indirectbr = builder.branch_indirect( + ir.BlockAddress(builder.function, bb_1)) + indirectbr.add_destination(bb_1) + indirectbr.add_destination(bb_2) + self.assertTrue(block.is_terminated) + self.check_block(block, """\ + my_block: + indirectbr i8* blockaddress(@"my_func", %"b_1"), [label %"b_1", label %"b_2"] + """) # noqa E501 + + def test_returns(self): + def check(block, expected_ir): + self.assertTrue(block.is_terminated) + self.check_block(block, expected_ir) + + block = self.block(name='my_block') + builder = ir.IRBuilder(block) + builder.ret_void() + check(block, """\ + my_block: + ret void + """) + + block = self.block(name='other_block') + builder = ir.IRBuilder(block) + builder.ret(int32(5)) + check(block, """\ + other_block: + ret i32 5 + """) + + # With metadata + block = self.block(name='my_block') + builder = ir.IRBuilder(block) + inst = builder.ret_void() + inst.set_metadata("dbg", block.module.add_metadata(())) + check(block, """\ + my_block: + ret void, !dbg !0 + """) + + block = self.block(name='my_block') + builder = ir.IRBuilder(block) + inst = builder.ret(int32(6)) + inst.set_metadata("dbg", block.module.add_metadata(())) + check(block, """\ + my_block: + ret i32 6, !dbg !0 + """) + + def test_switch(self): + block = self.block(name='my_block') + builder = ir.IRBuilder(block) + a, b = builder.function.args[:2] + bb_onzero = builder.function.append_basic_block(name='onzero') + bb_onone = builder.function.append_basic_block(name='onone') + bb_ontwo = builder.function.append_basic_block(name='ontwo') + bb_else = builder.function.append_basic_block(name='otherwise') + sw = builder.switch(a, bb_else) + sw.add_case(ir.Constant(int32, 0), bb_onzero) + sw.add_case(ir.Constant(int32, 1), bb_onone) + # A plain Python value gets converted into the right IR constant + 
sw.add_case(2, bb_ontwo) + self.assertTrue(block.is_terminated) + self.check_block(block, """\ + my_block: + switch i32 %".1", label %"otherwise" [i32 0, label %"onzero" i32 1, label %"onone" i32 2, label %"ontwo"] + """) # noqa E501 + + def test_call(self): + block = self.block(name='my_block') + builder = ir.IRBuilder(block) + a, b = builder.function.args[:2] + tp_f = ir.FunctionType(flt, (int32, int32)) + tp_g = ir.FunctionType(dbl, (int32,), var_arg=True) + tp_h = ir.FunctionType(hlf, (int32, int32)) + f = ir.Function(builder.function.module, tp_f, 'f') + g = ir.Function(builder.function.module, tp_g, 'g') + h = ir.Function(builder.function.module, tp_h, 'h') + builder.call(f, (a, b), 'res_f') + builder.call(g, (b, a), 'res_g') + builder.call(h, (a, b), 'res_h') + builder.call(f, (a, b), 'res_f_fast', cconv='fastcc') + res_f_readonly = builder.call(f, (a, b), 'res_f_readonly') + res_f_readonly.attributes.add('readonly') + builder.call(f, (a, b), 'res_fast', fastmath='fast') + builder.call(f, (a, b), 'res_nnan_ninf', fastmath=('nnan', 'ninf')) + builder.call(f, (a, b), 'res_noinline', attrs='noinline') + builder.call(f, (a, b), 'res_alwaysinline', attrs='alwaysinline') + builder.call(f, (a, b), 'res_noinline_ro', attrs=('noinline', + 'readonly')) + builder.call(f, (a, b), 'res_convergent', attrs='convergent') + self.check_block(block, """\ + my_block: + %"res_f" = call float @"f"(i32 %".1", i32 %".2") + %"res_g" = call double (i32, ...) 
@"g"(i32 %".2", i32 %".1") + %"res_h" = call half @"h"(i32 %".1", i32 %".2") + %"res_f_fast" = call fastcc float @"f"(i32 %".1", i32 %".2") + %"res_f_readonly" = call float @"f"(i32 %".1", i32 %".2") readonly + %"res_fast" = call fast float @"f"(i32 %".1", i32 %".2") + %"res_nnan_ninf" = call ninf nnan float @"f"(i32 %".1", i32 %".2") + %"res_noinline" = call float @"f"(i32 %".1", i32 %".2") noinline + %"res_alwaysinline" = call float @"f"(i32 %".1", i32 %".2") alwaysinline + %"res_noinline_ro" = call float @"f"(i32 %".1", i32 %".2") noinline readonly + %"res_convergent" = call float @"f"(i32 %".1", i32 %".2") convergent + """) # noqa E501 + + def test_call_metadata(self): + """ + Function calls with metadata arguments. + """ + block = self.block(name='my_block') + builder = ir.IRBuilder(block) + dbg_declare_ty = ir.FunctionType(ir.VoidType(), [ir.MetaDataType()] * 3) + dbg_declare = ir.Function( + builder.module, + dbg_declare_ty, + 'llvm.dbg.declare') + a = builder.alloca(int32, name="a") + b = builder.module.add_metadata(()) + builder.call(dbg_declare, (a, b, b)) + self.check_block(block, """\ + my_block: + %"a" = alloca i32 + call void @"llvm.dbg.declare"(metadata i32* %"a", metadata !0, metadata !0) + """) # noqa E501 + + def test_call_attributes(self): + block = self.block(name='my_block') + builder = ir.IRBuilder(block) + fun_ty = ir.FunctionType( + ir.VoidType(), (int32.as_pointer(), int32, int32.as_pointer())) + fun = ir.Function(builder.function.module, fun_ty, 'fun') + fun.args[0].add_attribute('sret') + retval = builder.alloca(int32, name='retval') + other = builder.alloca(int32, name='other') + builder.call( + fun, + (retval, ir.Constant(int32, 42), other), + arg_attrs={ + 0: ('sret', 'noalias'), + 2: 'noalias' + } + ) + self.check_block_regex(block, """\ + my_block: + %"retval" = alloca i32 + %"other" = alloca i32 + call void @"fun"\\(i32\\* noalias sret(\\(i32\\))? 
%"retval", i32 42, i32\\* noalias %"other"\\) + """) # noqa E501 + + def test_call_tail(self): + block = self.block(name='my_block') + builder = ir.IRBuilder(block) + fun_ty = ir.FunctionType(ir.VoidType(), ()) + fun = ir.Function(builder.function.module, fun_ty, 'my_fun') + + builder.call(fun, ()) + builder.call(fun, (), tail=False) + builder.call(fun, (), tail=True) + builder.call(fun, (), tail='tail') + builder.call(fun, (), tail='notail') + builder.call(fun, (), tail='musttail') + builder.call(fun, (), tail=[]) # This is a falsy value + builder.call(fun, (), tail='not a marker') # This is a truthy value + + self.check_block(block, """\ + my_block: + call void @"my_fun"() + call void @"my_fun"() + tail call void @"my_fun"() + tail call void @"my_fun"() + notail call void @"my_fun"() + musttail call void @"my_fun"() + call void @"my_fun"() + tail call void @"my_fun"() + """) # noqa E501 + + def test_invalid_call_attributes(self): + block = self.block() + builder = ir.IRBuilder(block) + fun_ty = ir.FunctionType(ir.VoidType(), ()) + fun = ir.Function(builder.function.module, fun_ty, 'fun') + with self.assertRaises(ValueError): + # The function has no arguments, so this should fail. 
+ builder.call(fun, (), arg_attrs={0: 'sret'}) + + def test_invoke(self): + block = self.block(name='my_block') + builder = ir.IRBuilder(block) + a, b = builder.function.args[:2] + tp_f = ir.FunctionType(flt, (int32, int32)) + f = ir.Function(builder.function.module, tp_f, 'f') + bb_normal = builder.function.append_basic_block(name='normal') + bb_unwind = builder.function.append_basic_block(name='unwind') + builder.invoke(f, (a, b), bb_normal, bb_unwind, 'res_f') + self.check_block(block, """\ + my_block: + %"res_f" = invoke float @"f"(i32 %".1", i32 %".2") + to label %"normal" unwind label %"unwind" + """) + + def test_invoke_attributes(self): + block = self.block(name='my_block') + builder = ir.IRBuilder(block) + fun_ty = ir.FunctionType( + ir.VoidType(), (int32.as_pointer(), int32, int32.as_pointer())) + fun = ir.Function(builder.function.module, fun_ty, 'fun') + fun.calling_convention = "fastcc" + fun.args[0].add_attribute('sret') + retval = builder.alloca(int32, name='retval') + other = builder.alloca(int32, name='other') + bb_normal = builder.function.append_basic_block(name='normal') + bb_unwind = builder.function.append_basic_block(name='unwind') + builder.invoke( + fun, + (retval, ir.Constant(int32, 42), other), + bb_normal, + bb_unwind, + cconv='fastcc', + fastmath='fast', + attrs='noinline', + arg_attrs={ + 0: ('sret', 'noalias'), + 2: 'noalias' + } + ) + self.check_block_regex(block, """\ + my_block: + %"retval" = alloca i32 + %"other" = alloca i32 + invoke fast fastcc void @"fun"\\(i32\\* noalias sret(\\(i32\\))? 
%"retval", i32 42, i32\\* noalias %"other"\\) noinline + to label %"normal" unwind label %"unwind" + """) # noqa E501 + + def test_landingpad(self): + block = self.block(name='my_block') + builder = ir.IRBuilder(block) + lp = builder.landingpad(ir.LiteralStructType([int32, + int8.as_pointer()]), 'lp') + int_typeinfo = ir.GlobalVariable(builder.function.module, + int8.as_pointer(), "_ZTIi") + int_typeinfo.global_constant = True + lp.add_clause(ir.CatchClause(int_typeinfo)) + lp.add_clause(ir.FilterClause(ir.Constant(ir.ArrayType( + int_typeinfo.type, 1), [int_typeinfo]))) + builder.resume(lp) + self.check_block(block, """\ + my_block: + %"lp" = landingpad {i32, i8*} + catch i8** @"_ZTIi" + filter [1 x i8**] [i8** @"_ZTIi"] + resume {i32, i8*} %"lp" + """) + + def test_assume(self): + block = self.block(name='my_block') + builder = ir.IRBuilder(block) + a, b = builder.function.args[:2] + c = builder.icmp_signed('>', a, b, name='c') + builder.assume(c) + self.check_block(block, """\ + my_block: + %"c" = icmp sgt i32 %".1", %".2" + call void @"llvm.assume"(i1 %"c") + """) + + def test_vector_ops(self): + block = self.block(name='insert_block') + builder = ir.IRBuilder(block) + a, b = builder.function.args[:2] + a.name = 'a' + b.name = 'b' + + vecty = ir.VectorType(a.type, 2) + vec = ir.Constant(vecty, ir.Undefined) + idxty = ir.IntType(32) + vec = builder.insert_element(vec, a, idxty(0), name='vec1') + vec = builder.insert_element(vec, b, idxty(1), name='vec2') + + self.check_block(block, """\ +insert_block: + %"vec1" = insertelement <2 x i32> , i32 %"a", i32 0 + %"vec2" = insertelement <2 x i32> %"vec1", i32 %"b", i32 1 + """) + + block = builder.append_basic_block("shuffle_block") + builder.branch(block) + builder.position_at_end(block) + + mask = ir.Constant(vecty, [1, 0]) + builder.shuffle_vector(vec, vec, mask, name='shuf') + + self.check_block(block, """\ + shuffle_block: + %"shuf" = shufflevector <2 x i32> %"vec2", <2 x i32> %"vec2", <2 x i32> + """) # noqa E501 
+ + block = builder.append_basic_block("add_block") + builder.branch(block) + builder.position_at_end(block) + + builder.add(vec, vec, name='sum') + + self.check_block(block, """\ + add_block: + %"sum" = add <2 x i32> %"vec2", %"vec2" + """) + + block = builder.append_basic_block("extract_block") + builder.branch(block) + builder.position_at_end(block) + + c = builder.extract_element(vec, idxty(0), name='ex1') + d = builder.extract_element(vec, idxty(1), name='ex2') + + self.check_block(block, """\ + extract_block: + %"ex1" = extractelement <2 x i32> %"vec2", i32 0 + %"ex2" = extractelement <2 x i32> %"vec2", i32 1 + """) + + builder.ret(builder.add(c, d)) + self.assert_valid_ir(builder.module) + + def test_bitreverse(self): + block = self.block(name='my_block') + builder = ir.IRBuilder(block) + a = ir.Constant(int64, 5) + c = builder.bitreverse(a, name='c') + builder.ret(c) + self.check_block(block, """\ + my_block: + %"c" = call i64 @"llvm.bitreverse.i64"(i64 5) + ret i64 %"c" + """) + + def test_bitreverse_wrongtype(self): + block = self.block(name='my_block') + builder = ir.IRBuilder(block) + a = ir.Constant(flt, 5) + + with self.assertRaises(TypeError) as raises: + builder.bitreverse(a, name='c') + self.assertIn( + "expected an integer type, got float", + str(raises.exception)) + + def test_fence(self): + block = self.block(name='my_block') + builder = ir.IRBuilder(block) + with self.assertRaises(ValueError) as raises: + builder.fence("monotonic", None) + self.assertIn( + "Invalid fence ordering \"monotonic\"!", + str(raises.exception)) + with self.assertRaises(ValueError) as raises: + builder.fence(None, "monotonic") + self.assertIn( + "Invalid fence ordering \"None\"!", + str(raises.exception)) + builder.fence("acquire", None) + builder.fence("release", "singlethread") + builder.fence("acq_rel", "singlethread") + builder.fence("seq_cst") + builder.ret_void() + self.check_block(block, """\ + my_block: + fence acquire + fence syncscope("singlethread") release 
+ fence syncscope("singlethread") acq_rel + fence seq_cst + ret void + """) + + def test_comment(self): + block = self.block(name='my_block') + builder = ir.IRBuilder(block) + with self.assertRaises(AssertionError): + builder.comment("so\nmany lines") + builder.comment("my comment") + builder.ret_void() + self.check_block(block, """\ + my_block: + ; my comment + ret void + """) + + def test_bswap(self): + block = self.block(name='my_block') + builder = ir.IRBuilder(block) + a = ir.Constant(int32, 5) + c = builder.bswap(a, name='c') + builder.ret(c) + self.check_block(block, """\ + my_block: + %"c" = call i32 @"llvm.bswap.i32"(i32 5) + ret i32 %"c" + """) + + def test_ctpop(self): + block = self.block(name='my_block') + builder = ir.IRBuilder(block) + a = ir.Constant(int16, 5) + c = builder.ctpop(a, name='c') + builder.ret(c) + self.check_block(block, """\ + my_block: + %"c" = call i16 @"llvm.ctpop.i16"(i16 5) + ret i16 %"c" + """) + + def test_ctlz(self): + block = self.block(name='my_block') + builder = ir.IRBuilder(block) + a = ir.Constant(int16, 5) + b = ir.Constant(int1, 1) + c = builder.ctlz(a, b, name='c') + builder.ret(c) + self.check_block(block, """\ + my_block: + %"c" = call i16 @"llvm.ctlz.i16"(i16 5, i1 1) + ret i16 %"c" + """) + + def test_convert_to_fp16_f32(self): + block = self.block(name='my_block') + builder = ir.IRBuilder(block) + a = ir.Constant(flt, 5.0) + b = builder.convert_to_fp16(a, name='b') + builder.ret(b) + self.check_block(block, """\ + my_block: + %"b" = call i16 @"llvm.convert.to.fp16.f32"(float 0x4014000000000000) + ret i16 %"b" + """) # noqa E501 + + def test_convert_to_fp16_f32_wrongtype(self): + block = self.block(name='my_block') + builder = ir.IRBuilder(block) + a = ir.Constant(int16, 5) + + with self.assertRaises(TypeError) as raises: + builder.convert_to_fp16(a, name='b') + self.assertIn( + "expected a float type, got i16", + str(raises.exception)) + + def test_convert_from_fp16_f32(self): + block = 
self.block(name='my_block') + builder = ir.IRBuilder(block) + a = ir.Constant(int16, 5) + b = builder.convert_from_fp16(a, name='b', to=flt) + builder.ret(b) + self.check_block(block, """\ + my_block: + %"b" = call float @"llvm.convert.from.fp16.f32"(i16 5) + ret float %"b" + """) + + def test_convert_from_fp16_f32_notype(self): + block = self.block(name='my_block') + builder = ir.IRBuilder(block) + a = ir.Constant(flt, 5.5) + + with self.assertRaises(TypeError) as raises: + builder.convert_from_fp16(a, name='b') + self.assertIn( + "expected a float return type", + str(raises.exception)) + + def test_convert_from_fp16_f32_wrongtype(self): + block = self.block(name='my_block') + builder = ir.IRBuilder(block) + a = ir.Constant(flt, 5.5) + + with self.assertRaises(TypeError) as raises: + builder.convert_from_fp16(a, name='b', to=flt) + self.assertIn( + "expected an i16 type, got float", + str(raises.exception)) + + def test_convert_from_fp16_f32_wrongtype2(self): + block = self.block(name='my_block') + builder = ir.IRBuilder(block) + a = ir.Constant(flt, 5.5) + + with self.assertRaises(TypeError) as raises: + builder.convert_from_fp16(a, name='b', to=int16) + self.assertIn( + "expected a float type, got i16", + str(raises.exception)) + + def test_cttz(self): + block = self.block(name='my_block') + builder = ir.IRBuilder(block) + a = ir.Constant(int64, 5) + b = ir.Constant(int1, 1) + c = builder.cttz(a, b, name='c') + builder.ret(c) + self.check_block(block, """\ + my_block: + %"c" = call i64 @"llvm.cttz.i64"(i64 5, i1 1) + ret i64 %"c" + """) + + def test_cttz_wrongflag(self): + block = self.block(name='my_block') + builder = ir.IRBuilder(block) + a = ir.Constant(int64, 5) + b = ir.Constant(int32, 3) + + with self.assertRaises(TypeError) as raises: + builder.cttz(a, b, name='c') + self.assertIn( + "expected an i1 type, got i32", + str(raises.exception)) + + def test_cttz_wrongtype(self): + block = self.block(name='my_block') + builder = ir.IRBuilder(block) + a = 
ir.Constant(flt, 5) + b = ir.Constant(int1, 1) + + with self.assertRaises(TypeError) as raises: + builder.cttz(a, b, name='c') + self.assertIn( + "expected an integer type, got float", + str(raises.exception)) + + def test_fma(self): + block = self.block(name='my_block') + builder = ir.IRBuilder(block) + a = ir.Constant(flt, 5) + b = ir.Constant(flt, 1) + c = ir.Constant(flt, 2) + fma = builder.fma(a, b, c, name='fma') + builder.ret(fma) + self.check_block(block, """\ + my_block: + %"fma" = call float @"llvm.fma.f32"(float 0x4014000000000000, float 0x3ff0000000000000, float 0x4000000000000000) + ret float %"fma" + """) # noqa E501 + + def test_fma_wrongtype(self): + block = self.block(name='my_block') + builder = ir.IRBuilder(block) + a = ir.Constant(int32, 5) + b = ir.Constant(int32, 1) + c = ir.Constant(int32, 2) + + with self.assertRaises(TypeError) as raises: + builder.fma(a, b, c, name='fma') + self.assertIn( + "expected an floating point type, got i32", + str(raises.exception)) + + def test_fma_mixedtypes(self): + block = self.block(name='my_block') + builder = ir.IRBuilder(block) + a = ir.Constant(flt, 5) + b = ir.Constant(dbl, 1) + c = ir.Constant(flt, 2) + + with self.assertRaises(TypeError) as raises: + builder.fma(a, b, c, name='fma') + self.assertIn( + "expected types to be the same, got float, double, float", + str(raises.exception)) + + def test_arg_attributes(self): + def gen_code(attr_name): + fnty = ir.FunctionType(ir.IntType(32), [ir.IntType(32).as_pointer(), + ir.IntType(32)]) + module = ir.Module() + + func = ir.Function(module, fnty, name="sum") + + bb_entry = func.append_basic_block() + bb_loop = func.append_basic_block() + bb_exit = func.append_basic_block() + + builder = ir.IRBuilder() + builder.position_at_end(bb_entry) + + builder.branch(bb_loop) + builder.position_at_end(bb_loop) + + index = builder.phi(ir.IntType(32)) + index.add_incoming(ir.Constant(index.type, 0), bb_entry) + accum = builder.phi(ir.IntType(32)) + 
accum.add_incoming(ir.Constant(accum.type, 0), bb_entry) + + func.args[0].add_attribute(attr_name) + ptr = builder.gep(func.args[0], [index]) + value = builder.load(ptr) + + added = builder.add(accum, value) + accum.add_incoming(added, bb_loop) + + indexp1 = builder.add(index, ir.Constant(index.type, 1)) + index.add_incoming(indexp1, bb_loop) + + cond = builder.icmp_unsigned('<', indexp1, func.args[1]) + builder.cbranch(cond, bb_loop, bb_exit) + + builder.position_at_end(bb_exit) + builder.ret(added) + + return str(module) + + for attr_name in ( + 'byref', + 'byval', + 'elementtype', + 'immarg', + 'inalloca', + 'inreg', + 'nest', + 'noalias', + 'nocapture', + 'nofree', + 'nonnull', + 'noundef', + 'preallocated', + 'returned', + 'signext', + 'swiftasync', + 'swifterror', + 'swiftself', + 'zeroext', + ): + # If this parses, we emitted the right byval attribute format + llvm.parse_assembly(gen_code(attr_name)) + # sret doesn't fit this pattern and is tested in test_call_attributes + + +class TestBuilderMisc(TestBase): + """ + Test various other features of the IRBuilder class. 
+ """ + + def test_attributes(self): + block = self.block(name='start') + builder = ir.IRBuilder(block) + self.assertIs(builder.function, block.parent) + self.assertIsInstance(builder.function, ir.Function) + self.assertIs(builder.module, block.parent.module) + self.assertIsInstance(builder.module, ir.Module) + + def test_goto_block(self): + block = self.block(name='my_block') + builder = ir.IRBuilder(block) + a, b = builder.function.args[:2] + builder.add(a, b, 'c') + bb_new = builder.append_basic_block(name='foo') + with builder.goto_block(bb_new): + builder.fadd(a, b, 'd') + with builder.goto_entry_block(): + builder.sub(a, b, 'e') + builder.fsub(a, b, 'f') + builder.branch(bb_new) + builder.mul(a, b, 'g') + with builder.goto_block(bb_new): + builder.fmul(a, b, 'h') + self.check_block(block, """\ + my_block: + %"c" = add i32 %".1", %".2" + %"e" = sub i32 %".1", %".2" + %"g" = mul i32 %".1", %".2" + """) + self.check_block(bb_new, """\ + foo: + %"d" = fadd i32 %".1", %".2" + %"f" = fsub i32 %".1", %".2" + %"h" = fmul i32 %".1", %".2" + br label %"foo" + """) + + def test_if_then(self): + block = self.block(name='one') + builder = ir.IRBuilder(block) + z = ir.Constant(int1, 0) + a = builder.add(z, z, 'a') + with builder.if_then(a) as bbend: + builder.add(z, z, 'b') + # Block will be terminated implicitly + self.assertIs(builder.block, bbend) + c = builder.add(z, z, 'c') + with builder.if_then(c): + builder.add(z, z, 'd') + builder.branch(block) + # No implicit termination + self.check_func_body(builder.function, """\ + one: + %"a" = add i1 0, 0 + br i1 %"a", label %"one.if", label %"one.endif" + one.if: + %"b" = add i1 0, 0 + br label %"one.endif" + one.endif: + %"c" = add i1 0, 0 + br i1 %"c", label %"one.endif.if", label %"one.endif.endif" + one.endif.if: + %"d" = add i1 0, 0 + br label %"one" + one.endif.endif: + """) + + def test_if_then_nested(self): + # Implicit termination in a nested if/then + block = self.block(name='one') + builder = ir.IRBuilder(block) 
+ z = ir.Constant(int1, 0) + a = builder.add(z, z, 'a') + with builder.if_then(a): + b = builder.add(z, z, 'b') + with builder.if_then(b): + builder.add(z, z, 'c') + builder.ret_void() + self.check_func_body(builder.function, """\ + one: + %"a" = add i1 0, 0 + br i1 %"a", label %"one.if", label %"one.endif" + one.if: + %"b" = add i1 0, 0 + br i1 %"b", label %"one.if.if", label %"one.if.endif" + one.endif: + ret void + one.if.if: + %"c" = add i1 0, 0 + br label %"one.if.endif" + one.if.endif: + br label %"one.endif" + """) + + def test_if_then_long_label(self): + full_label = 'Long' * 20 + block = self.block(name=full_label) + builder = ir.IRBuilder(block) + z = ir.Constant(int1, 0) + a = builder.add(z, z, 'a') + with builder.if_then(a): + b = builder.add(z, z, 'b') + with builder.if_then(b): + builder.add(z, z, 'c') + builder.ret_void() + self.check_func_body(builder.function, """\ + {full_label}: + %"a" = add i1 0, 0 + br i1 %"a", label %"{label}.if", label %"{label}.endif" + {label}.if: + %"b" = add i1 0, 0 + br i1 %"b", label %"{label}.if.if", label %"{label}.if.endif" + {label}.endif: + ret void + {label}.if.if: + %"c" = add i1 0, 0 + br label %"{label}.if.endif" + {label}.if.endif: + br label %"{label}.endif" + """.format(full_label=full_label, label=full_label[:25] + '..')) + + def test_if_then_likely(self): + def check(likely): + block = self.block(name='one') + builder = ir.IRBuilder(block) + z = ir.Constant(int1, 0) + with builder.if_then(z, likely=likely): + pass + self.check_block(block, """\ + one: + br i1 0, label %"one.if", label %"one.endif", !prof !0 + """) + return builder + builder = check(True) + self.check_metadata(builder.module, """\ + !0 = !{ !"branch_weights", i32 99, i32 1 } + """) + builder = check(False) + self.check_metadata(builder.module, """\ + !0 = !{ !"branch_weights", i32 1, i32 99 } + """) + + def test_if_else(self): + block = self.block(name='one') + builder = ir.IRBuilder(block) + z = ir.Constant(int1, 0) + a = builder.add(z, z, 
'a') + with builder.if_else(a) as (then, otherwise): + with then: + builder.add(z, z, 'b') + with otherwise: + builder.add(z, z, 'c') + # Each block will be terminated implicitly + with builder.if_else(a) as (then, otherwise): + with then: + builder.branch(block) + with otherwise: + builder.ret_void() + # No implicit termination + self.check_func_body(builder.function, """\ + one: + %"a" = add i1 0, 0 + br i1 %"a", label %"one.if", label %"one.else" + one.if: + %"b" = add i1 0, 0 + br label %"one.endif" + one.else: + %"c" = add i1 0, 0 + br label %"one.endif" + one.endif: + br i1 %"a", label %"one.endif.if", label %"one.endif.else" + one.endif.if: + br label %"one" + one.endif.else: + ret void + one.endif.endif: + """) + + def test_if_else_likely(self): + def check(likely): + block = self.block(name='one') + builder = ir.IRBuilder(block) + z = ir.Constant(int1, 0) + with builder.if_else(z, likely=likely) as (then, otherwise): + with then: + builder.branch(block) + with otherwise: + builder.ret_void() + self.check_func_body(builder.function, """\ + one: + br i1 0, label %"one.if", label %"one.else", !prof !0 + one.if: + br label %"one" + one.else: + ret void + one.endif: + """) + return builder + builder = check(True) + self.check_metadata(builder.module, """\ + !0 = !{ !"branch_weights", i32 99, i32 1 } + """) + builder = check(False) + self.check_metadata(builder.module, """\ + !0 = !{ !"branch_weights", i32 1, i32 99 } + """) + + def test_positioning(self): + """ + Test IRBuilder.position_{before,after,at_start,at_end}. 
+ """ + func = self.function() + builder = ir.IRBuilder() + z = ir.Constant(int32, 0) + bb_one = func.append_basic_block(name='one') + bb_two = func.append_basic_block(name='two') + bb_three = func.append_basic_block(name='three') + # .at_start(empty block) + builder.position_at_start(bb_one) + builder.add(z, z, 'a') + # .at_end(empty block) + builder.position_at_end(bb_two) + builder.add(z, z, 'm') + builder.add(z, z, 'n') + # .at_start(block) + builder.position_at_start(bb_two) + o = builder.add(z, z, 'o') + builder.add(z, z, 'p') + # .at_end(block) + builder.position_at_end(bb_one) + b = builder.add(z, z, 'b') + # .after(instr) + builder.position_after(o) + builder.add(z, z, 'q') + # .before(instr) + builder.position_before(b) + builder.add(z, z, 'c') + self.check_block(bb_one, """\ + one: + %"a" = add i32 0, 0 + %"c" = add i32 0, 0 + %"b" = add i32 0, 0 + """) + self.check_block(bb_two, """\ + two: + %"o" = add i32 0, 0 + %"q" = add i32 0, 0 + %"p" = add i32 0, 0 + %"m" = add i32 0, 0 + %"n" = add i32 0, 0 + """) + self.check_block(bb_three, """\ + three: + """) + + def test_instruction_removal(self): + func = self.function() + builder = ir.IRBuilder() + blk = func.append_basic_block(name='entry') + builder.position_at_end(blk) + k = ir.Constant(int32, 1234) + a = builder.add(k, k, 'a') + retvoid = builder.ret_void() + self.assertTrue(blk.is_terminated) + builder.remove(retvoid) + self.assertFalse(blk.is_terminated) + b = builder.mul(a, a, 'b') + c = builder.add(b, b, 'c') + builder.remove(c) + builder.ret_void() + self.assertTrue(blk.is_terminated) + self.check_block(blk, """\ + entry: + %"a" = add i32 1234, 1234 + %"b" = mul i32 %"a", %"a" + ret void + """) + + def test_metadata(self): + block = self.block(name='my_block') + builder = ir.IRBuilder(block) + builder.debug_metadata = builder.module.add_metadata([]) + builder.alloca(ir.PointerType(int32), name='c') + self.check_block(block, """\ + my_block: + %"c" = alloca i32*, !dbg !0 + """) + + +class 
TestTypes(TestBase): + + def has_logical_equality(self, ty): + while isinstance(ty, ir.PointerType): + ty = ty.pointee + return not isinstance(ty, ir.LabelType) + + def assorted_types(self): + """ + A bunch of mutually unequal types + """ + # Avoid polluting the namespace + context = ir.Context() + types = [ + ir.LabelType(), ir.VoidType(), + ir.FunctionType(int1, (int8, int8)), ir.FunctionType(int1, (int8,)), + ir.FunctionType(int1, (int8,), var_arg=True), + ir.FunctionType(int8, (int8,)), + int1, int8, int32, flt, dbl, + ir.ArrayType(flt, 5), ir.ArrayType(dbl, 5), ir.ArrayType(dbl, 4), + ir.LiteralStructType((int1, int8)), ir.LiteralStructType((int8, + int1)), + context.get_identified_type("MyType1"), + context.get_identified_type("MyType2"), + ] + types += [ir.PointerType(tp) for tp in types + if not isinstance(tp, (ir.VoidType, ir.LabelType))] + + return types + + def test_pickling(self): + types = self.assorted_types() + for ty in types: + newty = self.assert_pickle_correctly(ty) + if self.has_logical_equality(ty): + self.assertEqual(newty, ty) + + def test_comparisons(self): + types = self.assorted_types() + for a, b in itertools.product(types, types): + if a is not b: + self.assertFalse(a == b, (a, b)) + self.assertTrue(a != b, (a, b)) + # We assume copy.copy() works fine here... + for tp in types: + other = copy.copy(tp) + if self.has_logical_equality(tp): + self.assertTrue(tp == other, (tp, other)) + self.assertFalse(tp != other, (tp, other)) + else: + self.assertFalse(tp == other, (tp, other)) + self.assertTrue(tp != other, (tp, other)) + + def test_str(self): + """ + Test the string representation of types. 
+ """ + self.assertEqual(str(int1), 'i1') + self.assertEqual(str(ir.IntType(29)), 'i29') + self.assertEqual(str(flt), 'float') + self.assertEqual(str(dbl), 'double') + self.assertEqual(str(ir.VoidType()), 'void') + self.assertEqual(str(ir.FunctionType(int1, ())), 'i1 ()') + self.assertEqual(str(ir.FunctionType(int1, (flt,))), 'i1 (float)') + self.assertEqual(str(ir.FunctionType(int1, (flt, dbl))), + 'i1 (float, double)') + self.assertEqual(str(ir.FunctionType(int1, (), var_arg=True)), + 'i1 (...)') + self.assertEqual(str(ir.FunctionType(int1, (flt,), var_arg=True)), + 'i1 (float, ...)') + self.assertEqual(str(ir.FunctionType(int1, (flt, dbl), var_arg=True)), + 'i1 (float, double, ...)') + self.assertEqual(str(ir.PointerType(int32)), 'i32*') + self.assertEqual(str(ir.PointerType(ir.PointerType(int32))), 'i32**') + self.assertEqual(str(ir.ArrayType(int1, 5)), '[5 x i1]') + self.assertEqual(str(ir.ArrayType(ir.PointerType(int1), 5)), + '[5 x i1*]') + self.assertEqual(str(ir.PointerType(ir.ArrayType(int1, 5))), + '[5 x i1]*') + self.assertEqual(str(ir.LiteralStructType((int1,))), '{i1}') + self.assertEqual(str(ir.LiteralStructType((int1, flt))), '{i1, float}') + self.assertEqual(str(ir.LiteralStructType(( + ir.PointerType(int1), ir.LiteralStructType((int32, int8))))), + '{i1*, {i32, i8}}') + self.assertEqual(str(ir.LiteralStructType((int1,), packed=True)), + '<{i1}>') + self.assertEqual(str(ir.LiteralStructType((int1, flt), packed=True)), + '<{i1, float}>') + + # Avoid polluting the namespace + context = ir.Context() + mytype = context.get_identified_type("MyType") + self.assertEqual(str(mytype), "%\"MyType\"") + mytype1 = context.get_identified_type("MyType\\") + self.assertEqual(str(mytype1), "%\"MyType\\5c\"") + mytype2 = context.get_identified_type("MyType\"") + self.assertEqual(str(mytype2), "%\"MyType\\22\"") + + def test_hash(self): + for typ in filter(self.has_logical_equality, self.assorted_types()): + self.assertEqual(hash(typ), hash(copy.copy(typ))) + + def 
test_gep(self): + def check_constant(tp, i, expected): + actual = tp.gep(ir.Constant(int32, i)) + self.assertEqual(actual, expected) + + def check_index_type(tp): + index = ir.Constant(dbl, 1.0) + with self.assertRaises(TypeError): + tp.gep(index) + + tp = ir.PointerType(dbl) + for i in range(5): + check_constant(tp, i, dbl) + check_index_type(tp) + + tp = ir.ArrayType(int1, 3) + for i in range(3): + check_constant(tp, i, int1) + check_index_type(tp) + + tp = ir.LiteralStructType((dbl, ir.LiteralStructType((int1, int8)))) + check_constant(tp, 0, dbl) + check_constant(tp, 1, ir.LiteralStructType((int1, int8))) + with self.assertRaises(IndexError): + tp.gep(ir.Constant(int32, 2)) + check_index_type(tp) + + context = ir.Context() + tp = ir.IdentifiedStructType(context, "MyType") + tp.set_body(dbl, ir.LiteralStructType((int1, int8))) + check_constant(tp, 0, dbl) + check_constant(tp, 1, ir.LiteralStructType((int1, int8))) + with self.assertRaises(IndexError): + tp.gep(ir.Constant(int32, 2)) + check_index_type(tp) + + def test_abi_size(self): + td = llvm.create_target_data("e-m:e-i64:64-f80:128-n8:16:32:64-S128") + + def check(tp, expected): + self.assertEqual(tp.get_abi_size(td), expected) + check(int8, 1) + check(int32, 4) + check(int64, 8) + check(ir.ArrayType(int8, 5), 5) + check(ir.ArrayType(int32, 5), 20) + check(ir.LiteralStructType((dbl, flt, flt)), 16) + + def test_abi_alignment(self): + td = llvm.create_target_data("e-m:e-i64:64-f80:128-n8:16:32:64-S128") + + def check(tp, expected): + self.assertIn(tp.get_abi_alignment(td), expected) + check(int8, (1, 2, 4)) + check(int32, (4,)) + check(int64, (8,)) + check(ir.ArrayType(int8, 5), (1, 2, 4)) + check(ir.ArrayType(int32, 5), (4,)) + check(ir.LiteralStructType((dbl, flt, flt)), (8,)) + + def test_identified_struct(self): + context = ir.Context() + mytype = context.get_identified_type("MyType") + module = ir.Module(context=context) + self.assertTrue(mytype.is_opaque) + self.assert_valid_ir(module) + oldstr = 
str(module) + mytype.set_body(ir.IntType(32), ir.IntType(64), ir.FloatType()) + self.assertFalse(mytype.is_opaque) + self.assert_valid_ir(module) + self.assertNotEqual(oldstr, str(module)) + + def test_target_data_non_default_context(self): + context = ir.Context() + mytype = context.get_identified_type("MyType") + mytype.elements = [ir.IntType(32)] + td = llvm.create_target_data("e-m:e-i64:64-f80:128-n8:16:32:64-S128") + self.assertEqual(mytype.get_abi_size(td, context=context), 4) + + def test_vector(self): + vecty = ir.VectorType(ir.IntType(32), 8) + self.assertEqual(str(vecty), "<8 x i32>") + + +def c32(i): + return ir.Constant(int32, i) + + +class TestConstant(TestBase): + + def test_integers(self): + c = ir.Constant(int32, 42) + self.assertEqual(str(c), 'i32 42') + c = ir.Constant(int1, 1) + self.assertEqual(str(c), 'i1 1') + c = ir.Constant(int1, 0) + self.assertEqual(str(c), 'i1 0') + c = ir.Constant(int1, True) + self.assertEqual(str(c), 'i1 true') + c = ir.Constant(int1, False) + self.assertEqual(str(c), 'i1 false') + c = ir.Constant(int1, ir.Undefined) + self.assertEqual(str(c), 'i1 undef') + c = ir.Constant(int1, None) + self.assertEqual(str(c), 'i1 0') + + def test_reals(self): + # XXX Test NaNs and infs + c = ir.Constant(flt, 1.5) + self.assertEqual(str(c), 'float 0x3ff8000000000000') + c = ir.Constant(flt, -1.5) + self.assertEqual(str(c), 'float 0xbff8000000000000') + c = ir.Constant(dbl, 1.5) + self.assertEqual(str(c), 'double 0x3ff8000000000000') + c = ir.Constant(dbl, -1.5) + self.assertEqual(str(c), 'double 0xbff8000000000000') + c = ir.Constant(dbl, ir.Undefined) + self.assertEqual(str(c), 'double undef') + c = ir.Constant(dbl, None) + self.assertEqual(str(c), 'double 0.0') + + def test_arrays(self): + c = ir.Constant(ir.ArrayType(int32, 3), (c32(5), c32(6), c32(4))) + self.assertEqual(str(c), '[3 x i32] [i32 5, i32 6, i32 4]') + c = ir.Constant(ir.ArrayType(int32, 2), (c32(5), c32(ir.Undefined))) + self.assertEqual(str(c), '[2 x i32] [i32 5, 
i32 undef]') + + c = ir.Constant.literal_array((c32(5), c32(6), c32(ir.Undefined))) + self.assertEqual(str(c), '[3 x i32] [i32 5, i32 6, i32 undef]') + with self.assertRaises(TypeError) as raises: + ir.Constant.literal_array((c32(5), ir.Constant(flt, 1.5))) + self.assertEqual(str(raises.exception), + "all elements must have the same type") + + c = ir.Constant(ir.ArrayType(int32, 2), ir.Undefined) + self.assertEqual(str(c), '[2 x i32] undef') + c = ir.Constant(ir.ArrayType(int32, 2), None) + self.assertEqual(str(c), '[2 x i32] zeroinitializer') + # Raw array syntax + c = ir.Constant(ir.ArrayType(int8, 11), bytearray(b"foobar_123\x80")) + self.assertEqual(str(c), r'[11 x i8] c"foobar_123\80"') + c = ir.Constant(ir.ArrayType(int8, 4), bytearray(b"\x00\x01\x04\xff")) + self.assertEqual(str(c), r'[4 x i8] c"\00\01\04\ff"') + # Recursive instantiation of inner constants + c = ir.Constant(ir.ArrayType(int32, 3), (5, ir.Undefined, 6)) + self.assertEqual(str(c), '[3 x i32] [i32 5, i32 undef, i32 6]') + # Invalid number of args + with self.assertRaises(ValueError): + ir.Constant(ir.ArrayType(int32, 3), (5, 6)) + + def test_vector(self): + vecty = ir.VectorType(ir.IntType(32), 8) + vals = [1, 2, 4, 3, 8, 6, 9, 7] + vec = ir.Constant(vecty, vals) + vec_repr = "<8 x i32> <{}>".format( + ', '.join(map('i32 {}'.format, vals))) + self.assertEqual(str(vec), vec_repr) + + def test_non_nullable_int(self): + constant = ir.Constant(ir.IntType(32), None).constant + self.assertEqual(constant, 0) + + def test_structs(self): + st1 = ir.LiteralStructType((flt, int1)) + st2 = ir.LiteralStructType((int32, st1)) + c = ir.Constant(st1, (ir.Constant(ir.FloatType(), 1.5), + ir.Constant(int1, True))) + self.assertEqual(str(c), + '{float, i1} {float 0x3ff8000000000000, i1 true}') + c = ir.Constant.literal_struct((ir.Constant(ir.FloatType(), 1.5), + ir.Constant(int1, True))) + self.assertEqual(c.type, st1) + self.assertEqual(str(c), + '{float, i1} {float 0x3ff8000000000000, i1 true}') + c = 
ir.Constant.literal_struct((ir.Constant(ir.FloatType(), 1.5), + ir.Constant(int1, ir.Undefined))) + self.assertEqual(c.type, st1) + self.assertEqual(str(c), + '{float, i1} {float 0x3ff8000000000000, i1 undef}') + c = ir.Constant(st1, ir.Undefined) + self.assertEqual(str(c), '{float, i1} undef') + c = ir.Constant(st1, None) + self.assertEqual(str(c), '{float, i1} zeroinitializer') + # Recursive instantiation of inner constants + c1 = ir.Constant(st1, (1.5, True)) + self.assertEqual(str(c1), + '{float, i1} {float 0x3ff8000000000000, i1 true}') + c2 = ir.Constant(st2, (42, c1)) + self.assertEqual(str(c2), ('{i32, {float, i1}} {i32 42, {float, i1} ' + '{float 0x3ff8000000000000, i1 true}}')) + c3 = ir.Constant(st2, (42, (1.5, True))) + self.assertEqual(str(c3), str(c2)) + # Invalid number of args + with self.assertRaises(ValueError): + ir.Constant(st2, (4, 5, 6)) + + def test_undefined_literal_struct_pickling(self): + i8 = ir.IntType(8) + st = ir.Constant(ir.LiteralStructType([i8, i8]), ir.Undefined) + self.assert_pickle_correctly(st) + + def test_type_instantiaton(self): + """ + Instantiating a type should create a constant. 
+ """ + c = int8(42) + self.assertIsInstance(c, ir.Constant) + self.assertEqual(str(c), 'i8 42') + c = int1(True) + self.assertIsInstance(c, ir.Constant) + self.assertEqual(str(c), 'i1 true') + # Arrays + at = ir.ArrayType(int32, 3) + c = at([c32(4), c32(5), c32(6)]) + self.assertEqual(str(c), '[3 x i32] [i32 4, i32 5, i32 6]') + c = at([4, 5, 6]) + self.assertEqual(str(c), '[3 x i32] [i32 4, i32 5, i32 6]') + c = at(None) + self.assertEqual(str(c), '[3 x i32] zeroinitializer') + with self.assertRaises(ValueError): + at([4, 5, 6, 7]) + # Structs + st1 = ir.LiteralStructType((flt, int1)) + st2 = ir.LiteralStructType((int32, st1)) + c = st1((1.5, True)) + self.assertEqual(str(c), ('{float, i1} {float 0x3ff8000000000000, i1 ' + 'true}')) + c = st2((42, (1.5, True))) + self.assertEqual(str(c), ('{i32, {float, i1}} {i32 42, {float, i1} ' + '{float 0x3ff8000000000000, i1 true}}')) + + def test_repr(self): + """ + Constants should have a useful repr(). + """ + c = int32(42) + self.assertEqual(repr(c), "") + + def test_encoding_problem(self): + c = ir.Constant(ir.ArrayType(ir.IntType(8), 256), + bytearray(range(256))) + m = self.module() + gv = ir.GlobalVariable(m, c.type, "myconstant") + gv.global_constant = True + gv.initializer = c + # With utf-8, the following will cause: + # UnicodeDecodeError: 'utf-8' codec can't decode byte 0xe0 in position + # 136: invalid continuation byte + parsed = llvm.parse_assembly(str(m)) + # Make sure the encoding does not modify the IR + reparsed = llvm.parse_assembly(str(parsed)) + self.assertEqual(str(parsed), str(reparsed)) + + def test_gep(self): + m = self.module() + tp = ir.LiteralStructType((flt, int1)) + gv = ir.GlobalVariable(m, tp, "myconstant") + c = gv.gep([ir.Constant(int32, x) for x in (0, 1)]) + self.assertEqual(str(c), + 'getelementptr ({float, i1}, {float, i1}* @"myconstant", i32 0, i32 1)') # noqa E501 + self.assertEqual(c.type, ir.PointerType(int1)) + + const = ir.Constant(tp, None) + with self.assertRaises(TypeError): + 
const.gep([ir.Constant(int32, 0)]) + + const_ptr = ir.Constant(tp.as_pointer(), None) + c2 = const_ptr.gep([ir.Constant(int32, 0)]) + self.assertEqual(str(c2), + 'getelementptr ({float, i1}, {float, i1}* null, i32 0)') # noqa E501 + self.assertEqual(c.type, ir.PointerType(int1)) + + def test_gep_addrspace_globalvar(self): + m = self.module() + tp = ir.LiteralStructType((flt, int1)) + addrspace = 4 + + gv = ir.GlobalVariable(m, tp, "myconstant", addrspace=addrspace) + self.assertEqual(gv.addrspace, addrspace) + c = gv.gep([ir.Constant(int32, x) for x in (0, 1)]) + self.assertEqual(c.type.addrspace, addrspace) + self.assertEqual(str(c), + ('getelementptr ({float, i1}, {float, i1} ' + 'addrspace(4)* @"myconstant", i32 0, i32 1)')) + self.assertEqual(c.type, ir.PointerType(int1, addrspace=addrspace)) + + def test_trunc(self): + c = ir.Constant(int64, 1).trunc(int32) + self.assertEqual(str(c), 'trunc (i64 1 to i32)') + + def test_zext(self): + c = ir.Constant(int32, 1).zext(int64) + self.assertEqual(str(c), 'zext (i32 1 to i64)') + + def test_sext(self): + c = ir.Constant(int32, -1).sext(int64) + self.assertEqual(str(c), 'sext (i32 -1 to i64)') + + def test_fptrunc(self): + c = ir.Constant(flt, 1).fptrunc(hlf) + self.assertEqual(str(c), 'fptrunc (float 0x3ff0000000000000 to half)') + + def test_fpext(self): + c = ir.Constant(flt, 1).fpext(dbl) + self.assertEqual(str(c), 'fpext (float 0x3ff0000000000000 to double)') + + def test_bitcast(self): + m = self.module() + gv = ir.GlobalVariable(m, int32, "myconstant") + c = gv.bitcast(int64.as_pointer()) + self.assertEqual(str(c), 'bitcast (i32* @"myconstant" to i64*)') + + def test_fptoui(self): + c = ir.Constant(flt, 1).fptoui(int32) + self.assertEqual(str(c), 'fptoui (float 0x3ff0000000000000 to i32)') + + def test_uitofp(self): + c = ir.Constant(int32, 1).uitofp(flt) + self.assertEqual(str(c), 'uitofp (i32 1 to float)') + + def test_fptosi(self): + c = ir.Constant(flt, 1).fptosi(int32) + self.assertEqual(str(c), 'fptosi 
(float 0x3ff0000000000000 to i32)') + + def test_sitofp(self): + c = ir.Constant(int32, 1).sitofp(flt) + self.assertEqual(str(c), 'sitofp (i32 1 to float)') + + def test_ptrtoint_1(self): + ptr = ir.Constant(int64.as_pointer(), None) + one = ir.Constant(int32, 1) + c = ptr.ptrtoint(int32) + + self.assertRaises(TypeError, one.ptrtoint, int64) + self.assertRaises(TypeError, ptr.ptrtoint, flt) + self.assertEqual(str(c), 'ptrtoint (i64* null to i32)') + + def test_ptrtoint_2(self): + m = self.module() + gv = ir.GlobalVariable(m, int32, "myconstant") + c = gv.ptrtoint(int64) + self.assertEqual(str(c), 'ptrtoint (i32* @"myconstant" to i64)') + + self.assertRaisesRegex( + TypeError, + r"can only ptrtoint\(\) to integer type, not 'i64\*'", + gv.ptrtoint, + int64.as_pointer()) + + c2 = ir.Constant(int32, 0) + self.assertRaisesRegex( + TypeError, + r"can only call ptrtoint\(\) on pointer type, not 'i32'", + c2.ptrtoint, + int64) + + def test_inttoptr(self): + one = ir.Constant(int32, 1) + pi = ir.Constant(flt, 3.14) + c = one.inttoptr(int64.as_pointer()) + + self.assertRaises(TypeError, one.inttoptr, int64) + self.assertRaises(TypeError, pi.inttoptr, int64.as_pointer()) + self.assertEqual(str(c), 'inttoptr (i32 1 to i64*)') + + def test_neg(self): + one = ir.Constant(int32, 1) + self.assertEqual(str(one.neg()), 'sub (i32 0, i32 1)') + + def test_not(self): + one = ir.Constant(int32, 1) + self.assertEqual(str(one.not_()), 'xor (i32 1, i32 -1)') + + def test_fneg(self): + one = ir.Constant(flt, 1) + self.assertEqual(str(one.fneg()), 'fneg (float 0x3ff0000000000000)') + + def test_int_binops(self): + one = ir.Constant(int32, 1) + two = ir.Constant(int32, 2) + + oracle = {one.shl: 'shl', one.lshr: 'lshr', one.ashr: 'ashr', + one.add: 'add', one.sub: 'sub', one.mul: 'mul', + one.udiv: 'udiv', one.sdiv: 'sdiv', one.urem: 'urem', + one.srem: 'srem', one.or_: 'or', one.and_: 'and', + one.xor: 'xor'} + for fn, irop in oracle.items(): + self.assertEqual(str(fn(two)), irop + ' (i32 1, 
i32 2)') + + # unsigned integer compare + oracle = {'==': 'eq', '!=': 'ne', '>': + 'ugt', '>=': 'uge', '<': 'ult', '<=': 'ule'} + for cop, cond in oracle.items(): + actual = str(one.icmp_unsigned(cop, two)) + expected = 'icmp ' + cond + ' (i32 1, i32 2)' + self.assertEqual(actual, expected) + + # signed integer compare + oracle = {'==': 'eq', '!=': 'ne', + '>': 'sgt', '>=': 'sge', '<': 'slt', '<=': 'sle'} + for cop, cond in oracle.items(): + actual = str(one.icmp_signed(cop, two)) + expected = 'icmp ' + cond + ' (i32 1, i32 2)' + self.assertEqual(actual, expected) + + def test_flt_binops(self): + one = ir.Constant(flt, 1) + two = ir.Constant(flt, 2) + + oracle = {one.fadd: 'fadd', one.fsub: 'fsub', one.fmul: 'fmul', + one.fdiv: 'fdiv', one.frem: 'frem'} + for fn, irop in oracle.items(): + actual = str(fn(two)) + expected = irop + (' (float 0x3ff0000000000000,' + ' float 0x4000000000000000)') + self.assertEqual(actual, expected) + + # ordered float compare + oracle = {'==': 'oeq', '!=': 'one', '>': 'ogt', '>=': 'oge', + '<': 'olt', '<=': 'ole'} + for cop, cond in oracle.items(): + actual = str(one.fcmp_ordered(cop, two)) + expected = 'fcmp ' + cond + (' (float 0x3ff0000000000000,' + ' float 0x4000000000000000)') + self.assertEqual(actual, expected) + + # unordered float compare + oracle = {'==': 'ueq', '!=': 'une', '>': 'ugt', '>=': 'uge', + '<': 'ult', '<=': 'ule'} + for cop, cond in oracle.items(): + actual = str(one.fcmp_unordered(cop, two)) + expected = 'fcmp ' + cond + (' (float 0x3ff0000000000000,' + ' float 0x4000000000000000)') + self.assertEqual(actual, expected) + + +class TestTransforms(TestBase): + def test_call_transform(self): + mod = ir.Module() + foo = ir.Function(mod, ir.FunctionType(ir.VoidType(), ()), "foo") + bar = ir.Function(mod, ir.FunctionType(ir.VoidType(), ()), "bar") + builder = ir.IRBuilder() + builder.position_at_end(foo.append_basic_block()) + call = builder.call(foo, ()) + self.assertEqual(call.callee, foo) + modified = 
ir.replace_all_calls(mod, foo, bar) + self.assertIn(call, modified) + self.assertNotEqual(call.callee, foo) + self.assertEqual(call.callee, bar) + + +class TestSingleton(TestBase): + def test_undefined(self): + self.assertIs(ir.Undefined, ir.values._Undefined()) + self.assertIs(ir.Undefined, copy.copy(ir.Undefined)) + self.assertIs(ir.Undefined, copy.deepcopy(ir.Undefined)) + self.assert_pickle_correctly(ir.Undefined) + + +if __name__ == '__main__': + unittest.main() diff --git a/vllm/lib/python3.10/site-packages/llvmlite/tests/test_refprune.py b/vllm/lib/python3.10/site-packages/llvmlite/tests/test_refprune.py new file mode 100644 index 0000000000000000000000000000000000000000..0c4208a265292b098596cd38487fa04c0e49a80c --- /dev/null +++ b/vllm/lib/python3.10/site-packages/llvmlite/tests/test_refprune.py @@ -0,0 +1,557 @@ +import unittest +from llvmlite import ir +from llvmlite import binding as llvm +from llvmlite.tests import TestCase + +from . import refprune_proto as proto + + +def _iterate_cases(generate_test): + def wrap(fn): + def wrapped(self): + return generate_test(self, fn) + wrapped.__doc__ = f"generated test for {fn.__module__}.{fn.__name__}" + return wrapped + + for k, case_fn in proto.__dict__.items(): + if k.startswith('case'): + yield f'test_{k}', wrap(case_fn) + + +class TestRefPrunePrototype(TestCase): + """ + Test that the prototype is working. + """ + def generate_test(self, case_gen): + nodes, edges, expected = case_gen() + got = proto.FanoutAlgorithm(nodes, edges).run() + self.assertEqual(expected, got) + + # Generate tests + for name, case in _iterate_cases(generate_test): + locals()[name] = case + + +ptr_ty = ir.IntType(8).as_pointer() + + +class TestRefPrunePass(TestCase): + """ + Test that the C++ implementation matches the expected behavior as for + the prototype. + + This generates a LLVM module for each test case, runs the pruner and checks + that the expected results are achieved. 
+ """ + + def make_incref(self, m): + fnty = ir.FunctionType(ir.VoidType(), [ptr_ty]) + return ir.Function(m, fnty, name='NRT_incref') + + def make_decref(self, m): + fnty = ir.FunctionType(ir.VoidType(), [ptr_ty]) + return ir.Function(m, fnty, name='NRT_decref') + + def make_switcher(self, m): + fnty = ir.FunctionType(ir.IntType(32), ()) + return ir.Function(m, fnty, name='switcher') + + def make_brancher(self, m): + fnty = ir.FunctionType(ir.IntType(1), ()) + return ir.Function(m, fnty, name='brancher') + + def generate_ir(self, nodes, edges): + # Build LLVM module for the CFG + m = ir.Module() + + incref_fn = self.make_incref(m) + decref_fn = self.make_decref(m) + switcher_fn = self.make_switcher(m) + brancher_fn = self.make_brancher(m) + + fnty = ir.FunctionType(ir.VoidType(), [ptr_ty]) + fn = ir.Function(m, fnty, name='main') + [ptr] = fn.args + ptr.name = 'mem' + # populate the BB nodes + bbmap = {} + for bb in edges: + bbmap[bb] = fn.append_basic_block(bb) + # populate the BB + builder = ir.IRBuilder() + for bb, jump_targets in edges.items(): + builder.position_at_end(bbmap[bb]) + # Insert increfs and decrefs + for action in nodes[bb]: + if action == 'incref': + builder.call(incref_fn, [ptr]) + elif action == 'decref': + builder.call(decref_fn, [ptr]) + else: + raise AssertionError('unreachable') + + # Insert the terminator. + # Switch base on the number of jump targets. 
+ n_targets = len(jump_targets) + if n_targets == 0: + builder.ret_void() + elif n_targets == 1: + [dst] = jump_targets + builder.branch(bbmap[dst]) + elif n_targets == 2: + [left, right] = jump_targets + sel = builder.call(brancher_fn, ()) + builder.cbranch(sel, bbmap[left], bbmap[right]) + elif n_targets > 2: + sel = builder.call(switcher_fn, ()) + [head, *tail] = jump_targets + + sw = builder.switch(sel, default=bbmap[head]) + for i, dst in enumerate(tail): + sw.add_case(sel.type(i), bbmap[dst]) + else: + raise AssertionError('unreachable') + + return m + + def apply_refprune(self, irmod): + mod = llvm.parse_assembly(str(irmod)) + pm = llvm.ModulePassManager() + pm.add_refprune_pass() + pm.run(mod) + return mod + + def check(self, mod, expected, nodes): + # preprocess incref/decref locations + d = {} + for k, vs in nodes.items(): + n_incref = vs.count('incref') + n_decref = vs.count('decref') + d[k] = {'incref': n_incref, 'decref': n_decref} + for k, stats in d.items(): + if expected.get(k): + stats['incref'] -= 1 + for dec_bb in expected[k]: + d[dec_bb]['decref'] -= 1 + + # find the main function + for f in mod.functions: + if f.name == 'main': + break + # check each BB + for bb in f.blocks: + stats = d[bb.name] + text = str(bb) + n_incref = text.count('NRT_incref') + n_decref = text.count('NRT_decref') + self.assertEqual(stats['incref'], n_incref, msg=f'BB {bb}') + self.assertEqual(stats['decref'], n_decref, msg=f'BB {bb}') + + def generate_test(self, case_gen): + nodes, edges, expected = case_gen() + irmod = self.generate_ir(nodes, edges) + outmod = self.apply_refprune(irmod) + self.check(outmod, expected, nodes) + + # Generate tests + for name, case in _iterate_cases(generate_test): + locals()[name] = case + + +class BaseTestByIR(TestCase): + refprune_bitmask = 0 + + prologue = r""" +declare void @NRT_incref(i8* %ptr) +declare void @NRT_decref(i8* %ptr) +""" + + def check(self, irmod, subgraph_limit=None): + mod = 
llvm.parse_assembly(f"{self.prologue}\n{irmod}") + pm = llvm.ModulePassManager() + if subgraph_limit is None: + pm.add_refprune_pass(self.refprune_bitmask) + else: + pm.add_refprune_pass(self.refprune_bitmask, + subgraph_limit=subgraph_limit) + before = llvm.dump_refprune_stats() + pm.run(mod) + after = llvm.dump_refprune_stats() + return mod, after - before + + +class TestPerBB(BaseTestByIR): + refprune_bitmask = llvm.RefPruneSubpasses.PER_BB + + per_bb_ir_1 = r""" +define void @main(i8* %ptr) { + call void @NRT_incref(i8* %ptr) + call void @NRT_decref(i8* %ptr) + ret void +} +""" + + def test_per_bb_1(self): + mod, stats = self.check(self.per_bb_ir_1) + self.assertEqual(stats.basicblock, 2) + + per_bb_ir_2 = r""" +define void @main(i8* %ptr) { + call void @NRT_incref(i8* %ptr) + call void @NRT_incref(i8* %ptr) + call void @NRT_incref(i8* %ptr) + call void @NRT_decref(i8* %ptr) + call void @NRT_decref(i8* %ptr) + ret void +} +""" + + def test_per_bb_2(self): + mod, stats = self.check(self.per_bb_ir_2) + self.assertEqual(stats.basicblock, 4) + # not pruned + self.assertIn("call void @NRT_incref(i8* %ptr)", str(mod)) + + per_bb_ir_3 = r""" +define void @main(i8* %ptr, i8* %other) { + call void @NRT_incref(i8* %ptr) + call void @NRT_incref(i8* %ptr) + call void @NRT_decref(i8* %ptr) + call void @NRT_decref(i8* %other) + ret void +} +""" + + def test_per_bb_3(self): + mod, stats = self.check(self.per_bb_ir_3) + self.assertEqual(stats.basicblock, 2) + # not pruned + self.assertIn("call void @NRT_decref(i8* %other)", str(mod)) + + per_bb_ir_4 = r""" +; reordered +define void @main(i8* %ptr, i8* %other) { + call void @NRT_incref(i8* %ptr) + call void @NRT_decref(i8* %ptr) + call void @NRT_decref(i8* %ptr) + call void @NRT_decref(i8* %other) + call void @NRT_incref(i8* %ptr) + ret void +} +""" + + def test_per_bb_4(self): + mod, stats = self.check(self.per_bb_ir_4) + self.assertEqual(stats.basicblock, 4) + # not pruned + self.assertIn("call void @NRT_decref(i8* %other)", 
str(mod)) + + +class TestDiamond(BaseTestByIR): + refprune_bitmask = llvm.RefPruneSubpasses.DIAMOND + + per_diamond_1 = r""" +define void @main(i8* %ptr) { +bb_A: + call void @NRT_incref(i8* %ptr) + br label %bb_B +bb_B: + call void @NRT_decref(i8* %ptr) + ret void +} +""" + + def test_per_diamond_1(self): + mod, stats = self.check(self.per_diamond_1) + self.assertEqual(stats.diamond, 2) + + per_diamond_2 = r""" +define void @main(i8* %ptr, i1 %cond) { +bb_A: + call void @NRT_incref(i8* %ptr) + br i1 %cond, label %bb_B, label %bb_C +bb_B: + br label %bb_D +bb_C: + br label %bb_D +bb_D: + call void @NRT_decref(i8* %ptr) + ret void +} +""" + + def test_per_diamond_2(self): + mod, stats = self.check(self.per_diamond_2) + self.assertEqual(stats.diamond, 2) + + per_diamond_3 = r""" +define void @main(i8* %ptr, i1 %cond) { +bb_A: + call void @NRT_incref(i8* %ptr) + br i1 %cond, label %bb_B, label %bb_C +bb_B: + br label %bb_D +bb_C: + call void @NRT_decref(i8* %ptr) ; reject because of decref in diamond + br label %bb_D +bb_D: + call void @NRT_decref(i8* %ptr) + ret void +} +""" + + def test_per_diamond_3(self): + mod, stats = self.check(self.per_diamond_3) + self.assertEqual(stats.diamond, 0) + + per_diamond_4 = r""" +define void @main(i8* %ptr, i1 %cond) { +bb_A: + call void @NRT_incref(i8* %ptr) + br i1 %cond, label %bb_B, label %bb_C +bb_B: + call void @NRT_incref(i8* %ptr) ; extra incref will not affect prune + br label %bb_D +bb_C: + br label %bb_D +bb_D: + call void @NRT_decref(i8* %ptr) + ret void +} +""" + + def test_per_diamond_4(self): + mod, stats = self.check(self.per_diamond_4) + self.assertEqual(stats.diamond, 2) + + per_diamond_5 = r""" +define void @main(i8* %ptr, i1 %cond) { +bb_A: + call void @NRT_incref(i8* %ptr) + call void @NRT_incref(i8* %ptr) + br i1 %cond, label %bb_B, label %bb_C +bb_B: + br label %bb_D +bb_C: + br label %bb_D +bb_D: + call void @NRT_decref(i8* %ptr) + call void @NRT_decref(i8* %ptr) + ret void +} +""" + + def 
test_per_diamond_5(self): + mod, stats = self.check(self.per_diamond_5) + self.assertEqual(stats.diamond, 4) + + +class TestFanout(BaseTestByIR): + """More complex cases are tested in TestRefPrunePass + """ + + refprune_bitmask = llvm.RefPruneSubpasses.FANOUT + + fanout_1 = r""" +define void @main(i8* %ptr, i1 %cond) { +bb_A: + call void @NRT_incref(i8* %ptr) + br i1 %cond, label %bb_B, label %bb_C +bb_B: + call void @NRT_decref(i8* %ptr) + ret void +bb_C: + call void @NRT_decref(i8* %ptr) + ret void +} +""" + + def test_fanout_1(self): + mod, stats = self.check(self.fanout_1) + self.assertEqual(stats.fanout, 3) + + fanout_2 = r""" +define void @main(i8* %ptr, i1 %cond, i8** %excinfo) { +bb_A: + call void @NRT_incref(i8* %ptr) + br i1 %cond, label %bb_B, label %bb_C +bb_B: + call void @NRT_decref(i8* %ptr) + ret void +bb_C: + call void @NRT_decref(i8* %ptr) + br label %bb_B ; illegal jump to other decref +} +""" + + def test_fanout_2(self): + mod, stats = self.check(self.fanout_2) + self.assertEqual(stats.fanout, 0) + + fanout_3 = r""" +define void @main(i8* %ptr, i1 %cond) { +bb_A: + call void @NRT_incref(i8* %ptr) + call void @NRT_incref(i8* %ptr) + br i1 %cond, label %bb_B, label %bb_C +bb_B: + call void @NRT_decref(i8* %ptr) + call void @NRT_decref(i8* %ptr) + call void @NRT_decref(i8* %ptr) + ret void +bb_C: + call void @NRT_decref(i8* %ptr) + call void @NRT_decref(i8* %ptr) + ret void +} +""" + + def test_fanout_3(self): + mod, stats = self.check(self.fanout_3) + self.assertEqual(stats.fanout, 6) + + def test_fanout_3_limited(self): + # With subgraph limit at 1, it is essentially turning off the fanout + # pruner. 
+ mod, stats = self.check(self.fanout_3, subgraph_limit=1) + self.assertEqual(stats.fanout, 0) + + +class TestFanoutRaise(BaseTestByIR): + refprune_bitmask = llvm.RefPruneSubpasses.FANOUT_RAISE + + fanout_raise_1 = r""" +define i32 @main(i8* %ptr, i1 %cond, i8** %excinfo) { +bb_A: + call void @NRT_incref(i8* %ptr) + br i1 %cond, label %bb_B, label %bb_C +bb_B: + call void @NRT_decref(i8* %ptr) + ret i32 0 +bb_C: + store i8* null, i8** %excinfo, !numba_exception_output !0 + ret i32 1 +} +!0 = !{i1 true} +""" + + def test_fanout_raise_1(self): + mod, stats = self.check(self.fanout_raise_1) + self.assertEqual(stats.fanout_raise, 2) + + fanout_raise_2 = r""" +define i32 @main(i8* %ptr, i1 %cond, i8** %excinfo) { +bb_A: + call void @NRT_incref(i8* %ptr) + br i1 %cond, label %bb_B, label %bb_C +bb_B: + call void @NRT_decref(i8* %ptr) + ret i32 0 +bb_C: + store i8* null, i8** %excinfo, !numba_exception_typo !0 ; bad metadata + ret i32 1 +} + +!0 = !{i1 true} +""" + + def test_fanout_raise_2(self): + # This is ensuring that fanout_raise is not pruning when the metadata + # is incorrectly named. 
+ mod, stats = self.check(self.fanout_raise_2) + self.assertEqual(stats.fanout_raise, 0) + + fanout_raise_3 = r""" +define i32 @main(i8* %ptr, i1 %cond, i8** %excinfo) { +bb_A: + call void @NRT_incref(i8* %ptr) + br i1 %cond, label %bb_B, label %bb_C +bb_B: + call void @NRT_decref(i8* %ptr) + ret i32 0 +bb_C: + store i8* null, i8** %excinfo, !numba_exception_output !0 + ret i32 1 +} + +!0 = !{i32 1} ; ok; use i32 +""" + + def test_fanout_raise_3(self): + mod, stats = self.check(self.fanout_raise_3) + self.assertEqual(stats.fanout_raise, 2) + + fanout_raise_4 = r""" +define i32 @main(i8* %ptr, i1 %cond, i8** %excinfo) { +bb_A: + call void @NRT_incref(i8* %ptr) + br i1 %cond, label %bb_B, label %bb_C +bb_B: + ret i32 1 ; BAD; all tails are raising without decref +bb_C: + ret i32 1 ; BAD; all tails are raising without decref +} + +!0 = !{i1 1} +""" + + def test_fanout_raise_4(self): + mod, stats = self.check(self.fanout_raise_4) + self.assertEqual(stats.fanout_raise, 0) + + fanout_raise_5 = r""" +define i32 @main(i8* %ptr, i1 %cond, i8** %excinfo) { +bb_A: + call void @NRT_incref(i8* %ptr) + br i1 %cond, label %bb_B, label %bb_C +bb_B: + call void @NRT_decref(i8* %ptr) + br label %common.ret +bb_C: + store i8* null, i8** %excinfo, !numba_exception_output !0 + br label %common.ret +common.ret: + %common.ret.op = phi i32 [ 0, %bb_B ], [ 1, %bb_C ] + ret i32 %common.ret.op +} +!0 = !{i1 1} +""" + + def test_fanout_raise_5(self): + mod, stats = self.check(self.fanout_raise_5) + self.assertEqual(stats.fanout_raise, 2) + + # test case 6 is from https://github.com/numba/llvmlite/issues/1023 + fanout_raise_6 = r""" +define i32 @main(i8* %ptr, i1 %cond1, i1 %cond2, i1 %cond3, i8** %excinfo) { +bb_A: + call void @NRT_incref(i8* %ptr) + call void @NRT_incref(i8* %ptr) + br i1 %cond1, label %bb_B, label %bb_C +bb_B: + call void @NRT_decref(i8* %ptr) + br i1 %cond2, label %bb_D, label %bb_E +bb_C: + store i8* null, i8** %excinfo, !numba_exception_output !0 + ret i32 1 +bb_D: + 
call void @NRT_decref(i8* %ptr) + ret i32 0 +bb_E: + call void @NRT_incref(i8* %ptr) + br i1 %cond3, label %bb_F, label %bb_C +bb_F: + call void @NRT_decref(i8* %ptr) + call void @NRT_decref(i8* %ptr) + ret i32 0 +} +!0 = !{i1 1} +""" + + def test_fanout_raise_6(self): + mod, stats = self.check(self.fanout_raise_6) + self.assertEqual(stats.fanout_raise, 7) + + +if __name__ == '__main__': + unittest.main() diff --git a/vllm/lib/python3.10/site-packages/mpl_toolkits/axisartist/__init__.py b/vllm/lib/python3.10/site-packages/mpl_toolkits/axisartist/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..7b8d8c08ac226d105a5a54c5f21040cd25107ae6 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/mpl_toolkits/axisartist/__init__.py @@ -0,0 +1,14 @@ +from .axislines import Axes +from .axislines import ( # noqa: F401 + AxesZero, AxisArtistHelper, AxisArtistHelperRectlinear, + GridHelperBase, GridHelperRectlinear, Subplot, SubplotZero) +from .axis_artist import AxisArtist, GridlinesCollection # noqa: F401 +from .grid_helper_curvelinear import GridHelperCurveLinear # noqa: F401 +from .floating_axes import FloatingAxes, FloatingSubplot # noqa: F401 +from mpl_toolkits.axes_grid1.parasite_axes import ( + host_axes_class_factory, parasite_axes_class_factory) + + +ParasiteAxes = parasite_axes_class_factory(Axes) +HostAxes = host_axes_class_factory(Axes) +SubplotHost = HostAxes diff --git a/vllm/lib/python3.10/site-packages/mpl_toolkits/axisartist/__pycache__/angle_helper.cpython-310.pyc b/vllm/lib/python3.10/site-packages/mpl_toolkits/axisartist/__pycache__/angle_helper.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6c00835276f9e76985d8e5cfc74390baa4fd392b Binary files /dev/null and b/vllm/lib/python3.10/site-packages/mpl_toolkits/axisartist/__pycache__/angle_helper.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/mpl_toolkits/axisartist/__pycache__/axes_divider.cpython-310.pyc 
b/vllm/lib/python3.10/site-packages/mpl_toolkits/axisartist/__pycache__/axes_divider.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5ae796bc1ff372a278fe6ec8dc2d4d8686cf41a2 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/mpl_toolkits/axisartist/__pycache__/axes_divider.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/mpl_toolkits/axisartist/__pycache__/axislines.cpython-310.pyc b/vllm/lib/python3.10/site-packages/mpl_toolkits/axisartist/__pycache__/axislines.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..41c1dcf5c668428ec3cdeb31dc5bf60def2afd5a Binary files /dev/null and b/vllm/lib/python3.10/site-packages/mpl_toolkits/axisartist/__pycache__/axislines.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/mpl_toolkits/axisartist/__pycache__/floating_axes.cpython-310.pyc b/vllm/lib/python3.10/site-packages/mpl_toolkits/axisartist/__pycache__/floating_axes.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..884000c6653da67b3aff71eaa07b4e0222a77392 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/mpl_toolkits/axisartist/__pycache__/floating_axes.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/mpl_toolkits/axisartist/__pycache__/grid_finder.cpython-310.pyc b/vllm/lib/python3.10/site-packages/mpl_toolkits/axisartist/__pycache__/grid_finder.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a658c4d246c1bcb430b2be92e488c8ee004d9aa1 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/mpl_toolkits/axisartist/__pycache__/grid_finder.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/mpl_toolkits/axisartist/angle_helper.py b/vllm/lib/python3.10/site-packages/mpl_toolkits/axisartist/angle_helper.py new file mode 100644 index 0000000000000000000000000000000000000000..1786cd70bcdb297de6c17374353cc2a49dfd0ae1 --- /dev/null 
+++ b/vllm/lib/python3.10/site-packages/mpl_toolkits/axisartist/angle_helper.py @@ -0,0 +1,394 @@ +import numpy as np +import math + +from mpl_toolkits.axisartist.grid_finder import ExtremeFinderSimple + + +def select_step_degree(dv): + + degree_limits_ = [1.5, 3, 7, 13, 20, 40, 70, 120, 270, 520] + degree_steps_ = [1, 2, 5, 10, 15, 30, 45, 90, 180, 360] + degree_factors = [1.] * len(degree_steps_) + + minsec_limits_ = [1.5, 2.5, 3.5, 8, 11, 18, 25, 45] + minsec_steps_ = [1, 2, 3, 5, 10, 15, 20, 30] + + minute_limits_ = np.array(minsec_limits_) / 60 + minute_factors = [60.] * len(minute_limits_) + + second_limits_ = np.array(minsec_limits_) / 3600 + second_factors = [3600.] * len(second_limits_) + + degree_limits = [*second_limits_, *minute_limits_, *degree_limits_] + degree_steps = [*minsec_steps_, *minsec_steps_, *degree_steps_] + degree_factors = [*second_factors, *minute_factors, *degree_factors] + + n = np.searchsorted(degree_limits, dv) + step = degree_steps[n] + factor = degree_factors[n] + + return step, factor + + +def select_step_hour(dv): + + hour_limits_ = [1.5, 2.5, 3.5, 5, 7, 10, 15, 21, 36] + hour_steps_ = [1, 2, 3, 4, 6, 8, 12, 18, 24] + hour_factors = [1.] * len(hour_steps_) + + minsec_limits_ = [1.5, 2.5, 3.5, 4.5, 5.5, 8, 11, 14, 18, 25, 45] + minsec_steps_ = [1, 2, 3, 4, 5, 6, 10, 12, 15, 20, 30] + + minute_limits_ = np.array(minsec_limits_) / 60 + minute_factors = [60.] * len(minute_limits_) + + second_limits_ = np.array(minsec_limits_) / 3600 + second_factors = [3600.] * len(second_limits_) + + hour_limits = [*second_limits_, *minute_limits_, *hour_limits_] + hour_steps = [*minsec_steps_, *minsec_steps_, *hour_steps_] + hour_factors = [*second_factors, *minute_factors, *hour_factors] + + n = np.searchsorted(hour_limits, dv) + step = hour_steps[n] + factor = hour_factors[n] + + return step, factor + + +def select_step_sub(dv): + + # subarcsec or degree + tmp = 10.**(int(math.log10(dv))-1.) 
+ + factor = 1./tmp + + if 1.5*tmp >= dv: + step = 1 + elif 3.*tmp >= dv: + step = 2 + elif 7.*tmp >= dv: + step = 5 + else: + step = 1 + factor = 0.1*factor + + return step, factor + + +def select_step(v1, v2, nv, hour=False, include_last=True, + threshold_factor=3600.): + + if v1 > v2: + v1, v2 = v2, v1 + + dv = (v2 - v1) / nv + + if hour: + _select_step = select_step_hour + cycle = 24. + else: + _select_step = select_step_degree + cycle = 360. + + # for degree + if dv > 1 / threshold_factor: + step, factor = _select_step(dv) + else: + step, factor = select_step_sub(dv*threshold_factor) + + factor = factor * threshold_factor + + levs = np.arange(np.floor(v1 * factor / step), + np.ceil(v2 * factor / step) + 0.5, + dtype=int) * step + + # n : number of valid levels. If there is a cycle, e.g., [0, 90, 180, + # 270, 360], the grid line needs to be extended from 0 to 360, so + # we need to return the whole array. However, the last level (360) + # needs to be ignored often. In this case, so we return n=4. + + n = len(levs) + + # we need to check the range of values + # for example, -90 to 90, 0 to 360, + + if factor == 1. 
and levs[-1] >= levs[0] + cycle: # check for cycle + nv = int(cycle / step) + if include_last: + levs = levs[0] + np.arange(0, nv+1, 1) * step + else: + levs = levs[0] + np.arange(0, nv, 1) * step + + n = len(levs) + + return np.array(levs), n, factor + + +def select_step24(v1, v2, nv, include_last=True, threshold_factor=3600): + v1, v2 = v1 / 15, v2 / 15 + levs, n, factor = select_step(v1, v2, nv, hour=True, + include_last=include_last, + threshold_factor=threshold_factor) + return levs * 15, n, factor + + +def select_step360(v1, v2, nv, include_last=True, threshold_factor=3600): + return select_step(v1, v2, nv, hour=False, + include_last=include_last, + threshold_factor=threshold_factor) + + +class LocatorBase: + def __init__(self, nbins, include_last=True): + self.nbins = nbins + self._include_last = include_last + + def set_params(self, nbins=None): + if nbins is not None: + self.nbins = int(nbins) + + +class LocatorHMS(LocatorBase): + def __call__(self, v1, v2): + return select_step24(v1, v2, self.nbins, self._include_last) + + +class LocatorHM(LocatorBase): + def __call__(self, v1, v2): + return select_step24(v1, v2, self.nbins, self._include_last, + threshold_factor=60) + + +class LocatorH(LocatorBase): + def __call__(self, v1, v2): + return select_step24(v1, v2, self.nbins, self._include_last, + threshold_factor=1) + + +class LocatorDMS(LocatorBase): + def __call__(self, v1, v2): + return select_step360(v1, v2, self.nbins, self._include_last) + + +class LocatorDM(LocatorBase): + def __call__(self, v1, v2): + return select_step360(v1, v2, self.nbins, self._include_last, + threshold_factor=60) + + +class LocatorD(LocatorBase): + def __call__(self, v1, v2): + return select_step360(v1, v2, self.nbins, self._include_last, + threshold_factor=1) + + +class FormatterDMS: + deg_mark = r"^{\circ}" + min_mark = r"^{\prime}" + sec_mark = r"^{\prime\prime}" + + fmt_d = "$%d" + deg_mark + "$" + fmt_ds = r"$%d.%s" + deg_mark + "$" + + # %s for sign + fmt_d_m = r"$%s%d" + 
deg_mark + r"\,%02d" + min_mark + "$" + fmt_d_ms = r"$%s%d" + deg_mark + r"\,%02d.%s" + min_mark + "$" + + fmt_d_m_partial = "$%s%d" + deg_mark + r"\,%02d" + min_mark + r"\," + fmt_s_partial = "%02d" + sec_mark + "$" + fmt_ss_partial = "%02d.%s" + sec_mark + "$" + + def _get_number_fraction(self, factor): + ## check for fractional numbers + number_fraction = None + # check for 60 + + for threshold in [1, 60, 3600]: + if factor <= threshold: + break + + d = factor // threshold + int_log_d = int(np.floor(np.log10(d))) + if 10**int_log_d == d and d != 1: + number_fraction = int_log_d + factor = factor // 10**int_log_d + return factor, number_fraction + + return factor, number_fraction + + def __call__(self, direction, factor, values): + if len(values) == 0: + return [] + + ss = np.sign(values) + signs = ["-" if v < 0 else "" for v in values] + + factor, number_fraction = self._get_number_fraction(factor) + + values = np.abs(values) + + if number_fraction is not None: + values, frac_part = divmod(values, 10 ** number_fraction) + frac_fmt = "%%0%dd" % (number_fraction,) + frac_str = [frac_fmt % (f1,) for f1 in frac_part] + + if factor == 1: + if number_fraction is None: + return [self.fmt_d % (s * int(v),) for s, v in zip(ss, values)] + else: + return [self.fmt_ds % (s * int(v), f1) + for s, v, f1 in zip(ss, values, frac_str)] + elif factor == 60: + deg_part, min_part = divmod(values, 60) + if number_fraction is None: + return [self.fmt_d_m % (s1, d1, m1) + for s1, d1, m1 in zip(signs, deg_part, min_part)] + else: + return [self.fmt_d_ms % (s, d1, m1, f1) + for s, d1, m1, f1 + in zip(signs, deg_part, min_part, frac_str)] + + elif factor == 3600: + if ss[-1] == -1: + inverse_order = True + values = values[::-1] + signs = signs[::-1] + else: + inverse_order = False + + l_hm_old = "" + r = [] + + deg_part, min_part_ = divmod(values, 3600) + min_part, sec_part = divmod(min_part_, 60) + + if number_fraction is None: + sec_str = [self.fmt_s_partial % (s1,) for s1 in sec_part] 
+ else: + sec_str = [self.fmt_ss_partial % (s1, f1) + for s1, f1 in zip(sec_part, frac_str)] + + for s, d1, m1, s1 in zip(signs, deg_part, min_part, sec_str): + l_hm = self.fmt_d_m_partial % (s, d1, m1) + if l_hm != l_hm_old: + l_hm_old = l_hm + l = l_hm + s1 + else: + l = "$" + s + s1 + r.append(l) + + if inverse_order: + return r[::-1] + else: + return r + + else: # factor > 3600. + return [r"$%s^{\circ}$" % v for v in ss*values] + + +class FormatterHMS(FormatterDMS): + deg_mark = r"^\mathrm{h}" + min_mark = r"^\mathrm{m}" + sec_mark = r"^\mathrm{s}" + + fmt_d = "$%d" + deg_mark + "$" + fmt_ds = r"$%d.%s" + deg_mark + "$" + + # %s for sign + fmt_d_m = r"$%s%d" + deg_mark + r"\,%02d" + min_mark+"$" + fmt_d_ms = r"$%s%d" + deg_mark + r"\,%02d.%s" + min_mark+"$" + + fmt_d_m_partial = "$%s%d" + deg_mark + r"\,%02d" + min_mark + r"\," + fmt_s_partial = "%02d" + sec_mark + "$" + fmt_ss_partial = "%02d.%s" + sec_mark + "$" + + def __call__(self, direction, factor, values): # hour + return super().__call__(direction, factor, np.asarray(values) / 15) + + +class ExtremeFinderCycle(ExtremeFinderSimple): + # docstring inherited + + def __init__(self, nx, ny, + lon_cycle=360., lat_cycle=None, + lon_minmax=None, lat_minmax=(-90, 90)): + """ + This subclass handles the case where one or both coordinates should be + taken modulo 360, or be restricted to not exceed a specific range. + + Parameters + ---------- + nx, ny : int + The number of samples in each direction. + + lon_cycle, lat_cycle : 360 or None + If not None, values in the corresponding direction are taken modulo + *lon_cycle* or *lat_cycle*; in theory this can be any number but + the implementation actually assumes that it is 360 (if not None); + other values give nonsensical results. + + This is done by "unwrapping" the transformed grid coordinates so + that jumps are less than a half-cycle; then normalizing the span to + no more than a full cycle. 
+ + For example, if values are in the union of the [0, 2] and + [358, 360] intervals (typically, angles measured modulo 360), the + values in the second interval are normalized to [-2, 0] instead so + that the values now cover [-2, 2]. If values are in a range of + [5, 1000], this gets normalized to [5, 365]. + + lon_minmax, lat_minmax : (float, float) or None + If not None, the computed bounding box is clipped to the given + range in the corresponding direction. + """ + self.nx, self.ny = nx, ny + self.lon_cycle, self.lat_cycle = lon_cycle, lat_cycle + self.lon_minmax = lon_minmax + self.lat_minmax = lat_minmax + + def __call__(self, transform_xy, x1, y1, x2, y2): + # docstring inherited + x, y = np.meshgrid( + np.linspace(x1, x2, self.nx), np.linspace(y1, y2, self.ny)) + lon, lat = transform_xy(np.ravel(x), np.ravel(y)) + + # iron out jumps, but algorithm should be improved. + # This is just naive way of doing and my fail for some cases. + # Consider replacing this with numpy.unwrap + # We are ignoring invalid warnings. They are triggered when + # comparing arrays with NaNs using > We are already handling + # that correctly using np.nanmin and np.nanmax + with np.errstate(invalid='ignore'): + if self.lon_cycle is not None: + lon0 = np.nanmin(lon) + lon -= 360. * ((lon - lon0) > 180.) + if self.lat_cycle is not None: + lat0 = np.nanmin(lat) + lat -= 360. * ((lat - lat0) > 180.) 
+ + lon_min, lon_max = np.nanmin(lon), np.nanmax(lon) + lat_min, lat_max = np.nanmin(lat), np.nanmax(lat) + + lon_min, lon_max, lat_min, lat_max = \ + self._add_pad(lon_min, lon_max, lat_min, lat_max) + + # check cycle + if self.lon_cycle: + lon_max = min(lon_max, lon_min + self.lon_cycle) + if self.lat_cycle: + lat_max = min(lat_max, lat_min + self.lat_cycle) + + if self.lon_minmax is not None: + min0 = self.lon_minmax[0] + lon_min = max(min0, lon_min) + max0 = self.lon_minmax[1] + lon_max = min(max0, lon_max) + + if self.lat_minmax is not None: + min0 = self.lat_minmax[0] + lat_min = max(min0, lat_min) + max0 = self.lat_minmax[1] + lat_max = min(max0, lat_max) + + return lon_min, lon_max, lat_min, lat_max diff --git a/vllm/lib/python3.10/site-packages/mpl_toolkits/axisartist/axes_divider.py b/vllm/lib/python3.10/site-packages/mpl_toolkits/axisartist/axes_divider.py new file mode 100644 index 0000000000000000000000000000000000000000..d0392be782d9c06404c3d83c2b8ca271bbb8fa72 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/mpl_toolkits/axisartist/axes_divider.py @@ -0,0 +1,2 @@ +from mpl_toolkits.axes_grid1.axes_divider import ( # noqa + Divider, SubplotDivider, AxesDivider, make_axes_locatable) diff --git a/vllm/lib/python3.10/site-packages/mpl_toolkits/axisartist/axis_artist.py b/vllm/lib/python3.10/site-packages/mpl_toolkits/axisartist/axis_artist.py new file mode 100644 index 0000000000000000000000000000000000000000..b416d56abe6b7f194a1341637994a43ff70fe290 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/mpl_toolkits/axisartist/axis_artist.py @@ -0,0 +1,1115 @@ +""" +The :mod:`.axis_artist` module implements custom artists to draw axis elements +(axis lines and labels, tick lines and labels, grid lines). + +Axis lines and labels and tick lines and labels are managed by the `AxisArtist` +class; grid lines are managed by the `GridlinesCollection` class. 
+ +There is one `AxisArtist` per Axis; it can be accessed through +the ``axis`` dictionary of the parent Axes (which should be a +`mpl_toolkits.axislines.Axes`), e.g. ``ax.axis["bottom"]``. + +Children of the AxisArtist are accessed as attributes: ``.line`` and ``.label`` +for the axis line and label, ``.major_ticks``, ``.major_ticklabels``, +``.minor_ticks``, ``.minor_ticklabels`` for the tick lines and labels (e.g. +``ax.axis["bottom"].line``). + +Children properties (colors, fonts, line widths, etc.) can be set using +setters, e.g. :: + + # Make the major ticks of the bottom axis red. + ax.axis["bottom"].major_ticks.set_color("red") + +However, things like the locations of ticks, and their ticklabels need to be +changed from the side of the grid_helper. + +axis_direction +-------------- + +`AxisArtist`, `AxisLabel`, `TickLabels` have an *axis_direction* attribute, +which adjusts the location, angle, etc. The *axis_direction* must be one of +"left", "right", "bottom", "top", and follows the Matplotlib convention for +rectangular axis. + +For example, for the *bottom* axis (the left and right is relative to the +direction of the increasing coordinate), + +* ticklabels and axislabel are on the right +* ticklabels and axislabel have text angle of 0 +* ticklabels are baseline, center-aligned +* axislabel is top, center-aligned + +The text angles are actually relative to (90 + angle of the direction to the +ticklabel), which gives 0 for bottom axis. 
+ +=================== ====== ======== ====== ======== +Property left bottom right top +=================== ====== ======== ====== ======== +ticklabel location left right right left +axislabel location left right right left +ticklabel angle 90 0 -90 180 +axislabel angle 180 0 0 180 +ticklabel va center baseline center baseline +axislabel va center top center bottom +ticklabel ha right center right center +axislabel ha right center right center +=================== ====== ======== ====== ======== + +Ticks are by default direct opposite side of the ticklabels. To make ticks to +the same side of the ticklabels, :: + + ax.axis["bottom"].major_ticks.set_tick_out(True) + +The following attributes can be customized (use the ``set_xxx`` methods): + +* `Ticks`: ticksize, tick_out +* `TickLabels`: pad +* `AxisLabel`: pad +""" + +# FIXME : +# angles are given in data coordinate - need to convert it to canvas coordinate + + +from operator import methodcaller + +import numpy as np + +import matplotlib as mpl +from matplotlib import _api, cbook +import matplotlib.artist as martist +import matplotlib.colors as mcolors +import matplotlib.text as mtext +from matplotlib.collections import LineCollection +from matplotlib.lines import Line2D +from matplotlib.patches import PathPatch +from matplotlib.path import Path +from matplotlib.transforms import ( + Affine2D, Bbox, IdentityTransform, ScaledTranslation) + +from .axisline_style import AxislineStyle + + +class AttributeCopier: + def get_ref_artist(self): + """ + Return the underlying artist that actually defines some properties + (e.g., color) of this artist. 
+ """ + raise RuntimeError("get_ref_artist must overridden") + + def get_attribute_from_ref_artist(self, attr_name): + getter = methodcaller("get_" + attr_name) + prop = getter(super()) + return getter(self.get_ref_artist()) if prop == "auto" else prop + + +class Ticks(AttributeCopier, Line2D): + """ + Ticks are derived from `.Line2D`, and note that ticks themselves + are markers. Thus, you should use set_mec, set_mew, etc. + + To change the tick size (length), you need to use + `set_ticksize`. To change the direction of the ticks (ticks are + in opposite direction of ticklabels by default), use + ``set_tick_out(False)`` + """ + + def __init__(self, ticksize, tick_out=False, *, axis=None, **kwargs): + self._ticksize = ticksize + self.locs_angles_labels = [] + + self.set_tick_out(tick_out) + + self._axis = axis + if self._axis is not None: + if "color" not in kwargs: + kwargs["color"] = "auto" + if "mew" not in kwargs and "markeredgewidth" not in kwargs: + kwargs["markeredgewidth"] = "auto" + + Line2D.__init__(self, [0.], [0.], **kwargs) + self.set_snap(True) + + def get_ref_artist(self): + # docstring inherited + return self._axis.majorTicks[0].tick1line + + def set_color(self, color): + # docstring inherited + # Unlike the base Line2D.set_color, this also supports "auto". 
+ if not cbook._str_equal(color, "auto"): + mcolors._check_color_like(color=color) + self._color = color + self.stale = True + + def get_color(self): + return self.get_attribute_from_ref_artist("color") + + def get_markeredgecolor(self): + return self.get_attribute_from_ref_artist("markeredgecolor") + + def get_markeredgewidth(self): + return self.get_attribute_from_ref_artist("markeredgewidth") + + def set_tick_out(self, b): + """Set whether ticks are drawn inside or outside the axes.""" + self._tick_out = b + + def get_tick_out(self): + """Return whether ticks are drawn inside or outside the axes.""" + return self._tick_out + + def set_ticksize(self, ticksize): + """Set length of the ticks in points.""" + self._ticksize = ticksize + + def get_ticksize(self): + """Return length of the ticks in points.""" + return self._ticksize + + def set_locs_angles(self, locs_angles): + self.locs_angles = locs_angles + + _tickvert_path = Path([[0., 0.], [1., 0.]]) + + def draw(self, renderer): + if not self.get_visible(): + return + + gc = renderer.new_gc() + gc.set_foreground(self.get_markeredgecolor()) + gc.set_linewidth(self.get_markeredgewidth()) + gc.set_alpha(self._alpha) + + path_trans = self.get_transform() + marker_transform = (Affine2D() + .scale(renderer.points_to_pixels(self._ticksize))) + if self.get_tick_out(): + marker_transform.rotate_deg(180) + + for loc, angle in self.locs_angles: + locs = path_trans.transform_non_affine(np.array([loc])) + if self.axes and not self.axes.viewLim.contains(*locs[0]): + continue + renderer.draw_markers( + gc, self._tickvert_path, + marker_transform + Affine2D().rotate_deg(angle), + Path(locs), path_trans.get_affine()) + + gc.restore() + + +class LabelBase(mtext.Text): + """ + A base class for `.AxisLabel` and `.TickLabels`. The position and + angle of the text are calculated by the offset_ref_angle, + text_ref_angle, and offset_radius attributes. 
+ """ + + def __init__(self, *args, **kwargs): + self.locs_angles_labels = [] + self._ref_angle = 0 + self._offset_radius = 0. + + super().__init__(*args, **kwargs) + + self.set_rotation_mode("anchor") + self._text_follow_ref_angle = True + + @property + def _text_ref_angle(self): + if self._text_follow_ref_angle: + return self._ref_angle + 90 + else: + return 0 + + @property + def _offset_ref_angle(self): + return self._ref_angle + + _get_opposite_direction = {"left": "right", + "right": "left", + "top": "bottom", + "bottom": "top"}.__getitem__ + + def draw(self, renderer): + if not self.get_visible(): + return + + # save original and adjust some properties + tr = self.get_transform() + angle_orig = self.get_rotation() + theta = np.deg2rad(self._offset_ref_angle) + dd = self._offset_radius + dx, dy = dd * np.cos(theta), dd * np.sin(theta) + + self.set_transform(tr + Affine2D().translate(dx, dy)) + self.set_rotation(self._text_ref_angle + angle_orig) + super().draw(renderer) + # restore original properties + self.set_transform(tr) + self.set_rotation(angle_orig) + + def get_window_extent(self, renderer=None): + if renderer is None: + renderer = self.get_figure(root=True)._get_renderer() + + # save original and adjust some properties + tr = self.get_transform() + angle_orig = self.get_rotation() + theta = np.deg2rad(self._offset_ref_angle) + dd = self._offset_radius + dx, dy = dd * np.cos(theta), dd * np.sin(theta) + + self.set_transform(tr + Affine2D().translate(dx, dy)) + self.set_rotation(self._text_ref_angle + angle_orig) + bbox = super().get_window_extent(renderer).frozen() + # restore original properties + self.set_transform(tr) + self.set_rotation(angle_orig) + + return bbox + + +class AxisLabel(AttributeCopier, LabelBase): + """ + Axis label. Derived from `.Text`. The position of the text is updated + in the fly, so changing text position has no effect. Otherwise, the + properties can be changed as a normal `.Text`. 
+ + To change the pad between tick labels and axis label, use `set_pad`. + """ + + def __init__(self, *args, axis_direction="bottom", axis=None, **kwargs): + self._axis = axis + self._pad = 5 + self._external_pad = 0 # in pixels + LabelBase.__init__(self, *args, **kwargs) + self.set_axis_direction(axis_direction) + + def set_pad(self, pad): + """ + Set the internal pad in points. + + The actual pad will be the sum of the internal pad and the + external pad (the latter is set automatically by the `.AxisArtist`). + + Parameters + ---------- + pad : float + The internal pad in points. + """ + self._pad = pad + + def get_pad(self): + """ + Return the internal pad in points. + + See `.set_pad` for more details. + """ + return self._pad + + def get_ref_artist(self): + # docstring inherited + return self._axis.label + + def get_text(self): + # docstring inherited + t = super().get_text() + if t == "__from_axes__": + return self._axis.label.get_text() + return self._text + + _default_alignments = dict(left=("bottom", "center"), + right=("top", "center"), + bottom=("top", "center"), + top=("bottom", "center")) + + def set_default_alignment(self, d): + """ + Set the default alignment. See `set_axis_direction` for details. + + Parameters + ---------- + d : {"left", "bottom", "right", "top"} + """ + va, ha = _api.check_getitem(self._default_alignments, d=d) + self.set_va(va) + self.set_ha(ha) + + _default_angles = dict(left=180, + right=0, + bottom=0, + top=180) + + def set_default_angle(self, d): + """ + Set the default angle. See `set_axis_direction` for details. + + Parameters + ---------- + d : {"left", "bottom", "right", "top"} + """ + self.set_rotation(_api.check_getitem(self._default_angles, d=d)) + + def set_axis_direction(self, d): + """ + Adjust the text angle and text alignment of axis label + according to the matplotlib convention. 
+ + ===================== ========== ========= ========== ========== + Property left bottom right top + ===================== ========== ========= ========== ========== + axislabel angle 180 0 0 180 + axislabel va center top center bottom + axislabel ha right center right center + ===================== ========== ========= ========== ========== + + Note that the text angles are actually relative to (90 + angle + of the direction to the ticklabel), which gives 0 for bottom + axis. + + Parameters + ---------- + d : {"left", "bottom", "right", "top"} + """ + self.set_default_alignment(d) + self.set_default_angle(d) + + def get_color(self): + return self.get_attribute_from_ref_artist("color") + + def draw(self, renderer): + if not self.get_visible(): + return + + self._offset_radius = \ + self._external_pad + renderer.points_to_pixels(self.get_pad()) + + super().draw(renderer) + + def get_window_extent(self, renderer=None): + if renderer is None: + renderer = self.get_figure(root=True)._get_renderer() + if not self.get_visible(): + return + + r = self._external_pad + renderer.points_to_pixels(self.get_pad()) + self._offset_radius = r + + bb = super().get_window_extent(renderer) + + return bb + + +class TickLabels(AxisLabel): # mtext.Text + """ + Tick labels. While derived from `.Text`, this single artist draws all + ticklabels. As in `.AxisLabel`, the position of the text is updated + in the fly, so changing text position has no effect. Otherwise, + the properties can be changed as a normal `.Text`. Unlike the + ticklabels of the mainline Matplotlib, properties of a single + ticklabel alone cannot be modified. + + To change the pad between ticks and ticklabels, use `~.AxisLabel.set_pad`. 
+ """ + + def __init__(self, *, axis_direction="bottom", **kwargs): + super().__init__(**kwargs) + self.set_axis_direction(axis_direction) + self._axislabel_pad = 0 + + def get_ref_artist(self): + # docstring inherited + return self._axis.get_ticklabels()[0] + + def set_axis_direction(self, label_direction): + """ + Adjust the text angle and text alignment of ticklabels + according to the Matplotlib convention. + + The *label_direction* must be one of [left, right, bottom, top]. + + ===================== ========== ========= ========== ========== + Property left bottom right top + ===================== ========== ========= ========== ========== + ticklabel angle 90 0 -90 180 + ticklabel va center baseline center baseline + ticklabel ha right center right center + ===================== ========== ========= ========== ========== + + Note that the text angles are actually relative to (90 + angle + of the direction to the ticklabel), which gives 0 for bottom + axis. + + Parameters + ---------- + label_direction : {"left", "bottom", "right", "top"} + + """ + self.set_default_alignment(label_direction) + self.set_default_angle(label_direction) + self._axis_direction = label_direction + + def invert_axis_direction(self): + label_direction = self._get_opposite_direction(self._axis_direction) + self.set_axis_direction(label_direction) + + def _get_ticklabels_offsets(self, renderer, label_direction): + """ + Calculate the ticklabel offsets from the tick and their total heights. + + The offset only takes account the offset due to the vertical alignment + of the ticklabels: if axis direction is bottom and va is 'top', it will + return 0; if va is 'baseline', it will return (height-descent). 
+ """ + whd_list = self.get_texts_widths_heights_descents(renderer) + + if not whd_list: + return 0, 0 + + r = 0 + va, ha = self.get_va(), self.get_ha() + + if label_direction == "left": + pad = max(w for w, h, d in whd_list) + if ha == "left": + r = pad + elif ha == "center": + r = .5 * pad + elif label_direction == "right": + pad = max(w for w, h, d in whd_list) + if ha == "right": + r = pad + elif ha == "center": + r = .5 * pad + elif label_direction == "bottom": + pad = max(h for w, h, d in whd_list) + if va == "bottom": + r = pad + elif va == "center": + r = .5 * pad + elif va == "baseline": + max_ascent = max(h - d for w, h, d in whd_list) + max_descent = max(d for w, h, d in whd_list) + r = max_ascent + pad = max_ascent + max_descent + elif label_direction == "top": + pad = max(h for w, h, d in whd_list) + if va == "top": + r = pad + elif va == "center": + r = .5 * pad + elif va == "baseline": + max_ascent = max(h - d for w, h, d in whd_list) + max_descent = max(d for w, h, d in whd_list) + r = max_descent + pad = max_ascent + max_descent + + # r : offset + # pad : total height of the ticklabels. This will be used to + # calculate the pad for the axislabel. + return r, pad + + _default_alignments = dict(left=("center", "right"), + right=("center", "left"), + bottom=("baseline", "center"), + top=("baseline", "center")) + + _default_angles = dict(left=90, + right=-90, + bottom=0, + top=180) + + def draw(self, renderer): + if not self.get_visible(): + self._axislabel_pad = self._external_pad + return + + r, total_width = self._get_ticklabels_offsets(renderer, + self._axis_direction) + + pad = self._external_pad + renderer.points_to_pixels(self.get_pad()) + self._offset_radius = r + pad + + for (x, y), a, l in self._locs_angles_labels: + if not l.strip(): + continue + self._ref_angle = a + self.set_x(x) + self.set_y(y) + self.set_text(l) + LabelBase.draw(self, renderer) + + # the value saved will be used to draw axislabel. 
+ self._axislabel_pad = total_width + pad + + def set_locs_angles_labels(self, locs_angles_labels): + self._locs_angles_labels = locs_angles_labels + + def get_window_extents(self, renderer=None): + if renderer is None: + renderer = self.get_figure(root=True)._get_renderer() + + if not self.get_visible(): + self._axislabel_pad = self._external_pad + return [] + + bboxes = [] + + r, total_width = self._get_ticklabels_offsets(renderer, + self._axis_direction) + + pad = self._external_pad + renderer.points_to_pixels(self.get_pad()) + self._offset_radius = r + pad + + for (x, y), a, l in self._locs_angles_labels: + self._ref_angle = a + self.set_x(x) + self.set_y(y) + self.set_text(l) + bb = LabelBase.get_window_extent(self, renderer) + bboxes.append(bb) + + # the value saved will be used to draw axislabel. + self._axislabel_pad = total_width + pad + + return bboxes + + def get_texts_widths_heights_descents(self, renderer): + """ + Return a list of ``(width, height, descent)`` tuples for ticklabels. + + Empty labels are left out. + """ + whd_list = [] + for _loc, _angle, label in self._locs_angles_labels: + if not label.strip(): + continue + clean_line, ismath = self._preprocess_math(label) + whd = renderer.get_text_width_height_descent( + clean_line, self._fontproperties, ismath=ismath) + whd_list.append(whd) + return whd_list + + +class GridlinesCollection(LineCollection): + def __init__(self, *args, which="major", axis="both", **kwargs): + """ + Collection of grid lines. + + Parameters + ---------- + which : {"major", "minor"} + Which grid to consider. + axis : {"both", "x", "y"} + Which axis to consider. + *args, **kwargs + Passed to `.LineCollection`. + """ + self._which = which + self._axis = axis + super().__init__(*args, **kwargs) + self.set_grid_helper(None) + + def set_which(self, which): + """ + Select major or minor grid lines. 
+ + Parameters + ---------- + which : {"major", "minor"} + """ + self._which = which + + def set_axis(self, axis): + """ + Select axis. + + Parameters + ---------- + axis : {"both", "x", "y"} + """ + self._axis = axis + + def set_grid_helper(self, grid_helper): + """ + Set grid helper. + + Parameters + ---------- + grid_helper : `.GridHelperBase` subclass + """ + self._grid_helper = grid_helper + + def draw(self, renderer): + if self._grid_helper is not None: + self._grid_helper.update_lim(self.axes) + gl = self._grid_helper.get_gridlines(self._which, self._axis) + self.set_segments([np.transpose(l) for l in gl]) + super().draw(renderer) + + +class AxisArtist(martist.Artist): + """ + An artist which draws axis (a line along which the n-th axes coord + is constant) line, ticks, tick labels, and axis label. + """ + + zorder = 2.5 + + @property + def LABELPAD(self): + return self.label.get_pad() + + @LABELPAD.setter + def LABELPAD(self, v): + self.label.set_pad(v) + + def __init__(self, axes, + helper, + offset=None, + axis_direction="bottom", + **kwargs): + """ + Parameters + ---------- + axes : `mpl_toolkits.axisartist.axislines.Axes` + helper : `~mpl_toolkits.axisartist.axislines.AxisArtistHelper` + """ + # axes is also used to follow the axis attribute (tick color, etc). + + super().__init__(**kwargs) + + self.axes = axes + + self._axis_artist_helper = helper + + if offset is None: + offset = (0, 0) + self.offset_transform = ScaledTranslation( + *offset, + Affine2D().scale(1 / 72) # points to inches. + + self.axes.get_figure(root=False).dpi_scale_trans) + + if axis_direction in ["left", "right"]: + self.axis = axes.yaxis + else: + self.axis = axes.xaxis + + self._axisline_style = None + self._axis_direction = axis_direction + + self._init_line() + self._init_ticks(**kwargs) + self._init_offsetText(axis_direction) + self._init_label() + + # axis direction + self._ticklabel_add_angle = 0. + self._axislabel_add_angle = 0. 
+ self.set_axis_direction(axis_direction) + + # axis direction + + def set_axis_direction(self, axis_direction): + """ + Adjust the direction, text angle, and text alignment of tick labels + and axis labels following the Matplotlib convention for the rectangle + axes. + + The *axis_direction* must be one of [left, right, bottom, top]. + + ===================== ========== ========= ========== ========== + Property left bottom right top + ===================== ========== ========= ========== ========== + ticklabel direction "-" "+" "+" "-" + axislabel direction "-" "+" "+" "-" + ticklabel angle 90 0 -90 180 + ticklabel va center baseline center baseline + ticklabel ha right center right center + axislabel angle 180 0 0 180 + axislabel va center top center bottom + axislabel ha right center right center + ===================== ========== ========= ========== ========== + + Note that the direction "+" and "-" are relative to the direction of + the increasing coordinate. Also, the text angles are actually + relative to (90 + angle of the direction to the ticklabel), + which gives 0 for bottom axis. + + Parameters + ---------- + axis_direction : {"left", "bottom", "right", "top"} + """ + self.major_ticklabels.set_axis_direction(axis_direction) + self.label.set_axis_direction(axis_direction) + self._axis_direction = axis_direction + if axis_direction in ["left", "top"]: + self.set_ticklabel_direction("-") + self.set_axislabel_direction("-") + else: + self.set_ticklabel_direction("+") + self.set_axislabel_direction("+") + + def set_ticklabel_direction(self, tick_direction): + r""" + Adjust the direction of the tick labels. + + Note that the *tick_direction*\s '+' and '-' are relative to the + direction of the increasing coordinate. 
+ + Parameters + ---------- + tick_direction : {"+", "-"} + """ + self._ticklabel_add_angle = _api.check_getitem( + {"+": 0, "-": 180}, tick_direction=tick_direction) + + def invert_ticklabel_direction(self): + self._ticklabel_add_angle = (self._ticklabel_add_angle + 180) % 360 + self.major_ticklabels.invert_axis_direction() + self.minor_ticklabels.invert_axis_direction() + + def set_axislabel_direction(self, label_direction): + r""" + Adjust the direction of the axis label. + + Note that the *label_direction*\s '+' and '-' are relative to the + direction of the increasing coordinate. + + Parameters + ---------- + label_direction : {"+", "-"} + """ + self._axislabel_add_angle = _api.check_getitem( + {"+": 0, "-": 180}, label_direction=label_direction) + + def get_transform(self): + return self.axes.transAxes + self.offset_transform + + def get_helper(self): + """ + Return axis artist helper instance. + """ + return self._axis_artist_helper + + def set_axisline_style(self, axisline_style=None, **kwargs): + """ + Set the axisline style. + + The new style is completely defined by the passed attributes. Existing + style attributes are forgotten. + + Parameters + ---------- + axisline_style : str or None + The line style, e.g. '->', optionally followed by a comma-separated + list of attributes. Alternatively, the attributes can be provided + as keywords. + + If *None* this returns a string containing the available styles. 
        Examples
        --------
        The following two commands are equal:

        >>> set_axisline_style("->,size=1.5")
        >>> set_axisline_style("->", size=1.5)
        """
        if axisline_style is None:
            return AxislineStyle.pprint_styles()

        if isinstance(axisline_style, AxislineStyle._Base):
            self._axisline_style = axisline_style
        else:
            self._axisline_style = AxislineStyle(axisline_style, **kwargs)

        self._init_line()

    def get_axisline_style(self):
        """Return the current axisline style."""
        return self._axisline_style

    def _init_line(self):
        """
        Initialize the *line* artist that is responsible to draw the axis line.
        """
        tran = (self._axis_artist_helper.get_line_transform(self.axes)
                + self.offset_transform)

        axisline_style = self.get_axisline_style()
        if axisline_style is None:
            # No style set: draw a plain spine-like PathPatch styled from rc.
            self.line = PathPatch(
                self._axis_artist_helper.get_line(self.axes),
                color=mpl.rcParams['axes.edgecolor'],
                fill=False,
                linewidth=mpl.rcParams['axes.linewidth'],
                capstyle=mpl.rcParams['lines.solid_capstyle'],
                joinstyle=mpl.rcParams['lines.solid_joinstyle'],
                transform=tran)
        else:
            # A style instance is a factory returning the line artist.
            self.line = axisline_style(self, transform=tran)

    def _draw_line(self, renderer):
        """Update the axis-line path from the helper and draw it."""
        self.line.set_path(self._axis_artist_helper.get_line(self.axes))
        if self.get_axisline_style() is not None:
            # Scale arrow heads (etc.) with the major ticklabel size.
            self.line.set_line_mutation_scale(self.major_ticklabels.get_size())
        self.line.draw(renderer)

    def _init_ticks(self, **kwargs):
        """Create the Ticks and TickLabels children from rcParams/kwargs."""
        axis_name = self.axis.axis_name

        trans = (self._axis_artist_helper.get_tick_transform(self.axes)
                 + self.offset_transform)

        self.major_ticks = Ticks(
            kwargs.get(
                "major_tick_size",
                mpl.rcParams[f"{axis_name}tick.major.size"]),
            axis=self.axis, transform=trans)
        self.minor_ticks = Ticks(
            kwargs.get(
                "minor_tick_size",
                mpl.rcParams[f"{axis_name}tick.minor.size"]),
            axis=self.axis, transform=trans)

        size = mpl.rcParams[f"{axis_name}tick.labelsize"]
        self.major_ticklabels = TickLabels(
            axis=self.axis,
            axis_direction=self._axis_direction,
            figure=self.axes.get_figure(root=False),
            transform=trans,
            fontsize=size,
            pad=kwargs.get(
                "major_tick_pad", mpl.rcParams[f"{axis_name}tick.major.pad"]),
        )
        self.minor_ticklabels = TickLabels(
            axis=self.axis,
            axis_direction=self._axis_direction,
            figure=self.axes.get_figure(root=False),
            transform=trans,
            fontsize=size,
            pad=kwargs.get(
                "minor_tick_pad", mpl.rcParams[f"{axis_name}tick.minor.pad"]),
        )

    def _get_tick_info(self, tick_iter):
        """
        Return a pair of:

        - list of locs and angles for ticks
        - list of locs, angles and labels for ticklabels.
        """
        ticks_loc_angle = []
        ticklabels_loc_angle_label = []

        ticklabel_add_angle = self._ticklabel_add_angle

        for loc, angle_normal, angle_tangent, label in tick_iter:
            angle_label = angle_tangent - 90 + ticklabel_add_angle
            # Flip the tick so it points away from the ticklabel side.
            angle_tick = (angle_normal
                          if 90 <= (angle_label - angle_normal) % 360 <= 270
                          else angle_normal + 180)
            ticks_loc_angle.append([loc, angle_tick])
            ticklabels_loc_angle_label.append([loc, angle_label, label])

        return ticks_loc_angle, ticklabels_loc_angle_label

    def _update_ticks(self, renderer=None):
        """Recompute tick/ticklabel positions and angles from the helper."""
        # set extra pad for major and minor ticklabels: use ticksize of
        # majorticks even for minor ticks. not clear what is best.

        if renderer is None:
            renderer = self.get_figure(root=True)._get_renderer()

        dpi_cor = renderer.points_to_pixels(1.)
        if self.major_ticks.get_visible() and self.major_ticks.get_tick_out():
            # Outward-pointing ticks push the ticklabels further out.
            ticklabel_pad = self.major_ticks._ticksize * dpi_cor
            self.major_ticklabels._external_pad = ticklabel_pad
            self.minor_ticklabels._external_pad = ticklabel_pad
        else:
            self.major_ticklabels._external_pad = 0
            self.minor_ticklabels._external_pad = 0

        majortick_iter, minortick_iter = \
            self._axis_artist_helper.get_tick_iterators(self.axes)

        tick_loc_angle, ticklabel_loc_angle_label = \
            self._get_tick_info(majortick_iter)
        self.major_ticks.set_locs_angles(tick_loc_angle)
        self.major_ticklabels.set_locs_angles_labels(ticklabel_loc_angle_label)

        tick_loc_angle, ticklabel_loc_angle_label = \
            self._get_tick_info(minortick_iter)
        self.minor_ticks.set_locs_angles(tick_loc_angle)
        self.minor_ticklabels.set_locs_angles_labels(ticklabel_loc_angle_label)

    def _draw_ticks(self, renderer):
        """Draw ticks and ticklabels (and the offset text when labels show)."""
        self._update_ticks(renderer)
        self.major_ticks.draw(renderer)
        self.major_ticklabels.draw(renderer)
        self.minor_ticks.draw(renderer)
        self.minor_ticklabels.draw(renderer)
        if (self.major_ticklabels.get_visible()
                or self.minor_ticklabels.get_visible()):
            self._draw_offsetText(renderer)

    # Per-direction anchor for the offset text: (x, y in axes fraction,
    # vertical alignment, horizontal alignment).
    _offsetText_pos = dict(left=(0, 1, "bottom", "right"),
                           right=(1, 1, "bottom", "left"),
                           bottom=(1, 0, "top", "right"),
                           top=(1, 1, "bottom", "right"))

    def _init_offsetText(self, direction):
        """Create the offset-text Annotation anchored per *direction*."""
        x, y, va, ha = self._offsetText_pos[direction]
        self.offsetText = mtext.Annotation(
            "",
            xy=(x, y), xycoords="axes fraction",
            xytext=(0, 0), textcoords="offset points",
            color=mpl.rcParams['xtick.color'],
            horizontalalignment=ha, verticalalignment=va,
        )
        self.offsetText.set_transform(IdentityTransform())
        self.axes._set_artist_props(self.offsetText)

    def _update_offsetText(self):
        """Refresh the offset text string, size and padding offset."""
        self.offsetText.set_text(self.axis.major.formatter.get_offset())
        self.offsetText.set_size(self.major_ticklabels.get_size())
        offset = (self.major_ticklabels.get_pad()
                  + self.major_ticklabels.get_size()
                  + 2)
        self.offsetText.xyann = (0, offset)

    def _draw_offsetText(self, renderer):
        self._update_offsetText()
        self.offsetText.draw(renderer)

    def _init_label(self, **kwargs):
        """Create the AxisLabel child (axis label text artist)."""
        tr = (self._axis_artist_helper.get_axislabel_transform(self.axes)
              + self.offset_transform)
        self.label = AxisLabel(
            0, 0, "__from_axes__",
            color="auto",
            fontsize=kwargs.get("labelsize", mpl.rcParams['axes.labelsize']),
            fontweight=mpl.rcParams['axes.labelweight'],
            axis=self.axis,
            transform=tr,
            axis_direction=self._axis_direction,
        )
        self.label.set_figure(self.axes.get_figure(root=False))
        labelpad = kwargs.get("labelpad", 5)
        self.label.set_pad(labelpad)

    def _update_label(self, renderer):
        """Recompute the axis-label position, angle and external padding."""
        if not self.label.get_visible():
            return

        if self._ticklabel_add_angle != self._axislabel_add_angle:
            # NOTE(review): the minor branch also tests
            # major_ticks.get_tick_out(); this matches upstream matplotlib
            # but looks like a possible copy-paste slip (minor_ticks?) —
            # confirm against upstream before changing.
            if ((self.major_ticks.get_visible()
                 and not self.major_ticks.get_tick_out())
                or (self.minor_ticks.get_visible()
                    and not self.major_ticks.get_tick_out())):
                axislabel_pad = self.major_ticks._ticksize
            else:
                axislabel_pad = 0
        else:
            axislabel_pad = max(self.major_ticklabels._axislabel_pad,
                                self.minor_ticklabels._axislabel_pad)

        self.label._external_pad = axislabel_pad

        xy, angle_tangent = \
            self._axis_artist_helper.get_axislabel_pos_angle(self.axes)
        if xy is None:
            return

        angle_label = angle_tangent - 90

        x, y = xy
        self.label._ref_angle = angle_label + self._axislabel_add_angle
        self.label.set(x=x, y=y)

    def _draw_label(self, renderer):
        self._update_label(renderer)
        self.label.draw(renderer)

    def set_label(self, s):
        # docstring inherited
        self.label.set_text(s)

    def get_tightbbox(self, renderer=None):
        """Return the bbox of all visible children, or None if empty."""
        if not self.get_visible():
            return
        self._axis_artist_helper.update_lim(self.axes)
        self._update_ticks(renderer)
        self._update_label(renderer)

        self.line.set_path(self._axis_artist_helper.get_line(self.axes))
        if self.get_axisline_style() is not None:
            self.line.set_line_mutation_scale(self.major_ticklabels.get_size())

        bb = [
            *self.major_ticklabels.get_window_extents(renderer),
            *self.minor_ticklabels.get_window_extents(renderer),
            self.label.get_window_extent(renderer),
            self.offsetText.get_window_extent(renderer),
            self.line.get_window_extent(renderer),
        ]
        # Drop empty / degenerate boxes before taking the union.
        bb = [b for b in bb if b and (b.width != 0 or b.height != 0)]
        if bb:
            _bbox = Bbox.union(bb)
            return _bbox
        else:
            return None

    @martist.allow_rasterization
    def draw(self, renderer):
        # docstring inherited
        if not self.get_visible():
            return
        renderer.open_group(__name__, gid=self.get_gid())
        self._axis_artist_helper.update_lim(self.axes)
        self._draw_ticks(renderer)
        self._draw_line(renderer)
        self._draw_label(renderer)
        renderer.close_group(__name__)

    def toggle(self, all=None, ticks=None, ticklabels=None, label=None):
        """
        Toggle visibility of ticks, ticklabels, and (axis) label.
        To turn all off, ::

          axis.toggle(all=False)

        To turn all off but ticks on ::

          axis.toggle(all=False, ticks=True)

        To turn all on but (axis) label off ::

          axis.toggle(all=True, label=False)

        """
        if all:
            _ticks, _ticklabels, _label = True, True, True
        elif all is not None:
            # *all* is a non-None falsy value: switch everything off.
            _ticks, _ticklabels, _label = False, False, False
        else:
            # *all* is None: leave unspecified components untouched.
            _ticks, _ticklabels, _label = None, None, None

        # Per-component arguments override the *all* default.
        if ticks is not None:
            _ticks = ticks
        if ticklabels is not None:
            _ticklabels = ticklabels
        if label is not None:
            _label = label

        if _ticks is not None:
            self.major_ticks.set_visible(_ticks)
            self.minor_ticks.set_visible(_ticks)
        if _ticklabels is not None:
            self.major_ticklabels.set_visible(_ticklabels)
            self.minor_ticklabels.set_visible(_ticklabels)
        if _label is not None:
            self.label.set_visible(_label)

# ---- file: mpl_toolkits/axisartist/axisline_style.py (new file in this diff) ----
# ---- file: mpl_toolkits/axisartist/axisline_style.py (new file in this diff) ----
"""
Provides classes to style the axis lines.
"""
import math

import numpy as np

import matplotlib as mpl
from matplotlib.patches import _Style, FancyArrowPatch
from matplotlib.path import Path
from matplotlib.transforms import IdentityTransform


class _FancyAxislineStyle:
    class SimpleArrow(FancyArrowPatch):
        """The artist class that will be returned for SimpleArrow style."""
        _ARROW_STYLE = "->"

        def __init__(self, axis_artist, line_path, transform,
                     line_mutation_scale):
            self._axis_artist = axis_artist
            self._line_transform = transform
            self._line_path = line_path
            self._line_mutation_scale = line_mutation_scale

            FancyArrowPatch.__init__(self,
                                     path=self._line_path,
                                     arrowstyle=self._ARROW_STYLE,
                                     patchA=None,
                                     patchB=None,
                                     shrinkA=0.,
                                     shrinkB=0.,
                                     mutation_scale=line_mutation_scale,
                                     mutation_aspect=None,
                                     transform=IdentityTransform(),
                                     )

        def set_line_mutation_scale(self, scale):
            """Set the overall mutation scale as *scale* times the base scale."""
            self.set_mutation_scale(scale*self._line_mutation_scale)

        def _extend_path(self, path, mutation_size=10):
            """
            Extend the path to make room for drawing the arrow.
            """
            # Prolong the last segment by *mutation_size* in its own direction.
            (x0, y0), (x1, y1) = path.vertices[-2:]
            theta = math.atan2(y1 - y0, x1 - x0)
            x2 = x1 + math.cos(theta) * mutation_size
            y2 = y1 + math.sin(theta) * mutation_size
            if path.codes is None:
                return Path(np.concatenate([path.vertices, [[x2, y2]]]))
            else:
                return Path(np.concatenate([path.vertices, [[x2, y2]]]),
                            np.concatenate([path.codes, [Path.LINETO]]))

        def set_path(self, path):
            """Set the (untransformed) axis-line path."""
            self._line_path = path

        def draw(self, renderer):
            """
            Draw the axis line.
            1) Transform the path to the display coordinate.
            2) Extend the path to make room for the arrow.
            3) Update the path of the FancyArrowPatch.
            4) Draw.
            """
            path_in_disp = self._line_transform.transform_path(self._line_path)
            mutation_size = self.get_mutation_scale()  # line_mutation_scale()
            extended_path = self._extend_path(path_in_disp,
                                              mutation_size=mutation_size)
            self._path_original = extended_path
            FancyArrowPatch.draw(self, renderer)

        def get_window_extent(self, renderer=None):
            # Same path preparation as draw(), so the extent matches what is
            # actually rendered.
            path_in_disp = self._line_transform.transform_path(self._line_path)
            mutation_size = self.get_mutation_scale()  # line_mutation_scale()
            extended_path = self._extend_path(path_in_disp,
                                              mutation_size=mutation_size)
            self._path_original = extended_path
            return FancyArrowPatch.get_window_extent(self, renderer)

    class FilledArrow(SimpleArrow):
        """The artist class that will be returned for FilledArrow style."""
        _ARROW_STYLE = "-|>"

        def __init__(self, axis_artist, line_path, transform,
                     line_mutation_scale, facecolor):
            super().__init__(axis_artist, line_path, transform,
                             line_mutation_scale)
            self.set_facecolor(facecolor)


class AxislineStyle(_Style):
    """
    A container class which defines style classes for AxisArtists.

    An instance of any axisline style class is a callable object,
    whose call signature is ::

        __call__(self, axis_artist, transform)

    When called, this should return an `.Artist` with the following methods::

      def set_path(self, path):
          # set the path for axisline.

      def set_line_mutation_scale(self, scale):
          # set the scale

      def draw(self, renderer):
          # draw
    """

    # Registry mapping style names (e.g. "->") to style classes; used by
    # the _Style base class for lookup and pprint_styles().
    _style_list = {}

    class _Base:
        # The derived classes are required to be able to be initialized
        # w/o arguments, i.e., all its argument (except self) must have
        # the default values.

        def __init__(self):
            """
            initialization.
            """
            super().__init__()

        def __call__(self, axis_artist, transform):
            """
            Given the AxisArtist instance, and transform for the path (set_path
            method), return the Matplotlib artist for drawing the axis line.
            """
            return self.new_line(axis_artist, transform)

    class SimpleArrow(_Base):
        """
        A simple arrow.
        """

        ArrowAxisClass = _FancyAxislineStyle.SimpleArrow

        def __init__(self, size=1):
            """
            Parameters
            ----------
            size : float
                Size of the arrow as a fraction of the ticklabel size.
            """

            self.size = size
            super().__init__()

        def new_line(self, axis_artist, transform):
            """Return the arrow artist for *axis_artist* under *transform*."""
            # Dummy vertical unit path; the real path is set via set_path().
            linepath = Path([(0, 0), (0, 1)])
            axisline = self.ArrowAxisClass(axis_artist, linepath, transform,
                                           line_mutation_scale=self.size)
            return axisline

    _style_list["->"] = SimpleArrow

    class FilledArrow(SimpleArrow):
        """
        An arrow with a filled head.
        """

        ArrowAxisClass = _FancyAxislineStyle.FilledArrow

        def __init__(self, size=1, facecolor=None):
            """
            Parameters
            ----------
            size : float
                Size of the arrow as a fraction of the ticklabel size.
            facecolor : :mpltype:`color`, default: :rc:`axes.edgecolor`
                Fill color.

                .. versionadded:: 3.7
            """

            if facecolor is None:
                facecolor = mpl.rcParams['axes.edgecolor']
            self.size = size
            self._facecolor = facecolor
            super().__init__(size=size)

        def new_line(self, axis_artist, transform):
            """Return the filled-arrow artist for *axis_artist*."""
            linepath = Path([(0, 0), (0, 1)])
            axisline = self.ArrowAxisClass(axis_artist, linepath, transform,
                                           line_mutation_scale=self.size,
                                           facecolor=self._facecolor)
            return axisline

    _style_list["-|>"] = FilledArrow

# ---- file: mpl_toolkits/axisartist/axislines.py (new file in this diff) ----
# ---- file: mpl_toolkits/axisartist/axislines.py (new file in this diff) ----
"""
Axislines includes modified implementation of the Axes class. The
biggest difference is that the artists responsible for drawing the axis spine,
ticks, ticklabels and axis labels are separated out from Matplotlib's Axis
class. Originally, this change was motivated to support curvilinear
grid. Here are a few reasons that I came up with a new axes class:

* "top" and "bottom" x-axis (or "left" and "right" y-axis) can have
  different ticks (tick locations and labels). This is not possible
  with the current Matplotlib, although some twin axes trick can help.

* Curvilinear grid.

* angled ticks.

In the new axes class, xaxis and yaxis is set to not visible by
default, and new set of artist (AxisArtist) are defined to draw axis
line, ticks, ticklabels and axis label. Axes.axis attribute serves as
a dictionary of these artists, i.e., ax.axis["left"] is a AxisArtist
instance responsible to draw left y-axis. The default Axes.axis contains
"bottom", "left", "top" and "right".

AxisArtist can be considered as a container artist and has the following
children artists which will draw ticks, labels, etc.

* line
* major_ticks, major_ticklabels
* minor_ticks, minor_ticklabels
* offsetText
* label

Note that these are separate artists from `matplotlib.axis.Axis`, thus most
tick-related functions in Matplotlib won't work. For example, color and
markerwidth of the ``ax.axis["bottom"].major_ticks`` will follow those of
Axes.xaxis unless explicitly specified.

In addition to AxisArtist, the Axes will have *gridlines* attribute,
which obviously draws grid lines. The gridlines needs to be separated
from the axis as some gridlines can never pass any axis.
"""

import numpy as np

import matplotlib as mpl
from matplotlib import _api
import matplotlib.axes as maxes
from matplotlib.path import Path
from mpl_toolkits.axes_grid1 import mpl_axes
from .axisline_style import AxislineStyle  # noqa
from .axis_artist import AxisArtist, GridlinesCollection


class _AxisArtistHelperBase:
    """
    Base class for axis helper.

    Subclasses should define the methods listed below.  The *axes*
    argument will be the ``.axes`` attribute of the caller artist. ::

        # Construct the spine.

        def get_line_transform(self, axes):
            return transform

        def get_line(self, axes):
            return path

        # Construct the label.

        def get_axislabel_transform(self, axes):
            return transform

        def get_axislabel_pos_angle(self, axes):
            return (x, y), angle

        # Construct the ticks.

        def get_tick_transform(self, axes):
            return transform

        def get_tick_iterators(self, axes):
            # A pair of iterables (one for major ticks, one for minor ticks)
            # that yield (tick_position, tick_angle, tick_label).
            return iter_major, iter_minor
    """

    def __init__(self, nth_coord):
        # nth_coord: 0 for an x-axis-like helper, 1 for a y-axis-like one.
        self.nth_coord = nth_coord

    def update_lim(self, axes):
        # Hook for subclasses that need to react to limit changes.
        pass

    def get_nth_coord(self):
        return self.nth_coord

    def _to_xy(self, values, const):
        """
        Create a (*values.shape, 2)-shape array representing (x, y) pairs.

        The other coordinate is filled with the constant *const*.

        Example::

            >>> self.nth_coord = 0
            >>> self._to_xy([1, 2, 3], const=0)
            array([[1, 0],
                   [2, 0],
                   [3, 0]])
        """
        if self.nth_coord == 0:
            return np.stack(np.broadcast_arrays(values, const), axis=-1)
        elif self.nth_coord == 1:
            return np.stack(np.broadcast_arrays(const, values), axis=-1)
        else:
            raise ValueError("Unexpected nth_coord")


class _FixedAxisArtistHelperBase(_AxisArtistHelperBase):
    """Helper class for a fixed (in the axes coordinate) axis."""

    @_api.delete_parameter("3.9", "nth_coord")
    def __init__(self, loc, nth_coord=None):
        """``nth_coord = 0``: x-axis; ``nth_coord = 1``: y-axis."""
        super().__init__(_api.check_getitem(
            {"bottom": 0, "top": 0, "left": 1, "right": 1}, loc=loc))
        self._loc = loc
        self._pos = {"bottom": 0, "top": 1, "left": 0, "right": 1}[loc]
        # axis line in transAxes
        self._path = Path(self._to_xy((0, 1), const=self._pos))

    # LINE

    def get_line(self, axes):
        return self._path

    def get_line_transform(self, axes):
        return axes.transAxes

    # LABEL

    def get_axislabel_transform(self, axes):
        return axes.transAxes

    def get_axislabel_pos_angle(self, axes):
        """
        Return the label reference position in transAxes.

        get_label_transform() returns a transform of (transAxes+offset)
        """
        return dict(left=((0., 0.5), 90),  # (position, angle_tangent)
                    right=((1., 0.5), 90),
                    bottom=((0.5, 0.), 0),
                    top=((0.5, 1.), 0))[self._loc]

    # TICK

    def get_tick_transform(self, axes):
        return [axes.get_xaxis_transform(), axes.get_yaxis_transform()][self.nth_coord]


class _FloatingAxisArtistHelperBase(_AxisArtistHelperBase):
    """Helper base for an axis pinned at a fixed data value (e.g. y=0)."""

    def __init__(self, nth_coord, value):
        # value: data coordinate at which the floating axis is drawn.
        self._value = value
        super().__init__(nth_coord)

    def get_line(self, axes):
        raise RuntimeError("get_line method should be defined by the derived class")


class FixedAxisArtistHelperRectilinear(_FixedAxisArtistHelperBase):
    """Fixed-position axis helper for rectilinear (ordinary) axes."""

    @_api.delete_parameter("3.9", "nth_coord")
    def __init__(self, axes, loc, nth_coord=None):
        """
        nth_coord = along which coordinate value varies
        in 2D, nth_coord = 0 ->  x axis, nth_coord = 1 -> y axis
        """
        super().__init__(loc)
        self.axis = [axes.xaxis, axes.yaxis][self.nth_coord]

    # TICK

    def get_tick_iterators(self, axes):
        """tick_loc, tick_angle, tick_label"""
        angle_normal, angle_tangent = {0: (90, 0), 1: (0, 90)}[self.nth_coord]

        major = self.axis.major
        major_locs = major.locator()
        major_labels = major.formatter.format_ticks(major_locs)

        minor = self.axis.minor
        minor_locs = minor.locator()
        minor_labels = minor.formatter.format_ticks(minor_locs)

        tick_to_axes = self.get_tick_transform(axes) - axes.transAxes

        def _f(locs, labels):
            for loc, label in zip(locs, labels):
                c = self._to_xy(loc, const=self._pos)
                # check if the tick point is inside axes
                c2 = tick_to_axes.transform(c)
                if mpl.transforms._interval_contains_close((0, 1), c2[self.nth_coord]):
                    yield c, angle_normal, angle_tangent, label

        return _f(major_locs, major_labels), _f(minor_locs, minor_labels)


class FloatingAxisArtistHelperRectilinear(_FloatingAxisArtistHelperBase):
    """Floating-position axis helper for rectilinear (ordinary) axes."""

    def __init__(self, axes, nth_coord,
                 passingthrough_point, axis_direction="bottom"):
        super().__init__(nth_coord, passingthrough_point)
        self._axis_direction = axis_direction
        self.axis = [axes.xaxis, axes.yaxis][self.nth_coord]

    def get_line(self, axes):
        fixed_coord = 1 - self.nth_coord
        data_to_axes = axes.transData - axes.transAxes
        p = data_to_axes.transform([self._value, self._value])
        return Path(self._to_xy((0, 1), const=p[fixed_coord]))

    def get_line_transform(self, axes):
        return axes.transAxes

    def get_axislabel_transform(self, axes):
        return axes.transAxes

    def get_axislabel_pos_angle(self, axes):
        """
        Return the label reference position in transAxes.

        get_label_transform() returns a transform of (transAxes+offset)
        """
        angle = [0, 90][self.nth_coord]
        fixed_coord = 1 - self.nth_coord
        data_to_axes = axes.transData - axes.transAxes
        p = data_to_axes.transform([self._value, self._value])
        verts = self._to_xy(0.5, const=p[fixed_coord])
        # Hide the label when the floating axis is outside the axes box.
        return (verts, angle) if 0 <= verts[fixed_coord] <= 1 else (None, None)

    def get_tick_transform(self, axes):
        return axes.transData

    def get_tick_iterators(self, axes):
        """tick_loc, tick_angle, tick_label"""
        angle_normal, angle_tangent = {0: (90, 0), 1: (0, 90)}[self.nth_coord]

        major = self.axis.major
        major_locs = major.locator()
        major_labels = major.formatter.format_ticks(major_locs)

        minor = self.axis.minor
        minor_locs = minor.locator()
        minor_labels = minor.formatter.format_ticks(minor_locs)

        data_to_axes = axes.transData - axes.transAxes

        def _f(locs, labels):
            for loc, label in zip(locs, labels):
                c = self._to_xy(loc, const=self._value)
                # Only yield ticks that fall inside the axes box.
                c1, c2 = data_to_axes.transform(c)
                if 0 <= c1 <= 1 and 0 <= c2 <= 1:
                    yield c, angle_normal, angle_tangent, label

        return _f(major_locs, major_labels), _f(minor_locs, minor_labels)


class AxisArtistHelper:  # Backcompat.
    Fixed = _FixedAxisArtistHelperBase
    Floating = _FloatingAxisArtistHelperBase


class AxisArtistHelperRectlinear:  # Backcompat.
    Fixed = FixedAxisArtistHelperRectilinear
    Floating = FloatingAxisArtistHelperRectilinear


class GridHelperBase:
    """Base class for grid helpers; caches grid data per axes limits."""

    def __init__(self):
        # Last (x1, x2, y1, y2) limits for which the grid was computed.
        self._old_limits = None
        super().__init__()

    def update_lim(self, axes):
        """Recompute the grid only when the axes limits have changed."""
        x1, x2 = axes.get_xlim()
        y1, y2 = axes.get_ylim()
        if self._old_limits != (x1, x2, y1, y2):
            self._update_grid(x1, y1, x2, y2)
            self._old_limits = (x1, x2, y1, y2)

    def _update_grid(self, x1, y1, x2, y2):
        """Cache relevant computations when the axes limits have changed."""

    def get_gridlines(self, which, axis):
        """
        Return list of grid lines as a list of paths (list of points).

        Parameters
        ----------
        which : {"both", "major", "minor"}
        axis : {"both", "x", "y"}
        """
        return []


class GridHelperRectlinear(GridHelperBase):
    """Grid helper for ordinary (rectilinear) axes."""

    def __init__(self, axes):
        super().__init__()
        self.axes = axes

    @_api.delete_parameter(
        "3.9", "nth_coord", addendum="'nth_coord' is now inferred from 'loc'.")
    def new_fixed_axis(
            self, loc, nth_coord=None, axis_direction=None, offset=None, axes=None):
        """Create an AxisArtist pinned to the axes side *loc*."""
        if axes is None:
            _api.warn_external(
                "'new_fixed_axis' explicitly requires the axes keyword.")
            axes = self.axes
        if axis_direction is None:
            axis_direction = loc
        return AxisArtist(axes, FixedAxisArtistHelperRectilinear(axes, loc),
                          offset=offset, axis_direction=axis_direction)

    def new_floating_axis(self, nth_coord, value, axis_direction="bottom", axes=None):
        """Create an AxisArtist floating at data coordinate *value*."""
        if axes is None:
            _api.warn_external(
                "'new_floating_axis' explicitly requires the axes keyword.")
            axes = self.axes
        helper = FloatingAxisArtistHelperRectilinear(
            axes, nth_coord, value, axis_direction)
        axisline = AxisArtist(axes, helper, axis_direction=axis_direction)
        axisline.line.set_clip_on(True)
        axisline.line.set_clip_box(axisline.axes.bbox)
        return axisline

    def get_gridlines(self, which="major", axis="both"):
        """
        Return list of gridline coordinates in data coordinates.

        Parameters
        ----------
        which : {"both", "major", "minor"}
        axis : {"both", "x", "y"}
        """
        _api.check_in_list(["both", "major", "minor"], which=which)
        _api.check_in_list(["both", "x", "y"], axis=axis)
        gridlines = []

        # NOTE(review): the x branch selects locators from *which* while the
        # y branch keys off the tick_kw "gridOn" flags — asymmetric, but this
        # matches upstream matplotlib; confirm upstream before unifying.
        if axis in ("both", "x"):
            locs = []
            y1, y2 = self.axes.get_ylim()
            if which in ("both", "major"):
                locs.extend(self.axes.xaxis.major.locator())
            if which in ("both", "minor"):
                locs.extend(self.axes.xaxis.minor.locator())
            gridlines.extend([[x, x], [y1, y2]] for x in locs)

        if axis in ("both", "y"):
            x1, x2 = self.axes.get_xlim()
            locs = []
            if self.axes.yaxis._major_tick_kw["gridOn"]:
                locs.extend(self.axes.yaxis.major.locator())
            if self.axes.yaxis._minor_tick_kw["gridOn"]:
                locs.extend(self.axes.yaxis.minor.locator())
            gridlines.extend([[x1, x2], [y, y]] for y in locs)

        return gridlines


class Axes(maxes.Axes):
    """Axes subclass whose spines/ticks are drawn by AxisArtists."""

    def __init__(self, *args, grid_helper=None, **kwargs):
        self._axisline_on = True
        self._grid_helper = grid_helper if grid_helper else GridHelperRectlinear(self)
        super().__init__(*args, **kwargs)
        self.toggle_axisline(True)

    def toggle_axisline(self, b=None):
        """Switch between AxisArtist rendering and the standard spines."""
        if b is None:
            b = not self._axisline_on
        if b:
            self._axisline_on = True
            self.spines[:].set_visible(False)
            self.xaxis.set_visible(False)
            self.yaxis.set_visible(False)
        else:
            self._axisline_on = False
            self.spines[:].set_visible(True)
            self.xaxis.set_visible(True)
            self.yaxis.set_visible(True)

    @property
    def axis(self):
        # Dict-like access to the AxisArtists, e.g. ax.axis["left"].
        return self._axislines

    def clear(self):
        # docstring inherited

        # Init gridlines before clear() as clear() calls grid().
        self.gridlines = gridlines = GridlinesCollection(
            [],
            colors=mpl.rcParams['grid.color'],
            linestyles=mpl.rcParams['grid.linestyle'],
            linewidths=mpl.rcParams['grid.linewidth'])
        self._set_artist_props(gridlines)
        gridlines.set_grid_helper(self.get_grid_helper())

        super().clear()

        # clip_path is set after Axes.clear(): that's when a patch is created.
        gridlines.set_clip_path(self.axes.patch)

        # Init axis artists.
        self._axislines = mpl_axes.Axes.AxisDict(self)
        new_fixed_axis = self.get_grid_helper().new_fixed_axis
        self._axislines.update({
            loc: new_fixed_axis(loc=loc, axes=self, axis_direction=loc)
            for loc in ["bottom", "top", "left", "right"]})
        for axisline in [self._axislines["top"], self._axislines["right"]]:
            axisline.label.set_visible(False)
            axisline.major_ticklabels.set_visible(False)
            axisline.minor_ticklabels.set_visible(False)

    def get_grid_helper(self):
        return self._grid_helper

    def grid(self, visible=None, which='major', axis="both", **kwargs):
        """
        Toggle the gridlines, and optionally set the properties of the lines.
        """
        # There are some discrepancies in the behavior of grid() between
        # axes_grid and Matplotlib, because axes_grid explicitly sets the
        # visibility of the gridlines.
        super().grid(visible, which=which, axis=axis, **kwargs)
        if not self._axisline_on:
            return
        if visible is None:
            visible = (self.axes.xaxis._minor_tick_kw["gridOn"]
                       or self.axes.xaxis._major_tick_kw["gridOn"]
                       or self.axes.yaxis._minor_tick_kw["gridOn"]
                       or self.axes.yaxis._major_tick_kw["gridOn"])
        self.gridlines.set(which=which, axis=axis, visible=visible)
        self.gridlines.set(**kwargs)

    def get_children(self):
        if self._axisline_on:
            children = [*self._axislines.values(), self.gridlines]
        else:
            children = []
        children.extend(super().get_children())
        return children

    def new_fixed_axis(self, loc, offset=None):
        """Create a new fixed AxisArtist via the grid helper."""
        return self.get_grid_helper().new_fixed_axis(loc, offset=offset, axes=self)

    def new_floating_axis(self, nth_coord, value, axis_direction="bottom"):
        """Create a new floating AxisArtist via the grid helper."""
        return self.get_grid_helper().new_floating_axis(
            nth_coord, value, axis_direction=axis_direction, axes=self)


class AxesZero(Axes):
    """Axes with additional (hidden by default) x=0 / y=0 floating axes."""

    def clear(self):
        super().clear()
        new_floating_axis = self.get_grid_helper().new_floating_axis
        self._axislines.update(
            xzero=new_floating_axis(
                nth_coord=0, value=0., axis_direction="bottom", axes=self),
            yzero=new_floating_axis(
                nth_coord=1, value=0., axis_direction="left", axes=self),
        )
        for k in ["xzero", "yzero"]:
            self._axislines[k].line.set_clip_path(self.patch)
            self._axislines[k].set_visible(False)


Subplot = Axes
SubplotZero = AxesZero

# ---- file: mpl_toolkits/axisartist/floating_axes.py (new file in this diff) ----
"""
An experimental support for curvilinear grid.
"""

# TODO :
# see if tick_iterator method can be simplified by reusing the parent method.

import functools

import numpy as np

import matplotlib as mpl
from matplotlib import _api, cbook
import matplotlib.patches as mpatches
from matplotlib.path import Path

from mpl_toolkits.axes_grid1.parasite_axes import host_axes_class_factory

from . import axislines, grid_helper_curvelinear
from .axis_artist import AxisArtist
from .grid_finder import ExtremeFinderSimple


class FloatingAxisArtistHelper(
        grid_helper_curvelinear.FloatingAxisArtistHelper):
    pass


class FixedAxisArtistHelper(grid_helper_curvelinear.FloatingAxisArtistHelper):

    def __init__(self, grid_helper, side, nth_coord_ticks=None):
        """
        nth_coord = along which coordinate value varies.
        nth_coord = 0 -> x axis, nth_coord = 1 -> y axis
        """
        # ExtremeFinderFixed ignores its arguments, hence the None placeholders.
        lon1, lon2, lat1, lat2 = grid_helper.grid_finder.extreme_finder(*[None] * 5)
        value, nth_coord = _api.check_getitem(
            dict(left=(lon1, 0), right=(lon2, 0), bottom=(lat1, 1), top=(lat2, 1)),
            side=side)
        super().__init__(grid_helper, nth_coord, value, axis_direction=side)
        if nth_coord_ticks is None:
            nth_coord_ticks = nth_coord
        self.nth_coord_ticks = nth_coord_ticks

        self.value = value
        self.grid_helper = grid_helper
        self._side = side

    def update_lim(self, axes):
        """Refresh the cached grid info from the grid helper."""
        self.grid_helper.update_lim(axes)
        self._grid_info = self.grid_helper._grid_info

    def get_tick_iterators(self, axes):
        """tick_loc, tick_angle, tick_label, (optionally) tick_label"""

        grid_finder = self.grid_helper.grid_finder

        lat_levs, lat_n, lat_factor = self._grid_info["lat_info"]
        yy0 = lat_levs / lat_factor

        lon_levs, lon_n, lon_factor = self._grid_info["lon_info"]
        xx0 = lon_levs / lon_factor

        extremes = self.grid_helper.grid_finder.extreme_finder(*[None] * 5)
        xmin, xmax = sorted(extremes[:2])
        ymin, ymax = sorted(extremes[2:])

        def trf_xy(x, y):
            trf = grid_finder.get_transform() + axes.transData
            return trf.transform(np.column_stack(np.broadcast_arrays(x, y))).T

        if self.nth_coord == 0:
            mask = (ymin <= yy0) & (yy0 <= ymax)
            (xx1, yy1), (dxx1, dyy1), (dxx2, dyy2) = \
                grid_helper_curvelinear._value_and_jacobian(
                    trf_xy, self.value, yy0[mask], (xmin, xmax), (ymin, ymax))
            labels = self._grid_info["lat_labels"]

        elif self.nth_coord == 1:
            mask = (xmin <= xx0) & (xx0 <= xmax)
            (xx1, yy1), (dxx2, dyy2), (dxx1, dyy1) = \
                grid_helper_curvelinear._value_and_jacobian(
                    trf_xy, xx0[mask], self.value, (xmin, xmax), (ymin, ymax))
            labels = self._grid_info["lon_labels"]

        labels = [l for l, m in zip(labels, mask) if m]

        angle_normal = np.arctan2(dyy1, dxx1)
        angle_tangent = np.arctan2(dyy2, dxx2)
        mm = (dyy1 == 0) & (dxx1 == 0)  # points with degenerate normal
        angle_normal[mm] = angle_tangent[mm] + np.pi / 2

        tick_to_axes = self.get_tick_transform(axes) - axes.transAxes
        in_01 = functools.partial(
            mpl.transforms._interval_contains_close, (0, 1))

        def f1():
            for x, y, normal, tangent, lab \
                    in zip(xx1, yy1, angle_normal, angle_tangent, labels):
                c2 = tick_to_axes.transform((x, y))
                if in_01(c2[0]) and in_01(c2[1]):
                    yield [x, y], *np.rad2deg([normal, tangent]), lab

        # Floating axes only carry major ticks; the minor iterator is empty.
        return f1(), iter([])

    def get_line(self, axes):
        """Return the boundary grid line for this helper's side as a Path."""
        self.update_lim(axes)
        k, v = dict(left=("lon_lines0", 0),
                    right=("lon_lines0", 1),
                    bottom=("lat_lines0", 0),
                    top=("lat_lines0", 1))[self._side]
        xx, yy = self._grid_info[k][v]
        return Path(np.column_stack([xx, yy]))


class ExtremeFinderFixed(ExtremeFinderSimple):
    # docstring inherited

    def __init__(self, extremes):
        """
        This subclass always returns the same bounding box.

        Parameters
        ----------
        extremes : (float, float, float, float)
            The bounding box that this helper always returns.
        """
        self._extremes = extremes

    def __call__(self, transform_xy, x1, y1, x2, y2):
        # docstring inherited
        return self._extremes


class GridHelperCurveLinear(grid_helper_curvelinear.GridHelperCurveLinear):

    def __init__(self, aux_trans, extremes,
                 grid_locator1=None,
                 grid_locator2=None,
                 tick_formatter1=None,
                 tick_formatter2=None):
        # docstring inherited
        super().__init__(aux_trans,
                         extreme_finder=ExtremeFinderFixed(extremes),
                         grid_locator1=grid_locator1,
                         grid_locator2=grid_locator2,
                         tick_formatter1=tick_formatter1,
                         tick_formatter2=tick_formatter2)

    def new_fixed_axis(
            self, loc, nth_coord=None, axis_direction=None, offset=None, axes=None):
        """Create an AxisArtist along the fixed boundary *loc*."""
        if axes is None:
            axes = self.axes
        if axis_direction is None:
            axis_direction = loc
        # This is not the same as the FixedAxisArtistHelper class used by
        # grid_helper_curvelinear.GridHelperCurveLinear.new_fixed_axis!
        helper = FixedAxisArtistHelper(
            self, loc, nth_coord_ticks=nth_coord)
        axisline = AxisArtist(axes, helper, axis_direction=axis_direction)
        # Perhaps should be moved to the base class?
        axisline.line.set_clip_on(True)
        axisline.line.set_clip_box(axisline.axes.bbox)
        return axisline

    # new_floating_axis will inherit the grid_helper's extremes.

    # def new_floating_axis(self, nth_coord, value, axes=None, axis_direction="bottom"):
    #     axis = super(GridHelperCurveLinear,
    #                  self).new_floating_axis(nth_coord,
    #                                          value, axes=axes,
    #                                          axis_direction=axis_direction)
    #     # set extreme values of the axis helper
    #     if nth_coord == 1:
    #         axis.get_helper().set_extremes(*self._extremes[:2])
    #     elif nth_coord == 0:
    #         axis.get_helper().set_extremes(*self._extremes[2:])
    #     return axis

    def _update_grid(self, x1, y1, x2, y2):
        """Cache lon/lat levels, labels and grid-line paths for the limits."""
        if self._grid_info is None:
            self._grid_info = dict()

        grid_info = self._grid_info

        grid_finder = self.grid_finder
        extremes = grid_finder.extreme_finder(grid_finder.inv_transform_xy,
                                              x1, y1, x2, y2)

        lon_min, lon_max = sorted(extremes[:2])
        lat_min, lat_max = sorted(extremes[2:])
        grid_info["extremes"] = lon_min, lon_max, lat_min, lat_max  # extremes

        lon_levs, lon_n, lon_factor = \
            grid_finder.grid_locator1(lon_min, lon_max)
        lon_levs = np.asarray(lon_levs)
        lat_levs, lat_n, lat_factor = \
            grid_finder.grid_locator2(lat_min, lat_max)
        lat_levs = np.asarray(lat_levs)

        grid_info["lon_info"] = lon_levs, lon_n, lon_factor
        grid_info["lat_info"] = lat_levs, lat_n, lat_factor

        grid_info["lon_labels"] = grid_finder._format_ticks(
            1, "bottom", lon_factor, lon_levs)
        grid_info["lat_labels"] = grid_finder._format_ticks(
            2, "bottom", lat_factor, lat_levs)

        lon_values = lon_levs[:lon_n] / lon_factor
        lat_values = lat_levs[:lat_n] / lat_factor

        # Interior grid lines (strictly inside the extremes).
        lon_lines, lat_lines = grid_finder._get_raw_grid_lines(
            lon_values[(lon_min < lon_values) & (lon_values < lon_max)],
            lat_values[(lat_min < lat_values) & (lat_values < lat_max)],
            lon_min, lon_max, lat_min, lat_max)

        grid_info["lon_lines"] = lon_lines
        grid_info["lat_lines"] = lat_lines

        # Boundary grid lines (at the extremes themselves) — used by
        # FixedAxisArtistHelper.get_line.
        lon_lines, lat_lines = grid_finder._get_raw_grid_lines(
            # lon_min, lon_max, lat_min, lat_max)
            extremes[:2], extremes[2:], *extremes)

        grid_info["lon_lines0"] = lon_lines
        grid_info["lat_lines0"] = lat_lines

    def get_gridlines(self, which="major", axis="both"):
        """Return the cached grid-line paths for *axis*."""
        grid_lines = []
        if axis in ["both", "x"]:
            grid_lines.extend(self._grid_info["lon_lines"])
        if axis in ["both", "y"]:
            grid_lines.extend(self._grid_info["lat_lines"])
        return grid_lines


class FloatingAxesBase:
    """Mixin turning an axislines Axes into a curvilinear floating axes."""

    def __init__(self, *args, grid_helper, **kwargs):
        _api.check_isinstance(GridHelperCurveLinear, grid_helper=grid_helper)
        super().__init__(*args, grid_helper=grid_helper, **kwargs)
        self.set_aspect(1.)

    def _gen_axes_patch(self):
        # docstring inherited
        x0, x1, y0, y1 = self.get_grid_helper().grid_finder.extreme_finder(*[None] * 5)
        patch = mpatches.Polygon([(x0, y0), (x1, y0), (x1, y1), (x0, y1)])
        # Interpolate the edges so the curvilinear transform bends them.
        patch.get_path()._interpolation_steps = 100
        return patch

    def clear(self):
        super().clear()
        self.patch.set_transform(
            self.get_grid_helper().grid_finder.get_transform()
            + self.transData)
        # The original patch is not in the draw tree; it is only used for
        # clipping purposes.
        orig_patch = super()._gen_axes_patch()
        orig_patch.set_figure(self.get_figure(root=False))
        orig_patch.set_transform(self.transAxes)
        self.patch.set_clip_path(orig_patch)
        self.gridlines.set_clip_path(orig_patch)
        self.adjust_axes_lim()

    def adjust_axes_lim(self):
        """Set x/y limits to the (slightly expanded) patch extent."""
        bbox = self.patch.get_path().get_extents(
            # First transform to pixel coords, then to parent data coords.
            self.patch.get_transform() - self.transData)
        bbox = bbox.expanded(1.02, 1.02)
        self.set_xlim(bbox.xmin, bbox.xmax)
        self.set_ylim(bbox.ymin, bbox.ymax)


floatingaxes_class_factory = cbook._make_class_factory(FloatingAxesBase, "Floating{}")
FloatingAxes = floatingaxes_class_factory(host_axes_class_factory(axislines.Axes))
FloatingSubplot = FloatingAxes

# ---- file: mpl_toolkits/axisartist/grid_finder.py (new file in this diff) ----

import numpy as np

from matplotlib import ticker as mticker, _api
from matplotlib.transforms import Bbox, Transform


def _find_line_box_crossings(xys, bbox):
    """
    Find the points where a polyline crosses a bbox, and the crossing angles.

    Parameters
    ----------
    xys : (N, 2) array
        The polyline coordinates.
    bbox : `.Bbox`
        The bounding box.

    Returns
    -------
    list of ((float, float), float)
        Four separate lists of crossings, for the left, right, bottom, and top
        sides of the bbox, respectively.  For each list, the entries are the
        ``((x, y), ccw_angle_in_degrees)`` of the crossing, where an angle of 0
        means that the polyline is moving to the right at the crossing point.

        The entries are computed by linearly interpolating at each crossing
        between the nearest points on either side of the bbox edges.
    """
    # NOTE(review): this diff chunk is truncated here — the function body
    # continues past the end of the visible SOURCE.
+ """ + crossings = [] + dxys = xys[1:] - xys[:-1] + for sl in [slice(None), slice(None, None, -1)]: + us, vs = xys.T[sl] # "this" coord, "other" coord + dus, dvs = dxys.T[sl] + umin, vmin = bbox.min[sl] + umax, vmax = bbox.max[sl] + for u0, inside in [(umin, us > umin), (umax, us < umax)]: + cross = [] + idxs, = (inside[:-1] ^ inside[1:]).nonzero() + for idx in idxs: + v = vs[idx] + (u0 - us[idx]) * dvs[idx] / dus[idx] + if not vmin <= v <= vmax: + continue + crossing = (u0, v)[sl] + theta = np.degrees(np.arctan2(*dxys[idx][::-1])) + cross.append((crossing, theta)) + crossings.append(cross) + return crossings + + +class ExtremeFinderSimple: + """ + A helper class to figure out the range of grid lines that need to be drawn. + """ + + def __init__(self, nx, ny): + """ + Parameters + ---------- + nx, ny : int + The number of samples in each direction. + """ + self.nx = nx + self.ny = ny + + def __call__(self, transform_xy, x1, y1, x2, y2): + """ + Compute an approximation of the bounding box obtained by applying + *transform_xy* to the box delimited by ``(x1, y1, x2, y2)``. + + The intended use is to have ``(x1, y1, x2, y2)`` in axes coordinates, + and have *transform_xy* be the transform from axes coordinates to data + coordinates; this method then returns the range of data coordinates + that span the actual axes. + + The computation is done by sampling ``nx * ny`` equispaced points in + the ``(x1, y1, x2, y2)`` box and finding the resulting points with + extremal coordinates; then adding some padding to take into account the + finite sampling. + + As each sampling step covers a relative range of *1/nx* or *1/ny*, + the padding is computed by expanding the span covered by the extremal + coordinates by these fractions. 
+ """ + x, y = np.meshgrid( + np.linspace(x1, x2, self.nx), np.linspace(y1, y2, self.ny)) + xt, yt = transform_xy(np.ravel(x), np.ravel(y)) + return self._add_pad(xt.min(), xt.max(), yt.min(), yt.max()) + + def _add_pad(self, x_min, x_max, y_min, y_max): + """Perform the padding mentioned in `__call__`.""" + dx = (x_max - x_min) / self.nx + dy = (y_max - y_min) / self.ny + return x_min - dx, x_max + dx, y_min - dy, y_max + dy + + +class _User2DTransform(Transform): + """A transform defined by two user-set functions.""" + + input_dims = output_dims = 2 + + def __init__(self, forward, backward): + """ + Parameters + ---------- + forward, backward : callable + The forward and backward transforms, taking ``x`` and ``y`` as + separate arguments and returning ``(tr_x, tr_y)``. + """ + # The normal Matplotlib convention would be to take and return an + # (N, 2) array but axisartist uses the transposed version. + super().__init__() + self._forward = forward + self._backward = backward + + def transform_non_affine(self, values): + # docstring inherited + return np.transpose(self._forward(*np.transpose(values))) + + def inverted(self): + # docstring inherited + return type(self)(self._backward, self._forward) + + +class GridFinder: + """ + Internal helper for `~.grid_helper_curvelinear.GridHelperCurveLinear`, with + the same constructor parameters; should not be directly instantiated. 
+ """ + + def __init__(self, + transform, + extreme_finder=None, + grid_locator1=None, + grid_locator2=None, + tick_formatter1=None, + tick_formatter2=None): + if extreme_finder is None: + extreme_finder = ExtremeFinderSimple(20, 20) + if grid_locator1 is None: + grid_locator1 = MaxNLocator() + if grid_locator2 is None: + grid_locator2 = MaxNLocator() + if tick_formatter1 is None: + tick_formatter1 = FormatterPrettyPrint() + if tick_formatter2 is None: + tick_formatter2 = FormatterPrettyPrint() + self.extreme_finder = extreme_finder + self.grid_locator1 = grid_locator1 + self.grid_locator2 = grid_locator2 + self.tick_formatter1 = tick_formatter1 + self.tick_formatter2 = tick_formatter2 + self.set_transform(transform) + + def _format_ticks(self, idx, direction, factor, levels): + """ + Helper to support both standard formatters (inheriting from + `.mticker.Formatter`) and axisartist-specific ones; should be called instead of + directly calling ``self.tick_formatter1`` and ``self.tick_formatter2``. This + method should be considered as a temporary workaround which will be removed in + the future at the same time as axisartist-specific formatters. + """ + fmt = _api.check_getitem( + {1: self.tick_formatter1, 2: self.tick_formatter2}, idx=idx) + return (fmt.format_ticks(levels) if isinstance(fmt, mticker.Formatter) + else fmt(direction, factor, levels)) + + def get_grid_info(self, x1, y1, x2, y2): + """ + lon_values, lat_values : list of grid values. if integer is given, + rough number of grids in each direction. + """ + + extremes = self.extreme_finder(self.inv_transform_xy, x1, y1, x2, y2) + + # min & max rage of lat (or lon) for each grid line will be drawn. + # i.e., gridline of lon=0 will be drawn from lat_min to lat_max. 
+ + lon_min, lon_max, lat_min, lat_max = extremes + lon_levs, lon_n, lon_factor = self.grid_locator1(lon_min, lon_max) + lon_levs = np.asarray(lon_levs) + lat_levs, lat_n, lat_factor = self.grid_locator2(lat_min, lat_max) + lat_levs = np.asarray(lat_levs) + + lon_values = lon_levs[:lon_n] / lon_factor + lat_values = lat_levs[:lat_n] / lat_factor + + lon_lines, lat_lines = self._get_raw_grid_lines(lon_values, + lat_values, + lon_min, lon_max, + lat_min, lat_max) + + bb = Bbox.from_extents(x1, y1, x2, y2).expanded(1 + 2e-10, 1 + 2e-10) + + grid_info = { + "extremes": extremes, + # "lon", "lat", filled below. + } + + for idx, lon_or_lat, levs, factor, values, lines in [ + (1, "lon", lon_levs, lon_factor, lon_values, lon_lines), + (2, "lat", lat_levs, lat_factor, lat_values, lat_lines), + ]: + grid_info[lon_or_lat] = gi = { + "lines": [[l] for l in lines], + "ticks": {"left": [], "right": [], "bottom": [], "top": []}, + } + for (lx, ly), v, level in zip(lines, values, levs): + all_crossings = _find_line_box_crossings(np.column_stack([lx, ly]), bb) + for side, crossings in zip( + ["left", "right", "bottom", "top"], all_crossings): + for crossing in crossings: + gi["ticks"][side].append({"level": level, "loc": crossing}) + for side in gi["ticks"]: + levs = [tick["level"] for tick in gi["ticks"][side]] + labels = self._format_ticks(idx, side, factor, levs) + for tick, label in zip(gi["ticks"][side], labels): + tick["label"] = label + + return grid_info + + def _get_raw_grid_lines(self, + lon_values, lat_values, + lon_min, lon_max, lat_min, lat_max): + + lons_i = np.linspace(lon_min, lon_max, 100) # for interpolation + lats_i = np.linspace(lat_min, lat_max, 100) + + lon_lines = [self.transform_xy(np.full_like(lats_i, lon), lats_i) + for lon in lon_values] + lat_lines = [self.transform_xy(lons_i, np.full_like(lons_i, lat)) + for lat in lat_values] + + return lon_lines, lat_lines + + def set_transform(self, aux_trans): + if isinstance(aux_trans, Transform): + 
self._aux_transform = aux_trans + elif len(aux_trans) == 2 and all(map(callable, aux_trans)): + self._aux_transform = _User2DTransform(*aux_trans) + else: + raise TypeError("'aux_trans' must be either a Transform " + "instance or a pair of callables") + + def get_transform(self): + return self._aux_transform + + update_transform = set_transform # backcompat alias. + + def transform_xy(self, x, y): + return self._aux_transform.transform(np.column_stack([x, y])).T + + def inv_transform_xy(self, x, y): + return self._aux_transform.inverted().transform( + np.column_stack([x, y])).T + + def update(self, **kwargs): + for k, v in kwargs.items(): + if k in ["extreme_finder", + "grid_locator1", + "grid_locator2", + "tick_formatter1", + "tick_formatter2"]: + setattr(self, k, v) + else: + raise ValueError(f"Unknown update property {k!r}") + + +class MaxNLocator(mticker.MaxNLocator): + def __init__(self, nbins=10, steps=None, + trim=True, + integer=False, + symmetric=False, + prune=None): + # trim argument has no effect. 
It has been left for API compatibility + super().__init__(nbins, steps=steps, integer=integer, + symmetric=symmetric, prune=prune) + self.create_dummy_axis() + + def __call__(self, v1, v2): + locs = super().tick_values(v1, v2) + return np.array(locs), len(locs), 1 # 1: factor (see angle_helper) + + +class FixedLocator: + def __init__(self, locs): + self._locs = locs + + def __call__(self, v1, v2): + v1, v2 = sorted([v1, v2]) + locs = np.array([l for l in self._locs if v1 <= l <= v2]) + return locs, len(locs), 1 # 1: factor (see angle_helper) + + +# Tick Formatter + +class FormatterPrettyPrint: + def __init__(self, useMathText=True): + self._fmt = mticker.ScalarFormatter( + useMathText=useMathText, useOffset=False) + self._fmt.create_dummy_axis() + + def __call__(self, direction, factor, values): + return self._fmt.format_ticks(values) + + +class DictFormatter: + def __init__(self, format_dict, formatter=None): + """ + format_dict : dictionary for format strings to be used. + formatter : fall-back formatter + """ + super().__init__() + self._format_dict = format_dict + self._fallback_formatter = formatter + + def __call__(self, direction, factor, values): + """ + factor is ignored if value is found in the dictionary + """ + if self._fallback_formatter: + fallback_strings = self._fallback_formatter( + direction, factor, values) + else: + fallback_strings = [""] * len(values) + return [self._format_dict.get(k, v) + for k, v in zip(values, fallback_strings)] diff --git a/vllm/lib/python3.10/site-packages/mpl_toolkits/axisartist/grid_helper_curvelinear.py b/vllm/lib/python3.10/site-packages/mpl_toolkits/axisartist/grid_helper_curvelinear.py new file mode 100644 index 0000000000000000000000000000000000000000..a7eb9d5cfe2108c3691e72f527e98b7b73e95a65 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/mpl_toolkits/axisartist/grid_helper_curvelinear.py @@ -0,0 +1,328 @@ +""" +An experimental support for curvilinear grid. 
+""" + +import functools + +import numpy as np + +import matplotlib as mpl +from matplotlib import _api +from matplotlib.path import Path +from matplotlib.transforms import Affine2D, IdentityTransform +from .axislines import ( + _FixedAxisArtistHelperBase, _FloatingAxisArtistHelperBase, GridHelperBase) +from .axis_artist import AxisArtist +from .grid_finder import GridFinder + + +def _value_and_jacobian(func, xs, ys, xlims, ylims): + """ + Compute *func* and its derivatives along x and y at positions *xs*, *ys*, + while ensuring that finite difference calculations don't try to evaluate + values outside of *xlims*, *ylims*. + """ + eps = np.finfo(float).eps ** (1/2) # see e.g. scipy.optimize.approx_fprime + val = func(xs, ys) + # Take the finite difference step in the direction where the bound is the + # furthest; the step size is min of epsilon and distance to that bound. + xlo, xhi = sorted(xlims) + dxlo = xs - xlo + dxhi = xhi - xs + xeps = (np.take([-1, 1], dxhi >= dxlo) + * np.minimum(eps, np.maximum(dxlo, dxhi))) + val_dx = func(xs + xeps, ys) + ylo, yhi = sorted(ylims) + dylo = ys - ylo + dyhi = yhi - ys + yeps = (np.take([-1, 1], dyhi >= dylo) + * np.minimum(eps, np.maximum(dylo, dyhi))) + val_dy = func(xs, ys + yeps) + return (val, (val_dx - val) / xeps, (val_dy - val) / yeps) + + +class FixedAxisArtistHelper(_FixedAxisArtistHelperBase): + """ + Helper class for a fixed axis. + """ + + def __init__(self, grid_helper, side, nth_coord_ticks=None): + """ + nth_coord = along which coordinate value varies. 
+ nth_coord = 0 -> x axis, nth_coord = 1 -> y axis + """ + + super().__init__(loc=side) + + self.grid_helper = grid_helper + if nth_coord_ticks is None: + nth_coord_ticks = self.nth_coord + self.nth_coord_ticks = nth_coord_ticks + + self.side = side + + def update_lim(self, axes): + self.grid_helper.update_lim(axes) + + def get_tick_transform(self, axes): + return axes.transData + + def get_tick_iterators(self, axes): + """tick_loc, tick_angle, tick_label""" + v1, v2 = axes.get_ylim() if self.nth_coord == 0 else axes.get_xlim() + if v1 > v2: # Inverted limits. + side = {"left": "right", "right": "left", + "top": "bottom", "bottom": "top"}[self.side] + else: + side = self.side + + angle_tangent = dict(left=90, right=90, bottom=0, top=0)[side] + + def iter_major(): + for nth_coord, show_labels in [ + (self.nth_coord_ticks, True), (1 - self.nth_coord_ticks, False)]: + gi = self.grid_helper._grid_info[["lon", "lat"][nth_coord]] + for tick in gi["ticks"][side]: + yield (*tick["loc"], angle_tangent, + (tick["label"] if show_labels else "")) + + return iter_major(), iter([]) + + +class FloatingAxisArtistHelper(_FloatingAxisArtistHelperBase): + + def __init__(self, grid_helper, nth_coord, value, axis_direction=None): + """ + nth_coord = along which coordinate value varies. 
+ nth_coord = 0 -> x axis, nth_coord = 1 -> y axis + """ + super().__init__(nth_coord, value) + self.value = value + self.grid_helper = grid_helper + self._extremes = -np.inf, np.inf + self._line_num_points = 100 # number of points to create a line + + def set_extremes(self, e1, e2): + if e1 is None: + e1 = -np.inf + if e2 is None: + e2 = np.inf + self._extremes = e1, e2 + + def update_lim(self, axes): + self.grid_helper.update_lim(axes) + + x1, x2 = axes.get_xlim() + y1, y2 = axes.get_ylim() + grid_finder = self.grid_helper.grid_finder + extremes = grid_finder.extreme_finder(grid_finder.inv_transform_xy, + x1, y1, x2, y2) + + lon_min, lon_max, lat_min, lat_max = extremes + e_min, e_max = self._extremes # ranges of other coordinates + if self.nth_coord == 0: + lat_min = max(e_min, lat_min) + lat_max = min(e_max, lat_max) + elif self.nth_coord == 1: + lon_min = max(e_min, lon_min) + lon_max = min(e_max, lon_max) + + lon_levs, lon_n, lon_factor = \ + grid_finder.grid_locator1(lon_min, lon_max) + lat_levs, lat_n, lat_factor = \ + grid_finder.grid_locator2(lat_min, lat_max) + + if self.nth_coord == 0: + xx0 = np.full(self._line_num_points, self.value) + yy0 = np.linspace(lat_min, lat_max, self._line_num_points) + xx, yy = grid_finder.transform_xy(xx0, yy0) + elif self.nth_coord == 1: + xx0 = np.linspace(lon_min, lon_max, self._line_num_points) + yy0 = np.full(self._line_num_points, self.value) + xx, yy = grid_finder.transform_xy(xx0, yy0) + + self._grid_info = { + "extremes": (lon_min, lon_max, lat_min, lat_max), + "lon_info": (lon_levs, lon_n, np.asarray(lon_factor)), + "lat_info": (lat_levs, lat_n, np.asarray(lat_factor)), + "lon_labels": grid_finder._format_ticks( + 1, "bottom", lon_factor, lon_levs), + "lat_labels": grid_finder._format_ticks( + 2, "bottom", lat_factor, lat_levs), + "line_xy": (xx, yy), + } + + def get_axislabel_transform(self, axes): + return Affine2D() # axes.transData + + def get_axislabel_pos_angle(self, axes): + def trf_xy(x, y): + trf = 
self.grid_helper.grid_finder.get_transform() + axes.transData + return trf.transform([x, y]).T + + xmin, xmax, ymin, ymax = self._grid_info["extremes"] + if self.nth_coord == 0: + xx0 = self.value + yy0 = (ymin + ymax) / 2 + elif self.nth_coord == 1: + xx0 = (xmin + xmax) / 2 + yy0 = self.value + xy1, dxy1_dx, dxy1_dy = _value_and_jacobian( + trf_xy, xx0, yy0, (xmin, xmax), (ymin, ymax)) + p = axes.transAxes.inverted().transform(xy1) + if 0 <= p[0] <= 1 and 0 <= p[1] <= 1: + d = [dxy1_dy, dxy1_dx][self.nth_coord] + return xy1, np.rad2deg(np.arctan2(*d[::-1])) + else: + return None, None + + def get_tick_transform(self, axes): + return IdentityTransform() # axes.transData + + def get_tick_iterators(self, axes): + """tick_loc, tick_angle, tick_label, (optionally) tick_label""" + + lat_levs, lat_n, lat_factor = self._grid_info["lat_info"] + yy0 = lat_levs / lat_factor + + lon_levs, lon_n, lon_factor = self._grid_info["lon_info"] + xx0 = lon_levs / lon_factor + + e0, e1 = self._extremes + + def trf_xy(x, y): + trf = self.grid_helper.grid_finder.get_transform() + axes.transData + return trf.transform(np.column_stack(np.broadcast_arrays(x, y))).T + + # find angles + if self.nth_coord == 0: + mask = (e0 <= yy0) & (yy0 <= e1) + (xx1, yy1), (dxx1, dyy1), (dxx2, dyy2) = _value_and_jacobian( + trf_xy, self.value, yy0[mask], (-np.inf, np.inf), (e0, e1)) + labels = self._grid_info["lat_labels"] + + elif self.nth_coord == 1: + mask = (e0 <= xx0) & (xx0 <= e1) + (xx1, yy1), (dxx2, dyy2), (dxx1, dyy1) = _value_and_jacobian( + trf_xy, xx0[mask], self.value, (-np.inf, np.inf), (e0, e1)) + labels = self._grid_info["lon_labels"] + + labels = [l for l, m in zip(labels, mask) if m] + + angle_normal = np.arctan2(dyy1, dxx1) + angle_tangent = np.arctan2(dyy2, dxx2) + mm = (dyy1 == 0) & (dxx1 == 0) # points with degenerate normal + angle_normal[mm] = angle_tangent[mm] + np.pi / 2 + + tick_to_axes = self.get_tick_transform(axes) - axes.transAxes + in_01 = functools.partial( + 
mpl.transforms._interval_contains_close, (0, 1)) + + def iter_major(): + for x, y, normal, tangent, lab \ + in zip(xx1, yy1, angle_normal, angle_tangent, labels): + c2 = tick_to_axes.transform((x, y)) + if in_01(c2[0]) and in_01(c2[1]): + yield [x, y], *np.rad2deg([normal, tangent]), lab + + return iter_major(), iter([]) + + def get_line_transform(self, axes): + return axes.transData + + def get_line(self, axes): + self.update_lim(axes) + x, y = self._grid_info["line_xy"] + return Path(np.column_stack([x, y])) + + +class GridHelperCurveLinear(GridHelperBase): + def __init__(self, aux_trans, + extreme_finder=None, + grid_locator1=None, + grid_locator2=None, + tick_formatter1=None, + tick_formatter2=None): + """ + Parameters + ---------- + aux_trans : `.Transform` or tuple[Callable, Callable] + The transform from curved coordinates to rectilinear coordinate: + either a `.Transform` instance (which provides also its inverse), + or a pair of callables ``(trans, inv_trans)`` that define the + transform and its inverse. The callables should have signature:: + + x_rect, y_rect = trans(x_curved, y_curved) + x_curved, y_curved = inv_trans(x_rect, y_rect) + + extreme_finder + + grid_locator1, grid_locator2 + Grid locators for each axis. + + tick_formatter1, tick_formatter2 + Tick formatters for each axis. + """ + super().__init__() + self._grid_info = None + self.grid_finder = GridFinder(aux_trans, + extreme_finder, + grid_locator1, + grid_locator2, + tick_formatter1, + tick_formatter2) + + def update_grid_finder(self, aux_trans=None, **kwargs): + if aux_trans is not None: + self.grid_finder.update_transform(aux_trans) + self.grid_finder.update(**kwargs) + self._old_limits = None # Force revalidation. 
+ + @_api.make_keyword_only("3.9", "nth_coord") + def new_fixed_axis( + self, loc, nth_coord=None, axis_direction=None, offset=None, axes=None): + if axes is None: + axes = self.axes + if axis_direction is None: + axis_direction = loc + helper = FixedAxisArtistHelper(self, loc, nth_coord_ticks=nth_coord) + axisline = AxisArtist(axes, helper, axis_direction=axis_direction) + # Why is clip not set on axisline, unlike in new_floating_axis or in + # the floating_axig.GridHelperCurveLinear subclass? + return axisline + + def new_floating_axis(self, nth_coord, value, axes=None, axis_direction="bottom"): + if axes is None: + axes = self.axes + helper = FloatingAxisArtistHelper( + self, nth_coord, value, axis_direction) + axisline = AxisArtist(axes, helper) + axisline.line.set_clip_on(True) + axisline.line.set_clip_box(axisline.axes.bbox) + # axisline.major_ticklabels.set_visible(True) + # axisline.minor_ticklabels.set_visible(False) + return axisline + + def _update_grid(self, x1, y1, x2, y2): + self._grid_info = self.grid_finder.get_grid_info(x1, y1, x2, y2) + + def get_gridlines(self, which="major", axis="both"): + grid_lines = [] + if axis in ["both", "x"]: + for gl in self._grid_info["lon"]["lines"]: + grid_lines.extend(gl) + if axis in ["both", "y"]: + for gl in self._grid_info["lat"]["lines"]: + grid_lines.extend(gl) + return grid_lines + + @_api.deprecated("3.9") + def get_tick_iterator(self, nth_coord, axis_side, minor=False): + angle_tangent = dict(left=90, right=90, bottom=0, top=0)[axis_side] + lon_or_lat = ["lon", "lat"][nth_coord] + if not minor: # major ticks + for tick in self._grid_info[lon_or_lat]["ticks"][axis_side]: + yield *tick["loc"], angle_tangent, tick["label"] + else: + for tick in self._grid_info[lon_or_lat]["ticks"][axis_side]: + yield *tick["loc"], angle_tangent, "" diff --git a/vllm/lib/python3.10/site-packages/mpl_toolkits/axisartist/parasite_axes.py b/vllm/lib/python3.10/site-packages/mpl_toolkits/axisartist/parasite_axes.py new file mode 
100644 index 0000000000000000000000000000000000000000..4ebd6acc03be2dbb0f5c3360ede2a6a36a3be01b --- /dev/null +++ b/vllm/lib/python3.10/site-packages/mpl_toolkits/axisartist/parasite_axes.py @@ -0,0 +1,7 @@ +from mpl_toolkits.axes_grid1.parasite_axes import ( + host_axes_class_factory, parasite_axes_class_factory) +from .axislines import Axes + + +ParasiteAxes = parasite_axes_class_factory(Axes) +HostAxes = SubplotHost = host_axes_class_factory(Axes) diff --git a/vllm/lib/python3.10/site-packages/mpl_toolkits/axisartist/tests/__init__.py b/vllm/lib/python3.10/site-packages/mpl_toolkits/axisartist/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ea4d8ed16a6a24a8c15ab2956ef678a7f256cd80 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/mpl_toolkits/axisartist/tests/__init__.py @@ -0,0 +1,10 @@ +from pathlib import Path + + +# Check that the test directories exist +if not (Path(__file__).parent / "baseline_images").exists(): + raise OSError( + 'The baseline image directory does not exist. ' + 'This is most likely because the test data is not installed. 
' + 'You may need to install matplotlib from source to get the ' + 'test data.') diff --git a/vllm/lib/python3.10/site-packages/mpl_toolkits/axisartist/tests/__pycache__/__init__.cpython-310.pyc b/vllm/lib/python3.10/site-packages/mpl_toolkits/axisartist/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..91d288f8e082994a9cbc50bd1f7c6771f2d83b94 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/mpl_toolkits/axisartist/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/mpl_toolkits/axisartist/tests/__pycache__/conftest.cpython-310.pyc b/vllm/lib/python3.10/site-packages/mpl_toolkits/axisartist/tests/__pycache__/conftest.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f7a367f345821d3a1331fb64908015e37bf9ec36 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/mpl_toolkits/axisartist/tests/__pycache__/conftest.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/mpl_toolkits/axisartist/tests/__pycache__/test_axis_artist.cpython-310.pyc b/vllm/lib/python3.10/site-packages/mpl_toolkits/axisartist/tests/__pycache__/test_axis_artist.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4e7c532af0fa6037c60ef8978a23336425334bcf Binary files /dev/null and b/vllm/lib/python3.10/site-packages/mpl_toolkits/axisartist/tests/__pycache__/test_axis_artist.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/mpl_toolkits/axisartist/tests/test_axislines.py b/vllm/lib/python3.10/site-packages/mpl_toolkits/axisartist/tests/test_axislines.py new file mode 100644 index 0000000000000000000000000000000000000000..b722316a5c0c01fff91be6c67dc7223f307ece2a --- /dev/null +++ b/vllm/lib/python3.10/site-packages/mpl_toolkits/axisartist/tests/test_axislines.py @@ -0,0 +1,147 @@ +import numpy as np +import matplotlib.pyplot as plt +from matplotlib.testing.decorators 
import image_comparison +from matplotlib.transforms import IdentityTransform + +from mpl_toolkits.axisartist.axislines import AxesZero, SubplotZero, Subplot +from mpl_toolkits.axisartist import Axes, SubplotHost + + +@image_comparison(['SubplotZero.png'], style='default') +def test_SubplotZero(): + # Remove this line when this test image is regenerated. + plt.rcParams['text.kerning_factor'] = 6 + + fig = plt.figure() + + ax = SubplotZero(fig, 1, 1, 1) + fig.add_subplot(ax) + + ax.axis["xzero"].set_visible(True) + ax.axis["xzero"].label.set_text("Axis Zero") + + for n in ["top", "right"]: + ax.axis[n].set_visible(False) + + xx = np.arange(0, 2 * np.pi, 0.01) + ax.plot(xx, np.sin(xx)) + ax.set_ylabel("Test") + + +@image_comparison(['Subplot.png'], style='default') +def test_Subplot(): + # Remove this line when this test image is regenerated. + plt.rcParams['text.kerning_factor'] = 6 + + fig = plt.figure() + + ax = Subplot(fig, 1, 1, 1) + fig.add_subplot(ax) + + xx = np.arange(0, 2 * np.pi, 0.01) + ax.plot(xx, np.sin(xx)) + ax.set_ylabel("Test") + + ax.axis["top"].major_ticks.set_tick_out(True) + ax.axis["bottom"].major_ticks.set_tick_out(True) + + ax.axis["bottom"].set_label("Tk0") + + +def test_Axes(): + fig = plt.figure() + ax = Axes(fig, [0.15, 0.1, 0.65, 0.8]) + fig.add_axes(ax) + ax.plot([1, 2, 3], [0, 1, 2]) + ax.set_xscale('log') + fig.canvas.draw() + + +@image_comparison(['ParasiteAxesAuxTrans_meshplot.png'], + remove_text=True, style='default', tol=0.075) +def test_ParasiteAxesAuxTrans(): + data = np.ones((6, 6)) + data[2, 2] = 2 + data[0, :] = 0 + data[-2, :] = 0 + data[:, 0] = 0 + data[:, -2] = 0 + x = np.arange(6) + y = np.arange(6) + xx, yy = np.meshgrid(x, y) + + funcnames = ['pcolor', 'pcolormesh', 'contourf'] + + fig = plt.figure() + for i, name in enumerate(funcnames): + + ax1 = SubplotHost(fig, 1, 3, i+1) + fig.add_subplot(ax1) + + ax2 = ax1.get_aux_axes(IdentityTransform(), viewlim_mode=None) + if name.startswith('pcolor'): + getattr(ax2, name)(xx, 
yy, data[:-1, :-1]) + else: + getattr(ax2, name)(xx, yy, data) + ax1.set_xlim((0, 5)) + ax1.set_ylim((0, 5)) + + ax2.contour(xx, yy, data, colors='k') + + +@image_comparison(['axisline_style.png'], remove_text=True, style='mpl20') +def test_axisline_style(): + fig = plt.figure(figsize=(2, 2)) + ax = fig.add_subplot(axes_class=AxesZero) + ax.axis["xzero"].set_axisline_style("-|>") + ax.axis["xzero"].set_visible(True) + ax.axis["yzero"].set_axisline_style("->") + ax.axis["yzero"].set_visible(True) + + for direction in ("left", "right", "bottom", "top"): + ax.axis[direction].set_visible(False) + + +@image_comparison(['axisline_style_size_color.png'], remove_text=True, + style='mpl20') +def test_axisline_style_size_color(): + fig = plt.figure(figsize=(2, 2)) + ax = fig.add_subplot(axes_class=AxesZero) + ax.axis["xzero"].set_axisline_style("-|>", size=2.0, facecolor='r') + ax.axis["xzero"].set_visible(True) + ax.axis["yzero"].set_axisline_style("->, size=1.5") + ax.axis["yzero"].set_visible(True) + + for direction in ("left", "right", "bottom", "top"): + ax.axis[direction].set_visible(False) + + +@image_comparison(['axisline_style_tight.png'], remove_text=True, + style='mpl20') +def test_axisline_style_tight(): + fig = plt.figure(figsize=(2, 2)) + ax = fig.add_subplot(axes_class=AxesZero) + ax.axis["xzero"].set_axisline_style("-|>", size=5, facecolor='g') + ax.axis["xzero"].set_visible(True) + ax.axis["yzero"].set_axisline_style("->, size=8") + ax.axis["yzero"].set_visible(True) + + for direction in ("left", "right", "bottom", "top"): + ax.axis[direction].set_visible(False) + + fig.tight_layout() + + +@image_comparison(['subplotzero_ylabel.png'], style='mpl20') +def test_subplotzero_ylabel(): + fig = plt.figure() + ax = fig.add_subplot(111, axes_class=SubplotZero) + + ax.set(xlim=(-3, 7), ylim=(-3, 7), xlabel="x", ylabel="y") + + zero_axis = ax.axis["xzero", "yzero"] + zero_axis.set_visible(True) # they are hidden by default + + ax.axis["left", "right", "bottom", 
"top"].set_visible(False) + + zero_axis.set_axisline_style("->") diff --git a/vllm/lib/python3.10/site-packages/mpl_toolkits/axisartist/tests/test_floating_axes.py b/vllm/lib/python3.10/site-packages/mpl_toolkits/axisartist/tests/test_floating_axes.py new file mode 100644 index 0000000000000000000000000000000000000000..7644fea1696599273f6214d7735bb919b500108c --- /dev/null +++ b/vllm/lib/python3.10/site-packages/mpl_toolkits/axisartist/tests/test_floating_axes.py @@ -0,0 +1,115 @@ +import numpy as np + +import matplotlib.pyplot as plt +import matplotlib.projections as mprojections +import matplotlib.transforms as mtransforms +from matplotlib.testing.decorators import image_comparison +from mpl_toolkits.axisartist.axislines import Subplot +from mpl_toolkits.axisartist.floating_axes import ( + FloatingAxes, GridHelperCurveLinear) +from mpl_toolkits.axisartist.grid_finder import FixedLocator +from mpl_toolkits.axisartist import angle_helper + + +def test_subplot(): + fig = plt.figure(figsize=(5, 5)) + ax = Subplot(fig, 111) + fig.add_subplot(ax) + + +# Rather high tolerance to allow ongoing work with floating axes internals; +# remove when image is regenerated. 
+@image_comparison(['curvelinear3.png'], style='default', tol=5) +def test_curvelinear3(): + fig = plt.figure(figsize=(5, 5)) + + tr = (mtransforms.Affine2D().scale(np.pi / 180, 1) + + mprojections.PolarAxes.PolarTransform(apply_theta_transforms=False)) + grid_helper = GridHelperCurveLinear( + tr, + extremes=(0, 360, 10, 3), + grid_locator1=angle_helper.LocatorDMS(15), + grid_locator2=FixedLocator([2, 4, 6, 8, 10]), + tick_formatter1=angle_helper.FormatterDMS(), + tick_formatter2=None) + ax1 = fig.add_subplot(axes_class=FloatingAxes, grid_helper=grid_helper) + + r_scale = 10 + tr2 = mtransforms.Affine2D().scale(1, 1 / r_scale) + tr + grid_helper2 = GridHelperCurveLinear( + tr2, + extremes=(0, 360, 10 * r_scale, 3 * r_scale), + grid_locator2=FixedLocator([30, 60, 90])) + + ax1.axis["right"] = axis = grid_helper2.new_fixed_axis("right", axes=ax1) + + ax1.axis["left"].label.set_text("Test 1") + ax1.axis["right"].label.set_text("Test 2") + ax1.axis["left", "right"].set_visible(False) + + axis = grid_helper.new_floating_axis(1, 7, axes=ax1, + axis_direction="bottom") + ax1.axis["z"] = axis + axis.toggle(all=True, label=True) + axis.label.set_text("z = ?") + axis.label.set_visible(True) + axis.line.set_color("0.5") + + ax2 = ax1.get_aux_axes(tr) + + xx, yy = [67, 90, 75, 30], [2, 5, 8, 4] + ax2.scatter(xx, yy) + l, = ax2.plot(xx, yy, "k-") + l.set_clip_path(ax1.patch) + + +# Rather high tolerance to allow ongoing work with floating axes internals; +# remove when image is regenerated. +@image_comparison(['curvelinear4.png'], style='default', tol=0.9) +def test_curvelinear4(): + # Remove this line when this test image is regenerated. 
+ plt.rcParams['text.kerning_factor'] = 6 + + fig = plt.figure(figsize=(5, 5)) + + tr = (mtransforms.Affine2D().scale(np.pi / 180, 1) + + mprojections.PolarAxes.PolarTransform(apply_theta_transforms=False)) + grid_helper = GridHelperCurveLinear( + tr, + extremes=(120, 30, 10, 0), + grid_locator1=angle_helper.LocatorDMS(5), + grid_locator2=FixedLocator([2, 4, 6, 8, 10]), + tick_formatter1=angle_helper.FormatterDMS(), + tick_formatter2=None) + ax1 = fig.add_subplot(axes_class=FloatingAxes, grid_helper=grid_helper) + ax1.clear() # Check that clear() also restores the correct limits on ax1. + + ax1.axis["left"].label.set_text("Test 1") + ax1.axis["right"].label.set_text("Test 2") + ax1.axis["top"].set_visible(False) + + axis = grid_helper.new_floating_axis(1, 70, axes=ax1, + axis_direction="bottom") + ax1.axis["z"] = axis + axis.toggle(all=True, label=True) + axis.label.set_axis_direction("top") + axis.label.set_text("z = ?") + axis.label.set_visible(True) + axis.line.set_color("0.5") + + ax2 = ax1.get_aux_axes(tr) + + xx, yy = [67, 90, 75, 30], [2, 5, 8, 4] + ax2.scatter(xx, yy) + l, = ax2.plot(xx, yy, "k-") + l.set_clip_path(ax1.patch) + + +def test_axis_direction(): + # Check that axis direction is propagated on a floating axis + fig = plt.figure() + ax = Subplot(fig, 111) + fig.add_subplot(ax) + ax.axis['y'] = ax.new_floating_axis(nth_coord=1, value=0, + axis_direction='left') + assert ax.axis['y']._axis_direction == 'left' diff --git a/vllm/lib/python3.10/site-packages/shellingham-1.5.4.dist-info/INSTALLER b/vllm/lib/python3.10/site-packages/shellingham-1.5.4.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/shellingham-1.5.4.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/vllm/lib/python3.10/site-packages/shellingham-1.5.4.dist-info/LICENSE b/vllm/lib/python3.10/site-packages/shellingham-1.5.4.dist-info/LICENSE new file mode 
100644 index 0000000000000000000000000000000000000000..b9077766e9b9bdcae49ea5c8fced750ed13ec8f7 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/shellingham-1.5.4.dist-info/LICENSE @@ -0,0 +1,13 @@ +Copyright (c) 2018, Tzu-ping Chung + +Permission to use, copy, modify, and distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. diff --git a/vllm/lib/python3.10/site-packages/shellingham-1.5.4.dist-info/METADATA b/vllm/lib/python3.10/site-packages/shellingham-1.5.4.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..52118f1e5c83bd7ef39196a749651fc87d176812 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/shellingham-1.5.4.dist-info/METADATA @@ -0,0 +1,106 @@ +Metadata-Version: 2.1 +Name: shellingham +Version: 1.5.4 +Summary: Tool to Detect Surrounding Shell +Home-page: https://github.com/sarugaku/shellingham +Author: Tzu-ping Chung +Author-email: uranusjr@gmail.com +License: ISC License +Keywords: shell +Classifier: Development Status :: 3 - Alpha +Classifier: Environment :: Console +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: ISC License (ISCL) +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language 
:: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Classifier: Topic :: Software Development :: Libraries :: Python Modules +Requires-Python: >=3.7 +Description-Content-Type: text/x-rst +License-File: LICENSE + +============================================= +Shellingham: Tool to Detect Surrounding Shell +============================================= + +.. image:: https://img.shields.io/pypi/v/shellingham.svg + :target: https://pypi.org/project/shellingham/ + +Shellingham detects what shell the current Python executable is running in. + + +Usage +===== + +.. code-block:: python + + >>> import shellingham + >>> shellingham.detect_shell() + ('bash', '/bin/bash') + +``detect_shell`` pokes around the process's running environment to determine +what shell it is run in. It returns a 2-tuple: + +* The shell name, always lowercased. +* The command used to run the shell. + +``ShellDetectionFailure`` is raised if ``detect_shell`` fails to detect the +surrounding shell. + + +Notes +===== + +* The shell name is always lowercased. +* On Windows, the shell name is the name of the executable, minus the file + extension. + + +Notes for Application Developers +================================ + +Remember, your application's user is not necessarily using a shell. +Shellingham raises ``ShellDetectionFailure`` if there is no shell to detect, +but *your application should almost never do this to your user*. + +A practical approach to this is to wrap ``detect_shell`` in a try block, and +provide a sane default on failure + +.. code-block:: python + + try: + shell = shellingham.detect_shell() + except shellingham.ShellDetectionFailure: + shell = provide_default() + + +There are a few choices for you to choose from. + +* The POSIX standard mandates the environment variable ``SHELL`` to refer to + "the user's preferred command language interpreter". 
This is always available + (even if the user is not in an interactive session), and likely the correct + choice to launch an interactive sub-shell with. +* A command ``sh`` is almost guaranteed to exist, likely at ``/bin/sh``, since + several POSIX tools rely on it. This should be suitable if you want to run a + (possibly non-interactive) script. +* All versions of DOS and Windows have an environment variable ``COMSPEC``. + This can always be used to launch a usable command prompt (e.g. `cmd.exe` on + Windows). + +Here's a simple implementation to provide a default shell + +.. code-block:: python + + import os + + def provide_default(): + if os.name == 'posix': + return os.environ['SHELL'] + elif os.name == 'nt': + return os.environ['COMSPEC'] + raise NotImplementedError(f'OS {os.name!r} support not available') diff --git a/vllm/lib/python3.10/site-packages/shellingham-1.5.4.dist-info/RECORD b/vllm/lib/python3.10/site-packages/shellingham-1.5.4.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..3451dbfb7c4ac32b752c1c09de217d7c51df9c9a --- /dev/null +++ b/vllm/lib/python3.10/site-packages/shellingham-1.5.4.dist-info/RECORD @@ -0,0 +1,22 @@ +shellingham-1.5.4.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +shellingham-1.5.4.dist-info/LICENSE,sha256=84j9OMrRMRLB3A9mm76A5_hFQe26-3LzAw0sp2QsPJ0,751 +shellingham-1.5.4.dist-info/METADATA,sha256=GD2AIgo3STJieVc53TV8xbs_Sb05DMkZjVGA5UUaB_o,3461 +shellingham-1.5.4.dist-info/RECORD,, +shellingham-1.5.4.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +shellingham-1.5.4.dist-info/WHEEL,sha256=iYlv5fX357PQyRT2o6tw1bN-YcKFFHKqB_LwHO5wP-g,110 +shellingham-1.5.4.dist-info/top_level.txt,sha256=uKMQL5AKxPi4O9_Rbd838QeEs4ImpGQKNbEDZYqgBgk,12 +shellingham-1.5.4.dist-info/zip-safe,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1 +shellingham/__init__.py,sha256=pAKXUPKUdwyErC0ZjS-5w-fRdSbmdcfvnpt_x1yWqtA,635 
+shellingham/__pycache__/__init__.cpython-310.pyc,, +shellingham/__pycache__/_core.cpython-310.pyc,, +shellingham/__pycache__/nt.cpython-310.pyc,, +shellingham/_core.py,sha256=v-CTr_7F7cJAtNnzpa1N_Hl8afkY5yiDA4joGmsUBu0,300 +shellingham/nt.py,sha256=m6J6SuwyqVVlxXT9Bc-9F_1x-T5u0gCFFrRAF2LIkeg,4516 +shellingham/posix/__init__.py,sha256=pB69qtvZJ_yIf48nl4-ZfS3wLwwuXuknXOZhBnC2T1o,3129 +shellingham/posix/__pycache__/__init__.cpython-310.pyc,, +shellingham/posix/__pycache__/_core.cpython-310.pyc,, +shellingham/posix/__pycache__/proc.cpython-310.pyc,, +shellingham/posix/__pycache__/ps.cpython-310.pyc,, +shellingham/posix/_core.py,sha256=_v18UaXbzr4muNhr3-mH1FdSdjZ_dOXQrtUyomIbKYQ,81 +shellingham/posix/proc.py,sha256=nSUxIuQSotvaDW76i0oTQAM9aZ9PXBLFAEktWljSKCo,2659 +shellingham/posix/ps.py,sha256=NGmDKCukhNp0lahwYCaMXphBYaVbhbiR9BtE0OkT8qU,1770 diff --git a/vllm/lib/python3.10/site-packages/shellingham-1.5.4.dist-info/REQUESTED b/vllm/lib/python3.10/site-packages/shellingham-1.5.4.dist-info/REQUESTED new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/vllm/lib/python3.10/site-packages/shellingham-1.5.4.dist-info/WHEEL b/vllm/lib/python3.10/site-packages/shellingham-1.5.4.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..c34f1162ef9a50c355df1261ef6194ffc1b39975 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/shellingham-1.5.4.dist-info/WHEEL @@ -0,0 +1,6 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.41.2) +Root-Is-Purelib: true +Tag: py2-none-any +Tag: py3-none-any + diff --git a/vllm/lib/python3.10/site-packages/shellingham-1.5.4.dist-info/top_level.txt b/vllm/lib/python3.10/site-packages/shellingham-1.5.4.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..d4e44ce0299bb38463f8491ec8850910235c2709 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/shellingham-1.5.4.dist-info/top_level.txt @@ -0,0 +1 @@ 
+shellingham diff --git a/vllm/lib/python3.10/site-packages/shellingham-1.5.4.dist-info/zip-safe b/vllm/lib/python3.10/site-packages/shellingham-1.5.4.dist-info/zip-safe new file mode 100644 index 0000000000000000000000000000000000000000..8b137891791fe96927ad78e64b0aad7bded08bdc --- /dev/null +++ b/vllm/lib/python3.10/site-packages/shellingham-1.5.4.dist-info/zip-safe @@ -0,0 +1 @@ + diff --git a/vllm/lib/python3.10/site-packages/sympy/solvers/ode/__pycache__/ode.cpython-310.pyc b/vllm/lib/python3.10/site-packages/sympy/solvers/ode/__pycache__/ode.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cb21fb8521ea333f539f05953591ec7eb1cbf528 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/sympy/solvers/ode/__pycache__/ode.cpython-310.pyc @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:30ddccd4fc863c4b814fb902fc4e105684b94c231d94805c91ec8ec341e6f903 +size 121437