diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/AnalysedTreeTransforms.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/AnalysedTreeTransforms.py new file mode 100644 index 0000000000000000000000000000000000000000..d4941606ef6f4c1072b5c68dcabe328ac6eaad3a --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/AnalysedTreeTransforms.py @@ -0,0 +1,99 @@ +from __future__ import absolute_import + +from .Visitor import ScopeTrackingTransform +from .Nodes import StatListNode, SingleAssignmentNode, CFuncDefNode, DefNode +from .ExprNodes import DictNode, DictItemNode, NameNode, UnicodeNode +from .PyrexTypes import py_object_type +from .StringEncoding import EncodedString +from . import Symtab + +class AutoTestDictTransform(ScopeTrackingTransform): + # Handles autotestdict directive + + excludelist = ['__cinit__', '__dealloc__', '__richcmp__', + '__nonzero__', '__bool__', + '__len__', '__contains__'] + + def visit_ModuleNode(self, node): + if node.is_pxd: + return node + self.scope_type = 'module' + self.scope_node = node + + if not self.current_directives['autotestdict']: + return node + self.all_docstrings = self.current_directives['autotestdict.all'] + self.cdef_docstrings = self.all_docstrings or self.current_directives['autotestdict.cdef'] + + assert isinstance(node.body, StatListNode) + + # First see if __test__ is already created + if u'__test__' in node.scope.entries: + # Do nothing + return node + + pos = node.pos + + self.tests = [] + self.testspos = node.pos + + test_dict_entry = node.scope.declare_var(EncodedString(u'__test__'), + py_object_type, + pos, + visibility='public') + create_test_dict_assignment = SingleAssignmentNode(pos, + lhs=NameNode(pos, name=EncodedString(u'__test__'), + entry=test_dict_entry), + rhs=DictNode(pos, key_value_pairs=self.tests)) + self.visitchildren(node) + node.body.stats.append(create_test_dict_assignment) + 
return node + + def add_test(self, testpos, path, doctest): + pos = self.testspos + keystr = u'%s (line %d)' % (path, testpos[1]) + key = UnicodeNode(pos, value=EncodedString(keystr)) + value = UnicodeNode(pos, value=doctest) + self.tests.append(DictItemNode(pos, key=key, value=value)) + + def visit_ExprNode(self, node): + # expressions cannot contain functions and lambda expressions + # do not have a docstring + return node + + def visit_FuncDefNode(self, node): + if not node.doc or (isinstance(node, DefNode) and node.fused_py_func): + return node + if not self.cdef_docstrings: + if isinstance(node, CFuncDefNode) and not node.py_func: + return node + if not self.all_docstrings and '>>>' not in node.doc: + return node + + pos = self.testspos + if self.scope_type == 'module': + path = node.entry.name + elif self.scope_type in ('pyclass', 'cclass'): + if isinstance(node, CFuncDefNode): + if node.py_func is not None: + name = node.py_func.name + else: + name = node.entry.name + else: + name = node.name + if self.scope_type == 'cclass' and name in self.excludelist: + return node + if self.scope_type == 'pyclass': + class_name = self.scope_node.name + else: + class_name = self.scope_node.class_name + if isinstance(node.entry.scope, Symtab.PropertyScope): + property_method_name = node.entry.scope.name + path = "%s.%s.%s" % (class_name, node.entry.scope.name, + node.entry.name) + else: + path = "%s.%s" % (class_name, node.entry.name) + else: + assert False + self.add_test(node.pos, path, node.doc) + return node diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/Builtin.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/Builtin.py new file mode 100644 index 0000000000000000000000000000000000000000..46dea9282bead154aef485374c71cce4dc8cb139 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/Builtin.py @@ -0,0 +1,644 @@ +# +# Builtin Definitions +# + +from 
__future__ import absolute_import + +from .StringEncoding import EncodedString +from .Symtab import BuiltinScope, StructOrUnionScope, ModuleScope, Entry +from .Code import UtilityCode, TempitaUtilityCode +from .TypeSlots import Signature +from . import PyrexTypes + + +# C-level implementations of builtin types, functions and methods + +iter_next_utility_code = UtilityCode.load("IterNext", "ObjectHandling.c") +getattr_utility_code = UtilityCode.load("GetAttr", "ObjectHandling.c") +getattr3_utility_code = UtilityCode.load("GetAttr3", "Builtins.c") +pyexec_utility_code = UtilityCode.load("PyExec", "Builtins.c") +pyexec_globals_utility_code = UtilityCode.load("PyExecGlobals", "Builtins.c") +globals_utility_code = UtilityCode.load("Globals", "Builtins.c") + +builtin_utility_code = { + 'StopAsyncIteration': UtilityCode.load_cached("StopAsyncIteration", "Coroutine.c"), +} + + +# mapping from builtins to their C-level equivalents + +class _BuiltinOverride(object): + def __init__(self, py_name, args, ret_type, cname, py_equiv="*", + utility_code=None, sig=None, func_type=None, + is_strict_signature=False, builtin_return_type=None, + nogil=None): + self.py_name, self.cname, self.py_equiv = py_name, cname, py_equiv + self.args, self.ret_type = args, ret_type + self.func_type, self.sig = func_type, sig + self.builtin_return_type = builtin_return_type + self.is_strict_signature = is_strict_signature + self.utility_code = utility_code + self.nogil = nogil + + def build_func_type(self, sig=None, self_arg=None): + if sig is None: + sig = Signature(self.args, self.ret_type, nogil=self.nogil) + sig.exception_check = False # not needed for the current builtins + func_type = sig.function_type(self_arg) + if self.is_strict_signature: + func_type.is_strict_signature = True + if self.builtin_return_type: + func_type.return_type = builtin_types[self.builtin_return_type] + return func_type + + +class BuiltinAttribute(object): + def __init__(self, py_name, cname=None, field_type=None, 
field_type_name=None): + self.py_name = py_name + self.cname = cname or py_name + self.field_type_name = field_type_name # can't do the lookup before the type is declared! + self.field_type = field_type + + def declare_in_type(self, self_type): + if self.field_type_name is not None: + # lazy type lookup + field_type = builtin_scope.lookup(self.field_type_name).type + else: + field_type = self.field_type or PyrexTypes.py_object_type + entry = self_type.scope.declare(self.py_name, self.cname, field_type, None, 'private') + entry.is_variable = True + + +class BuiltinFunction(_BuiltinOverride): + def declare_in_scope(self, scope): + func_type, sig = self.func_type, self.sig + if func_type is None: + func_type = self.build_func_type(sig) + scope.declare_builtin_cfunction(self.py_name, func_type, self.cname, + self.py_equiv, self.utility_code) + + +class BuiltinMethod(_BuiltinOverride): + def declare_in_type(self, self_type): + method_type, sig = self.func_type, self.sig + if method_type is None: + # override 'self' type (first argument) + self_arg = PyrexTypes.CFuncTypeArg("", self_type, None) + self_arg.not_none = True + self_arg.accept_builtin_subtypes = True + method_type = self.build_func_type(sig, self_arg) + self_type.scope.declare_builtin_cfunction( + self.py_name, method_type, self.cname, utility_code=self.utility_code) + + +class BuiltinProperty(object): + # read only for now + def __init__(self, py_name, property_type, call_cname, + exception_value=None, exception_check=None, utility_code=None): + self.py_name = py_name + self.property_type = property_type + self.call_cname = call_cname + self.utility_code = utility_code + self.exception_value = exception_value + self.exception_check = exception_check + + def declare_in_type(self, self_type): + self_type.scope.declare_cproperty( + self.py_name, + self.property_type, + self.call_cname, + exception_value=self.exception_value, + exception_check=self.exception_check, + utility_code=self.utility_code + ) + + 
+builtin_function_table = [ + # name, args, return, C API func, py equiv = "*" + BuiltinFunction('abs', "d", "d", "fabs", + is_strict_signature=True, nogil=True), + BuiltinFunction('abs', "f", "f", "fabsf", + is_strict_signature=True, nogil=True), + BuiltinFunction('abs', "i", "i", "abs", + is_strict_signature=True, nogil=True), + BuiltinFunction('abs', "l", "l", "labs", + is_strict_signature=True, nogil=True), + BuiltinFunction('abs', None, None, "__Pyx_abs_longlong", + utility_code = UtilityCode.load("abs_longlong", "Builtins.c"), + func_type = PyrexTypes.CFuncType( + PyrexTypes.c_longlong_type, [ + PyrexTypes.CFuncTypeArg("arg", PyrexTypes.c_longlong_type, None) + ], + is_strict_signature = True, nogil=True)), + ] + list( + BuiltinFunction('abs', None, None, "/*abs_{0}*/".format(t.specialization_name()), + func_type = PyrexTypes.CFuncType( + t, + [PyrexTypes.CFuncTypeArg("arg", t, None)], + is_strict_signature = True, nogil=True)) + for t in (PyrexTypes.c_uint_type, PyrexTypes.c_ulong_type, PyrexTypes.c_ulonglong_type) + ) + list( + BuiltinFunction('abs', None, None, "__Pyx_c_abs{0}".format(t.funcsuffix), + func_type = PyrexTypes.CFuncType( + t.real_type, [ + PyrexTypes.CFuncTypeArg("arg", t, None) + ], + is_strict_signature = True, nogil=True)) + for t in (PyrexTypes.c_float_complex_type, + PyrexTypes.c_double_complex_type, + PyrexTypes.c_longdouble_complex_type) + ) + [ + BuiltinFunction('abs', "O", "O", "__Pyx_PyNumber_Absolute", + utility_code=UtilityCode.load("py_abs", "Builtins.c")), + #('all', "", "", ""), + #('any', "", "", ""), + #('ascii', "", "", ""), + #('bin', "", "", ""), + BuiltinFunction('callable', "O", "b", "__Pyx_PyCallable_Check", + utility_code = UtilityCode.load("CallableCheck", "ObjectHandling.c")), + #('chr', "", "", ""), + #('cmp', "", "", "", ""), # int PyObject_Cmp(PyObject *o1, PyObject *o2, int *result) + #('compile', "", "", ""), # PyObject* Py_CompileString( char *str, char *filename, int start) + BuiltinFunction('delattr', "OO", 
"r", "PyObject_DelAttr"), + BuiltinFunction('dir', "O", "O", "PyObject_Dir"), + BuiltinFunction('divmod', "OO", "O", "PyNumber_Divmod"), + BuiltinFunction('exec', "O", "O", "__Pyx_PyExecGlobals", + utility_code = pyexec_globals_utility_code), + BuiltinFunction('exec', "OO", "O", "__Pyx_PyExec2", + utility_code = pyexec_utility_code), + BuiltinFunction('exec', "OOO", "O", "__Pyx_PyExec3", + utility_code = pyexec_utility_code), + #('eval', "", "", ""), + #('execfile', "", "", ""), + #('filter', "", "", ""), + BuiltinFunction('getattr3', "OOO", "O", "__Pyx_GetAttr3", "getattr", + utility_code=getattr3_utility_code), # Pyrex legacy + BuiltinFunction('getattr', "OOO", "O", "__Pyx_GetAttr3", + utility_code=getattr3_utility_code), + BuiltinFunction('getattr', "OO", "O", "__Pyx_GetAttr", + utility_code=getattr_utility_code), + BuiltinFunction('hasattr', "OO", "b", "__Pyx_HasAttr", + utility_code = UtilityCode.load("HasAttr", "Builtins.c")), + BuiltinFunction('hash', "O", "h", "PyObject_Hash"), + #('hex', "", "", ""), + #('id', "", "", ""), + #('input', "", "", ""), + BuiltinFunction('intern', "O", "O", "__Pyx_Intern", + utility_code = UtilityCode.load("Intern", "Builtins.c")), + BuiltinFunction('isinstance', "OO", "b", "PyObject_IsInstance"), + BuiltinFunction('issubclass', "OO", "b", "PyObject_IsSubclass"), + BuiltinFunction('iter', "OO", "O", "PyCallIter_New"), + BuiltinFunction('iter', "O", "O", "PyObject_GetIter"), + BuiltinFunction('len', "O", "z", "PyObject_Length"), + BuiltinFunction('locals', "", "O", "__pyx_locals"), + #('map', "", "", ""), + #('max', "", "", ""), + #('min', "", "", ""), + BuiltinFunction('next', "O", "O", "__Pyx_PyIter_Next", + utility_code = iter_next_utility_code), # not available in Py2 => implemented here + BuiltinFunction('next', "OO", "O", "__Pyx_PyIter_Next2", + utility_code = iter_next_utility_code), # not available in Py2 => implemented here + #('oct', "", "", ""), + #('open', "ss", "O", "PyFile_FromString"), # not in Py3 +] + [ + 
BuiltinFunction('ord', None, None, "__Pyx_long_cast", + func_type=PyrexTypes.CFuncType( + PyrexTypes.c_long_type, [PyrexTypes.CFuncTypeArg("c", c_type, None)], + is_strict_signature=True)) + for c_type in [PyrexTypes.c_py_ucs4_type, PyrexTypes.c_py_unicode_type] +] + [ + BuiltinFunction('ord', None, None, "__Pyx_uchar_cast", + func_type=PyrexTypes.CFuncType( + PyrexTypes.c_uchar_type, [PyrexTypes.CFuncTypeArg("c", c_type, None)], + is_strict_signature=True)) + for c_type in [PyrexTypes.c_char_type, PyrexTypes.c_schar_type, PyrexTypes.c_uchar_type] +] + [ + BuiltinFunction('ord', None, None, "__Pyx_PyObject_Ord", + utility_code=UtilityCode.load_cached("object_ord", "Builtins.c"), + func_type=PyrexTypes.CFuncType( + PyrexTypes.c_long_type, [ + PyrexTypes.CFuncTypeArg("c", PyrexTypes.py_object_type, None) + ], + exception_value="(long)(Py_UCS4)-1")), + BuiltinFunction('pow', "OOO", "O", "PyNumber_Power"), + BuiltinFunction('pow', "OO", "O", "__Pyx_PyNumber_Power2", + utility_code = UtilityCode.load("pow2", "Builtins.c")), + #('range', "", "", ""), + #('raw_input', "", "", ""), + #('reduce', "", "", ""), + BuiltinFunction('reload', "O", "O", "PyImport_ReloadModule"), + BuiltinFunction('repr', "O", "O", "PyObject_Repr"), # , builtin_return_type='str'), # add in Cython 3.1 + #('round', "", "", ""), + BuiltinFunction('setattr', "OOO", "r", "PyObject_SetAttr"), + #('sum', "", "", ""), + #('sorted', "", "", ""), + #('type', "O", "O", "PyObject_Type"), + BuiltinFunction('unichr', "i", "O", "PyUnicode_FromOrdinal", builtin_return_type='unicode'), + #('unicode', "", "", ""), + #('vars', "", "", ""), + #('zip', "", "", ""), + # Can't do these easily until we have builtin type entries. + #('typecheck', "OO", "i", "PyObject_TypeCheck", False), + #('issubtype', "OO", "i", "PyType_IsSubtype", False), + + # Put in namespace append optimization. 
+ BuiltinFunction('__Pyx_PyObject_Append', "OO", "O", "__Pyx_PyObject_Append"), + + # This is conditionally looked up based on a compiler directive. + BuiltinFunction('__Pyx_Globals', "", "O", "__Pyx_Globals", + utility_code=globals_utility_code), +] + + +# Builtin types +# bool +# buffer +# classmethod +# dict +# enumerate +# file +# float +# int +# list +# long +# object +# property +# slice +# staticmethod +# super +# str +# tuple +# type +# xrange + +builtin_types_table = [ + + ("type", "PyType_Type", []), + +# This conflicts with the C++ bool type, and unfortunately +# C++ is too liberal about PyObject* <-> bool conversions, +# resulting in unintuitive runtime behavior and segfaults. +# ("bool", "PyBool_Type", []), + + ("int", "PyInt_Type", []), + ("long", "PyLong_Type", []), + ("float", "PyFloat_Type", []), + + ("complex", "PyComplex_Type", [BuiltinAttribute('cval', field_type_name = 'Py_complex'), + BuiltinAttribute('real', 'cval.real', field_type = PyrexTypes.c_double_type), + BuiltinAttribute('imag', 'cval.imag', field_type = PyrexTypes.c_double_type), + ]), + + ("basestring", "PyBaseString_Type", [ + BuiltinMethod("join", "TO", "T", "__Pyx_PyBaseString_Join", + utility_code=UtilityCode.load("StringJoin", "StringTools.c")), + BuiltinMethod("__mul__", "Tz", "T", "__Pyx_PySequence_Multiply", + utility_code=UtilityCode.load("PySequenceMultiply", "ObjectHandling.c")), + ]), + ("bytearray", "PyByteArray_Type", [ + BuiltinMethod("__mul__", "Tz", "T", "__Pyx_PySequence_Multiply", + utility_code=UtilityCode.load("PySequenceMultiply", "ObjectHandling.c")), + ]), + ("bytes", "PyBytes_Type", [BuiltinMethod("join", "TO", "O", "__Pyx_PyBytes_Join", + utility_code=UtilityCode.load("StringJoin", "StringTools.c")), + BuiltinMethod("__mul__", "Tz", "T", "__Pyx_PySequence_Multiply", + utility_code=UtilityCode.load("PySequenceMultiply", "ObjectHandling.c")), + ]), + ("str", "PyString_Type", [BuiltinMethod("join", "TO", "O", "__Pyx_PyString_Join", + 
builtin_return_type='basestring', + utility_code=UtilityCode.load("StringJoin", "StringTools.c")), + BuiltinMethod("__mul__", "Tz", "T", "__Pyx_PySequence_Multiply", + utility_code=UtilityCode.load("PySequenceMultiply", "ObjectHandling.c")), + ]), + ("unicode", "PyUnicode_Type", [BuiltinMethod("__contains__", "TO", "b", "PyUnicode_Contains"), + BuiltinMethod("join", "TO", "T", "PyUnicode_Join"), + BuiltinMethod("__mul__", "Tz", "T", "__Pyx_PySequence_Multiply", + utility_code=UtilityCode.load("PySequenceMultiply", "ObjectHandling.c")), + ]), + + ("tuple", "PyTuple_Type", [BuiltinMethod("__mul__", "Tz", "T", "__Pyx_PySequence_Multiply", + utility_code=UtilityCode.load("PySequenceMultiply", "ObjectHandling.c")), + ]), + + ("list", "PyList_Type", [BuiltinMethod("insert", "TzO", "r", "PyList_Insert"), + BuiltinMethod("reverse", "T", "r", "PyList_Reverse"), + BuiltinMethod("append", "TO", "r", "__Pyx_PyList_Append", + utility_code=UtilityCode.load("ListAppend", "Optimize.c")), + BuiltinMethod("extend", "TO", "r", "__Pyx_PyList_Extend", + utility_code=UtilityCode.load("ListExtend", "Optimize.c")), + BuiltinMethod("__mul__", "Tz", "T", "__Pyx_PySequence_Multiply", + utility_code=UtilityCode.load("PySequenceMultiply", "ObjectHandling.c")), + ]), + + ("dict", "PyDict_Type", [BuiltinMethod("__contains__", "TO", "b", "PyDict_Contains"), + BuiltinMethod("has_key", "TO", "b", "PyDict_Contains"), + BuiltinMethod("items", "T", "O", "__Pyx_PyDict_Items", + utility_code=UtilityCode.load("py_dict_items", "Builtins.c")), + BuiltinMethod("keys", "T", "O", "__Pyx_PyDict_Keys", + utility_code=UtilityCode.load("py_dict_keys", "Builtins.c")), + BuiltinMethod("values", "T", "O", "__Pyx_PyDict_Values", + utility_code=UtilityCode.load("py_dict_values", "Builtins.c")), + BuiltinMethod("iteritems", "T", "O", "__Pyx_PyDict_IterItems", + utility_code=UtilityCode.load("py_dict_iteritems", "Builtins.c")), + BuiltinMethod("iterkeys", "T", "O", "__Pyx_PyDict_IterKeys", + 
utility_code=UtilityCode.load("py_dict_iterkeys", "Builtins.c")), + BuiltinMethod("itervalues", "T", "O", "__Pyx_PyDict_IterValues", + utility_code=UtilityCode.load("py_dict_itervalues", "Builtins.c")), + BuiltinMethod("viewitems", "T", "O", "__Pyx_PyDict_ViewItems", + utility_code=UtilityCode.load("py_dict_viewitems", "Builtins.c")), + BuiltinMethod("viewkeys", "T", "O", "__Pyx_PyDict_ViewKeys", + utility_code=UtilityCode.load("py_dict_viewkeys", "Builtins.c")), + BuiltinMethod("viewvalues", "T", "O", "__Pyx_PyDict_ViewValues", + utility_code=UtilityCode.load("py_dict_viewvalues", "Builtins.c")), + BuiltinMethod("clear", "T", "r", "__Pyx_PyDict_Clear", + utility_code=UtilityCode.load("py_dict_clear", "Optimize.c")), + BuiltinMethod("copy", "T", "T", "PyDict_Copy")]), + + ("slice", "PySlice_Type", [BuiltinAttribute('start'), + BuiltinAttribute('stop'), + BuiltinAttribute('step'), + ]), +# ("file", "PyFile_Type", []), # not in Py3 + + ("set", "PySet_Type", [BuiltinMethod("clear", "T", "r", "PySet_Clear"), + # discard() and remove() have a special treatment for unhashable values + BuiltinMethod("discard", "TO", "r", "__Pyx_PySet_Discard", + utility_code=UtilityCode.load("py_set_discard", "Optimize.c")), + BuiltinMethod("remove", "TO", "r", "__Pyx_PySet_Remove", + utility_code=UtilityCode.load("py_set_remove", "Optimize.c")), + # update is actually variadic (see Github issue #1645) +# BuiltinMethod("update", "TO", "r", "__Pyx_PySet_Update", +# utility_code=UtilityCode.load_cached("PySet_Update", "Builtins.c")), + BuiltinMethod("add", "TO", "r", "PySet_Add"), + BuiltinMethod("pop", "T", "O", "PySet_Pop")]), + ("frozenset", "PyFrozenSet_Type", []), + ("Exception", "((PyTypeObject*)PyExc_Exception)[0]", []), + ("StopAsyncIteration", "((PyTypeObject*)__Pyx_PyExc_StopAsyncIteration)[0]", []), + ("memoryview", "PyMemoryView_Type", [ + # TODO - format would be nice, but hard to get + # __len__ can be accessed through a direct lookup of the buffer (but probably in Optimize.c) 
+ # error checking would ideally be limited api only + BuiltinProperty("ndim", PyrexTypes.c_int_type, '__Pyx_PyMemoryView_Get_ndim', + exception_value="-1", exception_check=True, + utility_code=TempitaUtilityCode.load_cached( + "memoryview_get_from_buffer", "Builtins.c", + context=dict(name="ndim") + ) + ), + BuiltinProperty("readonly", PyrexTypes.c_bint_type, '__Pyx_PyMemoryView_Get_readonly', + exception_value="-1", exception_check=True, + utility_code=TempitaUtilityCode.load_cached( + "memoryview_get_from_buffer", "Builtins.c", + context=dict(name="readonly") + ) + ), + BuiltinProperty("itemsize", PyrexTypes.c_py_ssize_t_type, '__Pyx_PyMemoryView_Get_itemsize', + exception_value="-1", exception_check=True, + utility_code=TempitaUtilityCode.load_cached( + "memoryview_get_from_buffer", "Builtins.c", + context=dict(name="itemsize") + ) + )] + ) +] + + +types_that_construct_their_instance = frozenset({ + # some builtin types do not always return an instance of + # themselves - these do: + 'type', 'bool', 'long', 'float', 'complex', + 'bytes', 'unicode', 'bytearray', + 'tuple', 'list', 'dict', 'set', 'frozenset', + # 'str', # only in Py3.x + # 'file', # only in Py2.x + 'memoryview' +}) + + +builtin_structs_table = [ + ('Py_buffer', 'Py_buffer', + [("buf", PyrexTypes.c_void_ptr_type), + ("obj", PyrexTypes.py_object_type), + ("len", PyrexTypes.c_py_ssize_t_type), + ("itemsize", PyrexTypes.c_py_ssize_t_type), + ("readonly", PyrexTypes.c_bint_type), + ("ndim", PyrexTypes.c_int_type), + ("format", PyrexTypes.c_char_ptr_type), + ("shape", PyrexTypes.c_py_ssize_t_ptr_type), + ("strides", PyrexTypes.c_py_ssize_t_ptr_type), + ("suboffsets", PyrexTypes.c_py_ssize_t_ptr_type), + ("smalltable", PyrexTypes.CArrayType(PyrexTypes.c_py_ssize_t_type, 2)), + ("internal", PyrexTypes.c_void_ptr_type), + ]), + ('Py_complex', 'Py_complex', + [('real', PyrexTypes.c_double_type), + ('imag', PyrexTypes.c_double_type), + ]) +] + +# set up builtin scope + +builtin_scope = BuiltinScope() + +def 
init_builtin_funcs(): + for bf in builtin_function_table: + bf.declare_in_scope(builtin_scope) + +builtin_types = {} + +def init_builtin_types(): + global builtin_types + for name, cname, methods in builtin_types_table: + utility = builtin_utility_code.get(name) + if name == 'frozenset': + objstruct_cname = 'PySetObject' + elif name == 'bytearray': + objstruct_cname = 'PyByteArrayObject' + elif name == 'bool': + objstruct_cname = None + elif name == 'Exception': + objstruct_cname = "PyBaseExceptionObject" + elif name == 'StopAsyncIteration': + objstruct_cname = "PyBaseExceptionObject" + else: + objstruct_cname = 'Py%sObject' % name.capitalize() + type_class = PyrexTypes.BuiltinObjectType + if name in ['dict', 'list', 'set', 'frozenset']: + type_class = PyrexTypes.BuiltinTypeConstructorObjectType + elif name == 'tuple': + type_class = PyrexTypes.PythonTupleTypeConstructor + the_type = builtin_scope.declare_builtin_type(name, cname, utility, objstruct_cname, + type_class=type_class) + builtin_types[name] = the_type + for method in methods: + method.declare_in_type(the_type) + +def init_builtin_structs(): + for name, cname, attribute_types in builtin_structs_table: + scope = StructOrUnionScope(name) + for attribute_name, attribute_type in attribute_types: + scope.declare_var(attribute_name, attribute_type, None, + attribute_name, allow_pyobject=True) + builtin_scope.declare_struct_or_union( + name, "struct", scope, 1, None, cname = cname) + + +def init_builtins(): + #Errors.init_thread() # hopefully not needed - we should not emit warnings ourselves + init_builtin_structs() + init_builtin_types() + init_builtin_funcs() + + entry = builtin_scope.declare_var( + '__debug__', PyrexTypes.c_const_type(PyrexTypes.c_bint_type), + pos=None, cname='__pyx_assertions_enabled()', is_cdef=True) + entry.utility_code = UtilityCode.load_cached("AssertionsEnabled", "Exceptions.c") + + global type_type, list_type, tuple_type, dict_type, set_type, frozenset_type, slice_type + global 
bytes_type, str_type, unicode_type, basestring_type, bytearray_type + global float_type, int_type, long_type, bool_type, complex_type + global memoryview_type, py_buffer_type + global sequence_types + type_type = builtin_scope.lookup('type').type + list_type = builtin_scope.lookup('list').type + tuple_type = builtin_scope.lookup('tuple').type + dict_type = builtin_scope.lookup('dict').type + set_type = builtin_scope.lookup('set').type + frozenset_type = builtin_scope.lookup('frozenset').type + slice_type = builtin_scope.lookup('slice').type + + bytes_type = builtin_scope.lookup('bytes').type + str_type = builtin_scope.lookup('str').type + unicode_type = builtin_scope.lookup('unicode').type + basestring_type = builtin_scope.lookup('basestring').type + bytearray_type = builtin_scope.lookup('bytearray').type + memoryview_type = builtin_scope.lookup('memoryview').type + + float_type = builtin_scope.lookup('float').type + int_type = builtin_scope.lookup('int').type + long_type = builtin_scope.lookup('long').type + bool_type = builtin_scope.lookup('bool').type + complex_type = builtin_scope.lookup('complex').type + + sequence_types = ( + list_type, + tuple_type, + bytes_type, + str_type, + unicode_type, + basestring_type, + bytearray_type, + memoryview_type, + ) + + # Set up type inference links between equivalent Python/C types + bool_type.equivalent_type = PyrexTypes.c_bint_type + PyrexTypes.c_bint_type.equivalent_type = bool_type + + float_type.equivalent_type = PyrexTypes.c_double_type + PyrexTypes.c_double_type.equivalent_type = float_type + + complex_type.equivalent_type = PyrexTypes.c_double_complex_type + PyrexTypes.c_double_complex_type.equivalent_type = complex_type + + py_buffer_type = builtin_scope.lookup('Py_buffer').type + + +init_builtins() + +############################## +# Support for a few standard library modules that Cython understands (currently typing and dataclasses) +############################## +_known_module_scopes = {} + +def 
get_known_standard_library_module_scope(module_name): + mod = _known_module_scopes.get(module_name) + if mod: + return mod + + if module_name == "typing": + mod = ModuleScope(module_name, None, None) + for name, tp in [ + ('Dict', dict_type), + ('List', list_type), + ('Tuple', tuple_type), + ('Set', set_type), + ('FrozenSet', frozenset_type), + ]: + name = EncodedString(name) + entry = mod.declare_type(name, tp, pos = None) + var_entry = Entry(name, None, PyrexTypes.py_object_type) + var_entry.is_pyglobal = True + var_entry.is_variable = True + var_entry.scope = mod + entry.as_variable = var_entry + entry.known_standard_library_import = "%s.%s" % (module_name, name) + + for name in ['ClassVar', 'Optional']: + name = EncodedString(name) + indexed_type = PyrexTypes.SpecialPythonTypeConstructor(EncodedString("typing."+name)) + entry = mod.declare_type(name, indexed_type, pos = None) + var_entry = Entry(name, None, PyrexTypes.py_object_type) + var_entry.is_pyglobal = True + var_entry.is_variable = True + var_entry.scope = mod + entry.as_variable = var_entry + entry.known_standard_library_import = "%s.%s" % (module_name, name) + _known_module_scopes[module_name] = mod + elif module_name == "dataclasses": + mod = ModuleScope(module_name, None, None) + indexed_type = PyrexTypes.SpecialPythonTypeConstructor(EncodedString("dataclasses.InitVar")) + initvar_string = EncodedString("InitVar") + entry = mod.declare_type(initvar_string, indexed_type, pos = None) + var_entry = Entry(initvar_string, None, PyrexTypes.py_object_type) + var_entry.is_pyglobal = True + var_entry.scope = mod + entry.as_variable = var_entry + entry.known_standard_library_import = "%s.InitVar" % module_name + for name in ["dataclass", "field"]: + mod.declare_var(EncodedString(name), PyrexTypes.py_object_type, pos=None) + _known_module_scopes[module_name] = mod + elif module_name == "functools": + mod = ModuleScope(module_name, None, None) + for name in ["total_ordering"]: + 
mod.declare_var(EncodedString(name), PyrexTypes.py_object_type, pos=None) + _known_module_scopes[module_name] = mod + + return mod + + +def get_known_standard_library_entry(qualified_name): + name_parts = qualified_name.split(".") + module_name = EncodedString(name_parts[0]) + rest = name_parts[1:] + + if len(rest) > 1: # for now, we don't know how to deal with any nested modules + return None + + mod = get_known_standard_library_module_scope(module_name) + + # eventually handle more sophisticated multiple lookups if needed + if mod and rest: + return mod.lookup_here(rest[0]) + return None + + +def exprnode_to_known_standard_library_name(node, env): + qualified_name_parts = [] + known_name = None + while node.is_attribute: + qualified_name_parts.append(node.attribute) + node = node.obj + if node.is_name: + entry = env.lookup(node.name) + if entry and entry.known_standard_library_import: + if get_known_standard_library_entry( + entry.known_standard_library_import): + known_name = entry.known_standard_library_import + else: + standard_env = get_known_standard_library_module_scope( + entry.known_standard_library_import) + if standard_env: + qualified_name_parts.append(standard_env.name) + known_name = ".".join(reversed(qualified_name_parts)) + return known_name diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/Code.pxd b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/Code.pxd new file mode 100644 index 0000000000000000000000000000000000000000..e915c6fea3534b4fed1e4df99093f4c3a4721082 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/Code.pxd @@ -0,0 +1,131 @@ +# cython: language_level=3 + +cimport cython +from ..StringIOTree cimport StringIOTree + + +cdef class UtilityCodeBase(object): + cpdef format_code(self, code_string, replace_empty_lines=*) + + +cdef class UtilityCode(UtilityCodeBase): + cdef public object name + cdef public object proto + 
cdef public object impl + cdef public object init + cdef public object cleanup + cdef public object proto_block + cdef public object requires + cdef public dict _cache + cdef public list specialize_list + cdef public object file + + cpdef none_or_sub(self, s, context) + + +cdef class FunctionState: + cdef public set names_taken + cdef public object owner + cdef public object scope + + cdef public object error_label + cdef public size_t label_counter + cdef public set labels_used + cdef public object return_label + cdef public object continue_label + cdef public object break_label + cdef public list yield_labels + + cdef public object return_from_error_cleanup_label # not used in __init__ ? + + cdef public object exc_vars + cdef public object current_except + cdef public bint in_try_finally + cdef public bint can_trace + cdef public bint gil_owned + + cdef public list temps_allocated + cdef public dict temps_free + cdef public dict temps_used_type + cdef public set zombie_temps + cdef public size_t temp_counter + cdef public list collect_temps_stack + + cdef public object closure_temps + cdef public bint should_declare_error_indicator + cdef public bint uses_error_indicator + cdef public bint error_without_exception + + cdef public bint needs_refnanny + + @cython.locals(n=size_t) + cpdef new_label(self, name=*) + cpdef tuple get_loop_labels(self) + cpdef set_loop_labels(self, labels) + cpdef tuple get_all_labels(self) + cpdef set_all_labels(self, labels) + cpdef start_collecting_temps(self) + cpdef stop_collecting_temps(self) + + cpdef list temps_in_use(self) + +cdef class IntConst: + cdef public object cname + cdef public object value + cdef public bint is_long + +cdef class PyObjectConst: + cdef public object cname + cdef public object type + +cdef class StringConst: + cdef public object cname + cdef public object text + cdef public object escaped_value + cdef public dict py_strings + cdef public list py_versions + + @cython.locals(intern=bint, is_str=bint, 
class ExtractPxdCode(VisitorTransform):
    """
    Collect the code-generating nodes of a pxd file.

    Applying the transform to a pxd module tree yields a pair
    ``(StatListNode, ModuleScope)``: the statement list holds every
    function definition found in the tree, and the scope is the one
    attached to the tree's root.  Together these are everything the
    rest of the pipeline needs from the pxd once it has been processed.

    Compiling the pxd separately would be a purer design, but its
    output would then have to carry more structure than plain strings
    (functions plus the interned strings, utility code and cached
    objects they need), so this simpler harvesting approach is used
    for now.
    """

    def __call__(self, root):
        self.funcs = []
        self.visitchildren(root)
        harvested = StatListNode(root.pos, stats=self.funcs)
        return harvested, root.scope

    def visit_Node(self, node):
        # Generic case: keep walking the tree looking for functions.
        self.visitchildren(node)
        return node

    def visit_FuncDefNode(self, node):
        # Record the function and do NOT descend further: nested
        # funcdefnodes are moved along with their enclosing function.
        self.funcs.append(node)
        return node
class EmptyScope(object):
    """A scope in which every name lookup misses.

    Used as the evaluation environment for compile-time constant
    expressions, where no user-defined names may be resolved.
    """
    def lookup(self, name):
        # No names exist here; always report "not found".
        return None

empty_scope = EmptyScope()


def interpret_compiletime_options(optlist, optdict, type_env=None, type_args=()):
    """
    Tries to interpret a list of compile time option nodes.
    The result will be a tuple (optlist, optdict) but where
    all expression nodes have been interpreted. The result is
    in the form of tuples (value, pos).

    optlist is a list of nodes, while optdict is a DictNode (the
    result optdict is a dict)

    If type_env is set, all type nodes will be analysed and the resulting
    type set. Otherwise only interpretateable ExprNodes
    are allowed, other nodes raises errors.

    A CompileError will be raised if there are problems.
    """

    def interpret(node, ix):
        # Positions listed in type_args are interpreted as type nodes
        # rather than as compile-time value expressions.
        if ix in type_args:
            if type_env:
                type = node.analyse_as_type(type_env)
                if not type:
                    raise CompileError(node.pos, "Invalid type.")
                return (type, node.pos)
            else:
                raise CompileError(node.pos, "Type not allowed here.")
        else:
            # On Python 3, prefer the unicode value of a string literal so
            # option values compare equal to plain str keys/values.
            if (sys.version_info[0] >= 3 and
                    isinstance(node, StringNode) and
                    node.unicode_value is not None):
                return (node.unicode_value, node.pos)
            return (node.compile_time_value(empty_scope), node.pos)

    if optlist:
        optlist = [interpret(x, ix) for ix, x in enumerate(optlist)]
    if optdict:
        assert isinstance(optdict, DictNode)
        new_optdict = {}
        for item in optdict.key_value_pairs:
            new_key, dummy = interpret(item.key, None)
            new_optdict[new_key] = interpret(item.value, item.key.value)
        optdict = new_optdict
    # BUG FIX: this used to return `new_optdict`, which raised a NameError
    # whenever optdict was None/empty (new_optdict was only bound inside
    # the `if optdict:` branch).  `optdict` holds the interpreted dict in
    # the non-empty case and passes the original falsy value through
    # otherwise, so callers see the same result on the old working path.
    return (optlist, optdict)
list of valid unicode identifier characters are pretty slow to generate at runtime, + # and require Python3, so are just included directly here + # (via the generated code block at the bottom of the file) + unicode_start_character = (Any(unicode_start_ch_any) | Range(unicode_start_ch_range)) + unicode_continuation_character = ( + unicode_start_character | + Any(unicode_continuation_ch_any) | Range(unicode_continuation_ch_range)) + + def underscore_digits(d): + return Rep1(d) + Rep(Str("_") + Rep1(d)) + + def prefixed_digits(prefix, digits): + return prefix + Opt(Str("_")) + underscore_digits(digits) + + decimal = underscore_digits(digit) + dot = Str(".") + exponent = Any("Ee") + Opt(Any("+-")) + decimal + decimal_fract = (decimal + dot + Opt(decimal)) | (dot + decimal) + + #name = letter + Rep(letter | digit) + name = unicode_start_character + Rep(unicode_continuation_character) + intconst = (prefixed_digits(nonzero_digit, digit) | # decimal literals with underscores must not start with '0' + (Str("0") + (prefixed_digits(Any("Xx"), hexdigit) | + prefixed_digits(Any("Oo"), octdigit) | + prefixed_digits(Any("Bb"), bindigit) )) | + underscore_digits(Str('0')) # 0_0_0_0... 
is allowed as a decimal literal + | Rep1(digit) # FIXME: remove these Py2 style decimal/octal literals (PY_VERSION_HEX < 3) + ) + intsuffix = (Opt(Any("Uu")) + Opt(Any("Ll")) + Opt(Any("Ll"))) | (Opt(Any("Ll")) + Opt(Any("Ll")) + Opt(Any("Uu"))) + intliteral = intconst + intsuffix + fltconst = (decimal_fract + Opt(exponent)) | (decimal + exponent) + imagconst = (intconst | fltconst) + Any("jJ") + + # invalid combinations of prefixes are caught in p_string_literal + beginstring = Opt(Rep(Any(string_prefixes + raw_prefixes)) | + Any(char_prefixes) + ) + (Str("'") | Str('"') | Str("'''") | Str('"""')) + two_oct = octdigit + octdigit + three_oct = octdigit + octdigit + octdigit + two_hex = hexdigit + hexdigit + four_hex = two_hex + two_hex + escapeseq = Str("\\") + (two_oct | three_oct | + Str('N{') + Rep(AnyBut('}')) + Str('}') | + Str('u') + four_hex | Str('x') + two_hex | + Str('U') + four_hex + four_hex | AnyChar) + + bra = Any("([{") + ket = Any(")]}") + ellipsis = Str("...") + punct = Any(":,;+-*/|&<>=.%`~^?!@") + diphthong = Str("==", "<>", "!=", "<=", ">=", "<<", ">>", "**", "//", + "+=", "-=", "*=", "/=", "%=", "|=", "^=", "&=", + "<<=", ">>=", "**=", "//=", "->", "@=", "&&", "||", ':=') + spaces = Rep1(Any(" \t\f")) + escaped_newline = Str("\\\n") + lineterm = Eol + Opt(Str("\n")) + + comment = Str("#") + Rep(AnyBut("\n")) + + return Lexicon([ + (name, Method('normalize_ident')), + (intliteral, Method('strip_underscores', symbol='INT')), + (fltconst, Method('strip_underscores', symbol='FLOAT')), + (imagconst, Method('strip_underscores', symbol='IMAG')), + (ellipsis | punct | diphthong, TEXT), + + (bra, Method('open_bracket_action')), + (ket, Method('close_bracket_action')), + (lineterm, Method('newline_action')), + + (beginstring, Method('begin_string_action')), + + (comment, IGNORE), + (spaces, IGNORE), + (escaped_newline, IGNORE), + + State('INDENT', [ + (comment + lineterm, Method('commentline')), + (Opt(spaces) + Opt(comment) + lineterm, IGNORE), + 
(indentation, Method('indentation_action')), + (Eof, Method('eof_action')) + ]), + + State('SQ_STRING', [ + (escapeseq, 'ESCAPE'), + (Rep1(AnyBut("'\"\n\\")), 'CHARS'), + (Str('"'), 'CHARS'), + (Str("\n"), Method('unclosed_string_action')), + (Str("'"), Method('end_string_action')), + (Eof, 'EOF') + ]), + + State('DQ_STRING', [ + (escapeseq, 'ESCAPE'), + (Rep1(AnyBut('"\n\\')), 'CHARS'), + (Str("'"), 'CHARS'), + (Str("\n"), Method('unclosed_string_action')), + (Str('"'), Method('end_string_action')), + (Eof, 'EOF') + ]), + + State('TSQ_STRING', [ + (escapeseq, 'ESCAPE'), + (Rep1(AnyBut("'\"\n\\")), 'CHARS'), + (Any("'\""), 'CHARS'), + (Str("\n"), 'NEWLINE'), + (Str("'''"), Method('end_string_action')), + (Eof, 'EOF') + ]), + + State('TDQ_STRING', [ + (escapeseq, 'ESCAPE'), + (Rep1(AnyBut('"\'\n\\')), 'CHARS'), + (Any("'\""), 'CHARS'), + (Str("\n"), 'NEWLINE'), + (Str('"""'), Method('end_string_action')), + (Eof, 'EOF') + ]), + + (Eof, Method('eof_action')) + ], + + # FIXME: Plex 1.9 needs different args here from Plex 1.1.4 + #debug_flags = scanner_debug_flags, + #debug_file = scanner_dump_file + ) + + +# BEGIN GENERATED CODE +# Generated with 'cython-generate-lexicon.py' from: +# cpython 3.12.0a7+ (heads/master:4cd1cc843a, Apr 11 2023, 10:32:26) [GCC 11.3.0] + +unicode_start_ch_any = ( + u"\u005f\u00aa\u00b5\u00ba\u02ec\u02ee\u037f\u0386\u038c\u0559\u06d5" + u"\u06ff\u0710\u07b1\u07fa\u081a\u0824\u0828\u093d\u0950\u09b2\u09bd" + u"\u09ce\u09fc\u0a5e\u0abd\u0ad0\u0af9\u0b3d\u0b71\u0b83\u0b9c\u0bd0" + u"\u0c3d\u0c5d\u0c80\u0cbd\u0d3d\u0d4e\u0dbd\u0e32\u0e84\u0ea5\u0eb2" + u"\u0ebd\u0ec6\u0f00\u103f\u1061\u108e\u10c7\u10cd\u1258\u12c0\u17d7" + u"\u17dc\u18aa\u1aa7\u1cfa\u1f59\u1f5b\u1f5d\u1fbe\u2071\u207f\u2102" + u"\u2107\u2115\u2124\u2126\u2128\u214e\u2d27\u2d2d\u2d6f\ua7d3\ua8fb" + u"\ua9cf\uaa7a\uaab1\uaac0\uaac2\ufb1d\ufb3e\ufe71\ufe73\ufe77\ufe79" + u"\ufe7b\ufe7d\U00010808\U0001083c\U00010a00\U00010f27\U00011075\U00011144\U00011147\U00011176\U000111da" + 
u"\U000111dc\U00011288\U0001133d\U00011350\U000114c7\U00011644\U000116b8\U00011909\U0001193f\U00011941\U000119e1" + u"\U000119e3\U00011a00\U00011a3a\U00011a50\U00011a9d\U00011c40\U00011d46\U00011d98\U00011f02\U00011fb0\U00016f50" + u"\U00016fe3\U0001b132\U0001b155\U0001d4a2\U0001d4bb\U0001d546\U0001e14e\U0001e94b\U0001ee24\U0001ee27\U0001ee39" + u"\U0001ee3b\U0001ee42\U0001ee47\U0001ee49\U0001ee4b\U0001ee54\U0001ee57\U0001ee59\U0001ee5b\U0001ee5d\U0001ee5f" + u"\U0001ee64\U0001ee7e" +) +unicode_start_ch_range = ( + u"\u0041\u005a\u0061\u007a\u00c0\u00d6\u00d8\u00f6\u00f8\u02c1\u02c6" + u"\u02d1\u02e0\u02e4\u0370\u0374\u0376\u0377\u037b\u037d\u0388\u038a" + u"\u038e\u03a1\u03a3\u03f5\u03f7\u0481\u048a\u052f\u0531\u0556\u0560" + u"\u0588\u05d0\u05ea\u05ef\u05f2\u0620\u064a\u066e\u066f\u0671\u06d3" + u"\u06e5\u06e6\u06ee\u06ef\u06fa\u06fc\u0712\u072f\u074d\u07a5\u07ca" + u"\u07ea\u07f4\u07f5\u0800\u0815\u0840\u0858\u0860\u086a\u0870\u0887" + u"\u0889\u088e\u08a0\u08c9\u0904\u0939\u0958\u0961\u0971\u0980\u0985" + u"\u098c\u098f\u0990\u0993\u09a8\u09aa\u09b0\u09b6\u09b9\u09dc\u09dd" + u"\u09df\u09e1\u09f0\u09f1\u0a05\u0a0a\u0a0f\u0a10\u0a13\u0a28\u0a2a" + u"\u0a30\u0a32\u0a33\u0a35\u0a36\u0a38\u0a39\u0a59\u0a5c\u0a72\u0a74" + u"\u0a85\u0a8d\u0a8f\u0a91\u0a93\u0aa8\u0aaa\u0ab0\u0ab2\u0ab3\u0ab5" + u"\u0ab9\u0ae0\u0ae1\u0b05\u0b0c\u0b0f\u0b10\u0b13\u0b28\u0b2a\u0b30" + u"\u0b32\u0b33\u0b35\u0b39\u0b5c\u0b5d\u0b5f\u0b61\u0b85\u0b8a\u0b8e" + u"\u0b90\u0b92\u0b95\u0b99\u0b9a\u0b9e\u0b9f\u0ba3\u0ba4\u0ba8\u0baa" + u"\u0bae\u0bb9\u0c05\u0c0c\u0c0e\u0c10\u0c12\u0c28\u0c2a\u0c39\u0c58" + u"\u0c5a\u0c60\u0c61\u0c85\u0c8c\u0c8e\u0c90\u0c92\u0ca8\u0caa\u0cb3" + u"\u0cb5\u0cb9\u0cdd\u0cde\u0ce0\u0ce1\u0cf1\u0cf2\u0d04\u0d0c\u0d0e" + u"\u0d10\u0d12\u0d3a\u0d54\u0d56\u0d5f\u0d61\u0d7a\u0d7f\u0d85\u0d96" + u"\u0d9a\u0db1\u0db3\u0dbb\u0dc0\u0dc6\u0e01\u0e30\u0e40\u0e46\u0e81" + u"\u0e82\u0e86\u0e8a\u0e8c\u0ea3\u0ea7\u0eb0\u0ec0\u0ec4\u0edc\u0edf" + 
u"\u0f40\u0f47\u0f49\u0f6c\u0f88\u0f8c\u1000\u102a\u1050\u1055\u105a" + u"\u105d\u1065\u1066\u106e\u1070\u1075\u1081\u10a0\u10c5\u10d0\u10fa" + u"\u10fc\u1248\u124a\u124d\u1250\u1256\u125a\u125d\u1260\u1288\u128a" + u"\u128d\u1290\u12b0\u12b2\u12b5\u12b8\u12be\u12c2\u12c5\u12c8\u12d6" + u"\u12d8\u1310\u1312\u1315\u1318\u135a\u1380\u138f\u13a0\u13f5\u13f8" + u"\u13fd\u1401\u166c\u166f\u167f\u1681\u169a\u16a0\u16ea\u16ee\u16f8" + u"\u1700\u1711\u171f\u1731\u1740\u1751\u1760\u176c\u176e\u1770\u1780" + u"\u17b3\u1820\u1878\u1880\u18a8\u18b0\u18f5\u1900\u191e\u1950\u196d" + u"\u1970\u1974\u1980\u19ab\u19b0\u19c9\u1a00\u1a16\u1a20\u1a54\u1b05" + u"\u1b33\u1b45\u1b4c\u1b83\u1ba0\u1bae\u1baf\u1bba\u1be5\u1c00\u1c23" + u"\u1c4d\u1c4f\u1c5a\u1c7d\u1c80\u1c88\u1c90\u1cba\u1cbd\u1cbf\u1ce9" + u"\u1cec\u1cee\u1cf3\u1cf5\u1cf6\u1d00\u1dbf\u1e00\u1f15\u1f18\u1f1d" + u"\u1f20\u1f45\u1f48\u1f4d\u1f50\u1f57\u1f5f\u1f7d\u1f80\u1fb4\u1fb6" + u"\u1fbc\u1fc2\u1fc4\u1fc6\u1fcc\u1fd0\u1fd3\u1fd6\u1fdb\u1fe0\u1fec" + u"\u1ff2\u1ff4\u1ff6\u1ffc\u2090\u209c\u210a\u2113\u2118\u211d\u212a" + u"\u2139\u213c\u213f\u2145\u2149\u2160\u2188\u2c00\u2ce4\u2ceb\u2cee" + u"\u2cf2\u2cf3\u2d00\u2d25\u2d30\u2d67\u2d80\u2d96\u2da0\u2da6\u2da8" + u"\u2dae\u2db0\u2db6\u2db8\u2dbe\u2dc0\u2dc6\u2dc8\u2dce\u2dd0\u2dd6" + u"\u2dd8\u2dde\u3005\u3007\u3021\u3029\u3031\u3035\u3038\u303c\u3041" + u"\u3096\u309d\u309f\u30a1\u30fa\u30fc\u30ff\u3105\u312f\u3131\u318e" + u"\u31a0\u31bf\u31f0\u31ff\u3400\u4dbf\u4e00\ua48c\ua4d0\ua4fd\ua500" + u"\ua60c\ua610\ua61f\ua62a\ua62b\ua640\ua66e\ua67f\ua69d\ua6a0\ua6ef" + u"\ua717\ua71f\ua722\ua788\ua78b\ua7ca\ua7d0\ua7d1\ua7d5\ua7d9\ua7f2" + u"\ua801\ua803\ua805\ua807\ua80a\ua80c\ua822\ua840\ua873\ua882\ua8b3" + u"\ua8f2\ua8f7\ua8fd\ua8fe\ua90a\ua925\ua930\ua946\ua960\ua97c\ua984" + u"\ua9b2\ua9e0\ua9e4\ua9e6\ua9ef\ua9fa\ua9fe\uaa00\uaa28\uaa40\uaa42" + u"\uaa44\uaa4b\uaa60\uaa76\uaa7e\uaaaf\uaab5\uaab6\uaab9\uaabd\uaadb" + 
u"\uaadd\uaae0\uaaea\uaaf2\uaaf4\uab01\uab06\uab09\uab0e\uab11\uab16" + u"\uab20\uab26\uab28\uab2e\uab30\uab5a\uab5c\uab69\uab70\uabe2\uac00" + u"\ud7a3\ud7b0\ud7c6\ud7cb\ud7fb\uf900\ufa6d\ufa70\ufad9\ufb00\ufb06" + u"\ufb13\ufb17\ufb1f\ufb28\ufb2a\ufb36\ufb38\ufb3c\ufb40\ufb41\ufb43" + u"\ufb44\ufb46\ufbb1\ufbd3\ufc5d\ufc64\ufd3d\ufd50\ufd8f\ufd92\ufdc7" + u"\ufdf0\ufdf9\ufe7f\ufefc\uff21\uff3a\uff41\uff5a\uff66\uff9d\uffa0" + u"\uffbe\uffc2\uffc7\uffca\uffcf\uffd2\uffd7\uffda\uffdc\U00010000\U0001000b" + u"\U0001000d\U00010026\U00010028\U0001003a\U0001003c\U0001003d\U0001003f\U0001004d\U00010050\U0001005d\U00010080" + u"\U000100fa\U00010140\U00010174\U00010280\U0001029c\U000102a0\U000102d0\U00010300\U0001031f\U0001032d\U0001034a" + u"\U00010350\U00010375\U00010380\U0001039d\U000103a0\U000103c3\U000103c8\U000103cf\U000103d1\U000103d5\U00010400" + u"\U0001049d\U000104b0\U000104d3\U000104d8\U000104fb\U00010500\U00010527\U00010530\U00010563\U00010570\U0001057a" + u"\U0001057c\U0001058a\U0001058c\U00010592\U00010594\U00010595\U00010597\U000105a1\U000105a3\U000105b1\U000105b3" + u"\U000105b9\U000105bb\U000105bc\U00010600\U00010736\U00010740\U00010755\U00010760\U00010767\U00010780\U00010785" + u"\U00010787\U000107b0\U000107b2\U000107ba\U00010800\U00010805\U0001080a\U00010835\U00010837\U00010838\U0001083f" + u"\U00010855\U00010860\U00010876\U00010880\U0001089e\U000108e0\U000108f2\U000108f4\U000108f5\U00010900\U00010915" + u"\U00010920\U00010939\U00010980\U000109b7\U000109be\U000109bf\U00010a10\U00010a13\U00010a15\U00010a17\U00010a19" + u"\U00010a35\U00010a60\U00010a7c\U00010a80\U00010a9c\U00010ac0\U00010ac7\U00010ac9\U00010ae4\U00010b00\U00010b35" + u"\U00010b40\U00010b55\U00010b60\U00010b72\U00010b80\U00010b91\U00010c00\U00010c48\U00010c80\U00010cb2\U00010cc0" + u"\U00010cf2\U00010d00\U00010d23\U00010e80\U00010ea9\U00010eb0\U00010eb1\U00010f00\U00010f1c\U00010f30\U00010f45" + 
u"\U00010f70\U00010f81\U00010fb0\U00010fc4\U00010fe0\U00010ff6\U00011003\U00011037\U00011071\U00011072\U00011083" + u"\U000110af\U000110d0\U000110e8\U00011103\U00011126\U00011150\U00011172\U00011183\U000111b2\U000111c1\U000111c4" + u"\U00011200\U00011211\U00011213\U0001122b\U0001123f\U00011240\U00011280\U00011286\U0001128a\U0001128d\U0001128f" + u"\U0001129d\U0001129f\U000112a8\U000112b0\U000112de\U00011305\U0001130c\U0001130f\U00011310\U00011313\U00011328" + u"\U0001132a\U00011330\U00011332\U00011333\U00011335\U00011339\U0001135d\U00011361\U00011400\U00011434\U00011447" + u"\U0001144a\U0001145f\U00011461\U00011480\U000114af\U000114c4\U000114c5\U00011580\U000115ae\U000115d8\U000115db" + u"\U00011600\U0001162f\U00011680\U000116aa\U00011700\U0001171a\U00011740\U00011746\U00011800\U0001182b\U000118a0" + u"\U000118df\U000118ff\U00011906\U0001190c\U00011913\U00011915\U00011916\U00011918\U0001192f\U000119a0\U000119a7" + u"\U000119aa\U000119d0\U00011a0b\U00011a32\U00011a5c\U00011a89\U00011ab0\U00011af8\U00011c00\U00011c08\U00011c0a" + u"\U00011c2e\U00011c72\U00011c8f\U00011d00\U00011d06\U00011d08\U00011d09\U00011d0b\U00011d30\U00011d60\U00011d65" + u"\U00011d67\U00011d68\U00011d6a\U00011d89\U00011ee0\U00011ef2\U00011f04\U00011f10\U00011f12\U00011f33\U00012000" + u"\U00012399\U00012400\U0001246e\U00012480\U00012543\U00012f90\U00012ff0\U00013000\U0001342f\U00013441\U00013446" + u"\U00014400\U00014646\U00016800\U00016a38\U00016a40\U00016a5e\U00016a70\U00016abe\U00016ad0\U00016aed\U00016b00" + u"\U00016b2f\U00016b40\U00016b43\U00016b63\U00016b77\U00016b7d\U00016b8f\U00016e40\U00016e7f\U00016f00\U00016f4a" + u"\U00016f93\U00016f9f\U00016fe0\U00016fe1\U00017000\U000187f7\U00018800\U00018cd5\U00018d00\U00018d08\U0001aff0" + u"\U0001aff3\U0001aff5\U0001affb\U0001affd\U0001affe\U0001b000\U0001b122\U0001b150\U0001b152\U0001b164\U0001b167" + u"\U0001b170\U0001b2fb\U0001bc00\U0001bc6a\U0001bc70\U0001bc7c\U0001bc80\U0001bc88\U0001bc90\U0001bc99\U0001d400" + 
u"\U0001d454\U0001d456\U0001d49c\U0001d49e\U0001d49f\U0001d4a5\U0001d4a6\U0001d4a9\U0001d4ac\U0001d4ae\U0001d4b9" + u"\U0001d4bd\U0001d4c3\U0001d4c5\U0001d505\U0001d507\U0001d50a\U0001d50d\U0001d514\U0001d516\U0001d51c\U0001d51e" + u"\U0001d539\U0001d53b\U0001d53e\U0001d540\U0001d544\U0001d54a\U0001d550\U0001d552\U0001d6a5\U0001d6a8\U0001d6c0" + u"\U0001d6c2\U0001d6da\U0001d6dc\U0001d6fa\U0001d6fc\U0001d714\U0001d716\U0001d734\U0001d736\U0001d74e\U0001d750" + u"\U0001d76e\U0001d770\U0001d788\U0001d78a\U0001d7a8\U0001d7aa\U0001d7c2\U0001d7c4\U0001d7cb\U0001df00\U0001df1e" + u"\U0001df25\U0001df2a\U0001e030\U0001e06d\U0001e100\U0001e12c\U0001e137\U0001e13d\U0001e290\U0001e2ad\U0001e2c0" + u"\U0001e2eb\U0001e4d0\U0001e4eb\U0001e7e0\U0001e7e6\U0001e7e8\U0001e7eb\U0001e7ed\U0001e7ee\U0001e7f0\U0001e7fe" + u"\U0001e800\U0001e8c4\U0001e900\U0001e943\U0001ee00\U0001ee03\U0001ee05\U0001ee1f\U0001ee21\U0001ee22\U0001ee29" + u"\U0001ee32\U0001ee34\U0001ee37\U0001ee4d\U0001ee4f\U0001ee51\U0001ee52\U0001ee61\U0001ee62\U0001ee67\U0001ee6a" + u"\U0001ee6c\U0001ee72\U0001ee74\U0001ee77\U0001ee79\U0001ee7c\U0001ee80\U0001ee89\U0001ee8b\U0001ee9b\U0001eea1" + u"\U0001eea3\U0001eea5\U0001eea9\U0001eeab\U0001eebb\U00020000\U0002a6df\U0002a700\U0002b739\U0002b740\U0002b81d" + u"\U0002b820\U0002cea1\U0002ceb0\U0002ebe0\U0002f800\U0002fa1d\U00030000\U0003134a" +) +unicode_continuation_ch_any = ( + u"\u00b7\u0387\u05bf\u05c7\u0670\u0711\u07fd\u09bc\u09d7\u09fe\u0a3c" + u"\u0a51\u0a75\u0abc\u0b3c\u0b82\u0bd7\u0c3c\u0cbc\u0cf3\u0d57\u0dca" + u"\u0dd6\u0e31\u0eb1\u0f35\u0f37\u0f39\u0fc6\u17dd\u18a9\u1ced\u1cf4" + u"\u2054\u20e1\u2d7f\ua66f\ua802\ua806\ua80b\ua82c\ua9e5\uaa43\uaab0" + u"\uaac1\ufb1e\uff3f\U000101fd\U000102e0\U00010a3f\U000110c2\U00011173\U0001123e\U00011241\U00011357" + u"\U0001145e\U00011940\U000119e4\U00011a47\U00011d3a\U00011d47\U00011f03\U00013440\U00016f4f\U00016fe4\U0001da75" + u"\U0001da84\U0001e08f\U0001e2ae" +) +unicode_continuation_ch_range = ( + 
u"\u0030\u0039\u0300\u036f\u0483\u0487\u0591\u05bd\u05c1\u05c2\u05c4" + u"\u05c5\u0610\u061a\u064b\u0669\u06d6\u06dc\u06df\u06e4\u06e7\u06e8" + u"\u06ea\u06ed\u06f0\u06f9\u0730\u074a\u07a6\u07b0\u07c0\u07c9\u07eb" + u"\u07f3\u0816\u0819\u081b\u0823\u0825\u0827\u0829\u082d\u0859\u085b" + u"\u0898\u089f\u08ca\u08e1\u08e3\u0903\u093a\u093c\u093e\u094f\u0951" + u"\u0957\u0962\u0963\u0966\u096f\u0981\u0983\u09be\u09c4\u09c7\u09c8" + u"\u09cb\u09cd\u09e2\u09e3\u09e6\u09ef\u0a01\u0a03\u0a3e\u0a42\u0a47" + u"\u0a48\u0a4b\u0a4d\u0a66\u0a71\u0a81\u0a83\u0abe\u0ac5\u0ac7\u0ac9" + u"\u0acb\u0acd\u0ae2\u0ae3\u0ae6\u0aef\u0afa\u0aff\u0b01\u0b03\u0b3e" + u"\u0b44\u0b47\u0b48\u0b4b\u0b4d\u0b55\u0b57\u0b62\u0b63\u0b66\u0b6f" + u"\u0bbe\u0bc2\u0bc6\u0bc8\u0bca\u0bcd\u0be6\u0bef\u0c00\u0c04\u0c3e" + u"\u0c44\u0c46\u0c48\u0c4a\u0c4d\u0c55\u0c56\u0c62\u0c63\u0c66\u0c6f" + u"\u0c81\u0c83\u0cbe\u0cc4\u0cc6\u0cc8\u0cca\u0ccd\u0cd5\u0cd6\u0ce2" + u"\u0ce3\u0ce6\u0cef\u0d00\u0d03\u0d3b\u0d3c\u0d3e\u0d44\u0d46\u0d48" + u"\u0d4a\u0d4d\u0d62\u0d63\u0d66\u0d6f\u0d81\u0d83\u0dcf\u0dd4\u0dd8" + u"\u0ddf\u0de6\u0def\u0df2\u0df3\u0e33\u0e3a\u0e47\u0e4e\u0e50\u0e59" + u"\u0eb3\u0ebc\u0ec8\u0ece\u0ed0\u0ed9\u0f18\u0f19\u0f20\u0f29\u0f3e" + u"\u0f3f\u0f71\u0f84\u0f86\u0f87\u0f8d\u0f97\u0f99\u0fbc\u102b\u103e" + u"\u1040\u1049\u1056\u1059\u105e\u1060\u1062\u1064\u1067\u106d\u1071" + u"\u1074\u1082\u108d\u108f\u109d\u135d\u135f\u1369\u1371\u1712\u1715" + u"\u1732\u1734\u1752\u1753\u1772\u1773\u17b4\u17d3\u17e0\u17e9\u180b" + u"\u180d\u180f\u1819\u1920\u192b\u1930\u193b\u1946\u194f\u19d0\u19da" + u"\u1a17\u1a1b\u1a55\u1a5e\u1a60\u1a7c\u1a7f\u1a89\u1a90\u1a99\u1ab0" + u"\u1abd\u1abf\u1ace\u1b00\u1b04\u1b34\u1b44\u1b50\u1b59\u1b6b\u1b73" + u"\u1b80\u1b82\u1ba1\u1bad\u1bb0\u1bb9\u1be6\u1bf3\u1c24\u1c37\u1c40" + u"\u1c49\u1c50\u1c59\u1cd0\u1cd2\u1cd4\u1ce8\u1cf7\u1cf9\u1dc0\u1dff" + u"\u203f\u2040\u20d0\u20dc\u20e5\u20f0\u2cef\u2cf1\u2de0\u2dff\u302a" + 
u"\u302f\u3099\u309a\ua620\ua629\ua674\ua67d\ua69e\ua69f\ua6f0\ua6f1" + u"\ua823\ua827\ua880\ua881\ua8b4\ua8c5\ua8d0\ua8d9\ua8e0\ua8f1\ua8ff" + u"\ua909\ua926\ua92d\ua947\ua953\ua980\ua983\ua9b3\ua9c0\ua9d0\ua9d9" + u"\ua9f0\ua9f9\uaa29\uaa36\uaa4c\uaa4d\uaa50\uaa59\uaa7b\uaa7d\uaab2" + u"\uaab4\uaab7\uaab8\uaabe\uaabf\uaaeb\uaaef\uaaf5\uaaf6\uabe3\uabea" + u"\uabec\uabed\uabf0\uabf9\ufe00\ufe0f\ufe20\ufe2f\ufe33\ufe34\ufe4d" + u"\ufe4f\uff10\uff19\uff9e\uff9f\U00010376\U0001037a\U000104a0\U000104a9\U00010a01\U00010a03" + u"\U00010a05\U00010a06\U00010a0c\U00010a0f\U00010a38\U00010a3a\U00010ae5\U00010ae6\U00010d24\U00010d27\U00010d30" + u"\U00010d39\U00010eab\U00010eac\U00010efd\U00010eff\U00010f46\U00010f50\U00010f82\U00010f85\U00011000\U00011002" + u"\U00011038\U00011046\U00011066\U00011070\U00011073\U00011074\U0001107f\U00011082\U000110b0\U000110ba\U000110f0" + u"\U000110f9\U00011100\U00011102\U00011127\U00011134\U00011136\U0001113f\U00011145\U00011146\U00011180\U00011182" + u"\U000111b3\U000111c0\U000111c9\U000111cc\U000111ce\U000111d9\U0001122c\U00011237\U000112df\U000112ea\U000112f0" + u"\U000112f9\U00011300\U00011303\U0001133b\U0001133c\U0001133e\U00011344\U00011347\U00011348\U0001134b\U0001134d" + u"\U00011362\U00011363\U00011366\U0001136c\U00011370\U00011374\U00011435\U00011446\U00011450\U00011459\U000114b0" + u"\U000114c3\U000114d0\U000114d9\U000115af\U000115b5\U000115b8\U000115c0\U000115dc\U000115dd\U00011630\U00011640" + u"\U00011650\U00011659\U000116ab\U000116b7\U000116c0\U000116c9\U0001171d\U0001172b\U00011730\U00011739\U0001182c" + u"\U0001183a\U000118e0\U000118e9\U00011930\U00011935\U00011937\U00011938\U0001193b\U0001193e\U00011942\U00011943" + u"\U00011950\U00011959\U000119d1\U000119d7\U000119da\U000119e0\U00011a01\U00011a0a\U00011a33\U00011a39\U00011a3b" + u"\U00011a3e\U00011a51\U00011a5b\U00011a8a\U00011a99\U00011c2f\U00011c36\U00011c38\U00011c3f\U00011c50\U00011c59" + 
u"\U00011c92\U00011ca7\U00011ca9\U00011cb6\U00011d31\U00011d36\U00011d3c\U00011d3d\U00011d3f\U00011d45\U00011d50" + u"\U00011d59\U00011d8a\U00011d8e\U00011d90\U00011d91\U00011d93\U00011d97\U00011da0\U00011da9\U00011ef3\U00011ef6" + u"\U00011f00\U00011f01\U00011f34\U00011f3a\U00011f3e\U00011f42\U00011f50\U00011f59\U00013447\U00013455\U00016a60" + u"\U00016a69\U00016ac0\U00016ac9\U00016af0\U00016af4\U00016b30\U00016b36\U00016b50\U00016b59\U00016f51\U00016f87" + u"\U00016f8f\U00016f92\U00016ff0\U00016ff1\U0001bc9d\U0001bc9e\U0001cf00\U0001cf2d\U0001cf30\U0001cf46\U0001d165" + u"\U0001d169\U0001d16d\U0001d172\U0001d17b\U0001d182\U0001d185\U0001d18b\U0001d1aa\U0001d1ad\U0001d242\U0001d244" + u"\U0001d7ce\U0001d7ff\U0001da00\U0001da36\U0001da3b\U0001da6c\U0001da9b\U0001da9f\U0001daa1\U0001daaf\U0001e000" + u"\U0001e006\U0001e008\U0001e018\U0001e01b\U0001e021\U0001e023\U0001e024\U0001e026\U0001e02a\U0001e130\U0001e136" + u"\U0001e140\U0001e149\U0001e2ec\U0001e2f9\U0001e4ec\U0001e4f9\U0001e8d0\U0001e8d6\U0001e944\U0001e94a\U0001e950" + u"\U0001e959\U0001fbf0\U0001fbf9" +) + +# END GENERATED CODE diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/MemoryView.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/MemoryView.py new file mode 100644 index 0000000000000000000000000000000000000000..5ebd396be695efd62e112d7df63cc19eff2d4e66 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/MemoryView.py @@ -0,0 +1,863 @@ +from __future__ import absolute_import + +from .Errors import CompileError, error +from . import ExprNodes +from .ExprNodes import IntNode, NameNode, AttributeNode +from . import Options +from .Code import UtilityCode, TempitaUtilityCode +from .UtilityCode import CythonUtilityCode +from . import Buffer +from . import PyrexTypes +from . import ModuleNode + +START_ERR = "Start must not be given." 
+STOP_ERR = "Axis specification only allowed in the 'step' slot." +STEP_ERR = "Step must be omitted, 1, or a valid specifier." +BOTH_CF_ERR = "Cannot specify an array that is both C and Fortran contiguous." +INVALID_ERR = "Invalid axis specification." +NOT_CIMPORTED_ERR = "Variable was not cimported from cython.view" +EXPR_ERR = "no expressions allowed in axis spec, only names and literals." +CF_ERR = "Invalid axis specification for a C/Fortran contiguous array." +ERR_UNINITIALIZED = ("Cannot check if memoryview %s is initialized without the " + "GIL, consider using initializedcheck(False)") + + +format_flag = "PyBUF_FORMAT" + +memview_c_contiguous = "(PyBUF_C_CONTIGUOUS | PyBUF_FORMAT)" +memview_f_contiguous = "(PyBUF_F_CONTIGUOUS | PyBUF_FORMAT)" +memview_any_contiguous = "(PyBUF_ANY_CONTIGUOUS | PyBUF_FORMAT)" +memview_full_access = "PyBUF_FULL_RO" +#memview_strided_access = "PyBUF_STRIDED_RO" +memview_strided_access = "PyBUF_RECORDS_RO" + +MEMVIEW_DIRECT = '__Pyx_MEMVIEW_DIRECT' +MEMVIEW_PTR = '__Pyx_MEMVIEW_PTR' +MEMVIEW_FULL = '__Pyx_MEMVIEW_FULL' +MEMVIEW_CONTIG = '__Pyx_MEMVIEW_CONTIG' +MEMVIEW_STRIDED= '__Pyx_MEMVIEW_STRIDED' +MEMVIEW_FOLLOW = '__Pyx_MEMVIEW_FOLLOW' + +_spec_to_const = { + 'direct' : MEMVIEW_DIRECT, + 'ptr' : MEMVIEW_PTR, + 'full' : MEMVIEW_FULL, + 'contig' : MEMVIEW_CONTIG, + 'strided': MEMVIEW_STRIDED, + 'follow' : MEMVIEW_FOLLOW, + } + +_spec_to_abbrev = { + 'direct' : 'd', + 'ptr' : 'p', + 'full' : 'f', + 'contig' : 'c', + 'strided' : 's', + 'follow' : '_', +} + +memslice_entry_init = "{ 0, 0, { 0 }, { 0 }, { 0 } }" + +memview_name = u'memoryview' +memview_typeptr_cname = '__pyx_memoryview_type' +memview_objstruct_cname = '__pyx_memoryview_obj' +memviewslice_cname = u'__Pyx_memviewslice' + + +def put_init_entry(mv_cname, code): + code.putln("%s.data = NULL;" % mv_cname) + code.putln("%s.memview = NULL;" % mv_cname) + + +#def axes_to_str(axes): +# return "".join([access[0].upper()+packing[0] for (access, packing) in axes]) + + +def 
put_acquire_memoryviewslice(lhs_cname, lhs_type, lhs_pos, rhs, code, + have_gil=False, first_assignment=True): + "We can avoid decreffing the lhs if we know it is the first assignment" + assert rhs.type.is_memoryviewslice + + pretty_rhs = rhs.result_in_temp() or rhs.is_simple() + if pretty_rhs: + rhstmp = rhs.result() + else: + rhstmp = code.funcstate.allocate_temp(lhs_type, manage_ref=False) + code.putln("%s = %s;" % (rhstmp, rhs.result_as(lhs_type))) + + # Allow uninitialized assignment + #code.putln(code.put_error_if_unbound(lhs_pos, rhs.entry)) + put_assign_to_memviewslice(lhs_cname, rhs, rhstmp, lhs_type, code, + have_gil=have_gil, first_assignment=first_assignment) + + if not pretty_rhs: + code.funcstate.release_temp(rhstmp) + + +def put_assign_to_memviewslice(lhs_cname, rhs, rhs_cname, memviewslicetype, code, + have_gil=False, first_assignment=False): + if lhs_cname == rhs_cname: + # self assignment is tricky because memoryview xdecref clears the memoryview + # thus invalidating both sides of the assignment. 
Therefore make it actually do nothing + code.putln("/* memoryview self assignment no-op */") + return + + if not first_assignment: + code.put_xdecref(lhs_cname, memviewslicetype, + have_gil=have_gil) + + if not rhs.result_in_temp(): + rhs.make_owned_memoryviewslice(code) + + code.putln("%s = %s;" % (lhs_cname, rhs_cname)) + + +def get_buf_flags(specs): + is_c_contig, is_f_contig = is_cf_contig(specs) + + if is_c_contig: + return memview_c_contiguous + elif is_f_contig: + return memview_f_contiguous + + access, packing = zip(*specs) + + if 'full' in access or 'ptr' in access: + return memview_full_access + else: + return memview_strided_access + + +def insert_newaxes(memoryviewtype, n): + axes = [('direct', 'strided')] * n + axes.extend(memoryviewtype.axes) + return PyrexTypes.MemoryViewSliceType(memoryviewtype.dtype, axes) + + +def broadcast_types(src, dst): + n = abs(src.ndim - dst.ndim) + if src.ndim < dst.ndim: + return insert_newaxes(src, n), dst + else: + return src, insert_newaxes(dst, n) + + +def valid_memslice_dtype(dtype, i=0): + """ + Return whether type dtype can be used as the base type of a + memoryview slice. 
class MemoryViewSliceBufferEntry(Buffer.BufferEntry):
    """
    May be used during code generation time to be queried for
    shape/strides/suboffsets attributes, or to perform indexing or slicing.
    """
    def __init__(self, entry):
        # entry: a symbol-table entry whose type is a memoryview slice.
        self.entry = entry
        self.type = entry.type
        self.cname = entry.cname

        # C expression for the slice's raw data pointer.
        self.buf_ptr = "%s.data" % self.cname

        dtype = self.entry.type.dtype
        self.buf_ptr_type = PyrexTypes.CPtrType(dtype)
        self.init_attributes()

    def get_buf_suboffsetvars(self):
        # C expressions "<cname>.suboffsets[d]" for every dimension d.
        return self._for_all_ndim("%s.suboffsets[%d]")

    def get_buf_stridevars(self):
        # C expressions "<cname>.strides[d]" for every dimension d.
        return self._for_all_ndim("%s.strides[%d]")

    def get_buf_shapevars(self):
        # C expressions "<cname>.shape[d]" for every dimension d.
        return self._for_all_ndim("%s.shape[%d]")

    def generate_buffer_lookup_code(self, code, index_cnames):
        # Pair each index C-name with its dimension's (access, packing)
        # spec, then delegate to the generic lookup generator.
        axes = [(dim, index_cnames[dim], access, packing)
                for dim, (access, packing) in enumerate(self.type.axes)]
        return self._generate_buffer_lookup_code(code, axes)

    def _generate_buffer_lookup_code(self, code, axes, cast_result=True):
        """
        Generate a single expression that indexes the memory view slice
        in each dimension.
        """
        # The pointer expression is built up dimension by dimension,
        # each step wrapping the previous expression.
        bufp = self.buf_ptr
        type_decl = self.type.dtype.empty_declaration_code()

        for dim, index, access, packing in axes:
            shape = "%s.shape[%d]" % (self.cname, dim)
            stride = "%s.strides[%d]" % (self.cname, dim)
            suboffset = "%s.suboffsets[%d]" % (self.cname, dim)

            flag = get_memoryview_flag(access, packing)

            if flag in ("generic", "generic_contiguous"):
                # Note: we cannot do cast tricks to avoid stride multiplication
                # for generic_contiguous, as we may have to do (dtype *)
                # or (dtype **) arithmetic, we won't know which unless
                # we check suboffsets
                code.globalstate.use_utility_code(memviewslice_index_helpers)
                bufp = ('__pyx_memviewslice_index_full(%s, %s, %s, %s)' %
                        (bufp, index, stride, suboffset))

            elif flag == "indirect":
                bufp = "(%s + %s * %s)" % (bufp, index, stride)
                bufp = ("(*((char **) %s) + %s)" % (bufp, suboffset))

            elif flag == "indirect_contiguous":
                # Note: we do char ** arithmetic
                bufp = "(*((char **) %s + %s) + %s)" % (bufp, index, suboffset)

            elif flag == "strided":
                bufp = "(%s + %s * %s)" % (bufp, index, stride)

            else:
                # Contiguous dimension: plain typed-pointer arithmetic.
                assert flag == 'contiguous', flag
                bufp = '((char *) (((%s *) %s) + %s))' % (type_decl, bufp, index)

            bufp = '( /* dim=%d */ %s )' % (dim, bufp)

        if cast_result:
            return "((%s *) %s)" % (type_decl, bufp)

        return bufp

    def generate_buffer_slice_code(self, code, indices, dst, dst_type, have_gil,
                                   have_slices, directives):
        """
        Slice a memoryviewslice.

        indices - list of index nodes. If not a SliceNode, or NoneNode,
        then it must be coercible to Py_ssize_t

        Simply call __pyx_memoryview_slice_memviewslice with the right
        arguments, unless the dimension is omitted or a bare ':', in which
        case we copy over the shape/strides/suboffsets attributes directly
        for that dimension.
        """
        src = self.cname

        code.putln("%(dst)s.data = %(src)s.data;" % locals())
        code.putln("%(dst)s.memview = %(src)s.memview;" % locals())
        code.put_incref_memoryviewslice(dst, dst_type, have_gil=have_gil)

        # NOTE: several locals below (all_dimensions_direct, generic, shape
        # variables, ...) look unused but are captured via dict(locals())
        # into the Tempita template context further down.
        all_dimensions_direct = all(access == 'direct' for access, packing in self.type.axes)
        suboffset_dim_temp = []

        def get_suboffset_dim():
            # create global temp variable at request
            if not suboffset_dim_temp:
                suboffset_dim = code.funcstate.allocate_temp(PyrexTypes.c_int_type, manage_ref=False)
                code.putln("%s = -1;" % suboffset_dim)
                suboffset_dim_temp.append(suboffset_dim)
            return suboffset_dim_temp[0]

        # dim tracks the source dimension (newaxis entries consume none);
        # new_ndim tracks the destination dimension being produced.
        dim = -1
        new_ndim = 0
        for index in indices:
            if index.is_none:
                # newaxis
                for attrib, value in [('shape', 1), ('strides', 0), ('suboffsets', -1)]:
                    code.putln("%s.%s[%d] = %d;" % (dst, attrib, new_ndim, value))

                new_ndim += 1
                continue

            dim += 1
            access, packing = self.type.axes[dim]

            if index.is_slice:
                # slice, unspecified dimension, or part of ellipsis
                d = dict(locals())
                for s in "start stop step".split():
                    idx = getattr(index, s)
                    have_idx = d['have_' + s] = not idx.is_none
                    d[s] = idx.result() if have_idx else "0"

                if not (d['have_start'] or d['have_stop'] or d['have_step']):
                    # full slice (:), simply copy over the extent, stride
                    # and suboffset. Also update suboffset_dim if needed
                    d['access'] = access
                    util_name = "SimpleSlice"
                else:
                    util_name = "ToughSlice"
                    d['error_goto'] = code.error_goto(index.pos)

                new_ndim += 1
            else:
                # normal index
                idx = index.result()

                indirect = access != 'direct'
                if indirect:
                    generic = access == 'full'
                    if new_ndim != 0:
                        return error(index.pos,
                                     "All preceding dimensions must be "
                                     "indexed and not sliced")

                d = dict(
                    locals(),
                    wraparound=int(directives['wraparound']),
                    boundscheck=int(directives['boundscheck']),
                )
                if d['boundscheck']:
                    d['error_goto'] = code.error_goto(index.pos)
                util_name = "SliceIndex"

            # Instantiate the Tempita utility template for this dimension
            # and paste its C code directly into the output.
            _, impl = TempitaUtilityCode.load_as_string(util_name, "MemoryView_C.c", context=d)
            code.put(impl)

        if suboffset_dim_temp:
            code.funcstate.release_temp(suboffset_dim_temp[0])


def empty_slice(pos):
    # Build a bare "[:]" slice node (start/stop/step all None) at pos.
    none = ExprNodes.NoneNode(pos)
    return ExprNodes.SliceNode(pos, start=none,
                               stop=none, step=none)


def unellipsify(indices, ndim):
    # Expand an Ellipsis in an index tuple into the right number of full
    # slices, and pad with trailing full slices up to ndim.
    # Returns (have_slices, result_indices, newaxis_nodes).
    result = []
    seen_ellipsis = False
    have_slices = False

    # None entries introduce new axes and do not consume a source dim.
    newaxes = [newaxis for newaxis in indices if newaxis.is_none]
    n_indices = len(indices) - len(newaxes)

    for index in indices:
        if isinstance(index, ExprNodes.EllipsisNode):
            have_slices = True
            full_slice = empty_slice(index.pos)

            if seen_ellipsis:
                # Only the first ellipsis expands; later ones are a
                # single full slice.
                result.append(full_slice)
            else:
                nslices = ndim - n_indices + 1
                result.extend([full_slice] * nslices)
                seen_ellipsis = True
        else:
            have_slices = have_slices or index.is_slice or index.is_none
            result.append(index)

    # Pad with full slices for any unindexed trailing dimensions.
    result_length = len(result) - len(newaxes)
    if result_length < ndim:
        have_slices = True
        nslices = ndim - result_length
        result.extend([empty_slice(indices[-1].pos)] * nslices)

    return have_slices, result, newaxes
and packing in ('strided', 'follow'): + return 'indirect' + elif access == 'ptr' and packing == 'contig': + return 'indirect_contiguous' + elif access == 'direct' and packing in ('strided', 'follow'): + return 'strided' + else: + assert (access, packing) == ('direct', 'contig'), (access, packing) + return 'contiguous' + + +def get_is_contig_func_name(contig_type, ndim): + assert contig_type in ('C', 'F') + return "__pyx_memviewslice_is_contig_%s%d" % (contig_type, ndim) + + +def get_is_contig_utility(contig_type, ndim): + assert contig_type in ('C', 'F') + C = dict(context, ndim=ndim, contig_type=contig_type) + utility = load_memview_c_utility("MemviewSliceCheckContig", C, requires=[is_contig_utility]) + return utility + + +def slice_iter(slice_type, slice_result, ndim, code, force_strided=False): + if (slice_type.is_c_contig or slice_type.is_f_contig) and not force_strided: + return ContigSliceIter(slice_type, slice_result, ndim, code) + else: + return StridedSliceIter(slice_type, slice_result, ndim, code) + + +class SliceIter(object): + def __init__(self, slice_type, slice_result, ndim, code): + self.slice_type = slice_type + self.slice_result = slice_result + self.code = code + self.ndim = ndim + + +class ContigSliceIter(SliceIter): + def start_loops(self): + code = self.code + code.begin_block() + + type_decl = self.slice_type.dtype.empty_declaration_code() + + total_size = ' * '.join("%s.shape[%d]" % (self.slice_result, i) + for i in range(self.ndim)) + code.putln("Py_ssize_t __pyx_temp_extent = %s;" % total_size) + code.putln("Py_ssize_t __pyx_temp_idx;") + code.putln("%s *__pyx_temp_pointer = (%s *) %s.data;" % ( + type_decl, type_decl, self.slice_result)) + code.putln("for (__pyx_temp_idx = 0; " + "__pyx_temp_idx < __pyx_temp_extent; " + "__pyx_temp_idx++) {") + + return "__pyx_temp_pointer" + + def end_loops(self): + self.code.putln("__pyx_temp_pointer += 1;") + self.code.putln("}") + self.code.end_block() + + +class StridedSliceIter(SliceIter): + def 
start_loops(self): + code = self.code + code.begin_block() + + for i in range(self.ndim): + t = i, self.slice_result, i + code.putln("Py_ssize_t __pyx_temp_extent_%d = %s.shape[%d];" % t) + code.putln("Py_ssize_t __pyx_temp_stride_%d = %s.strides[%d];" % t) + code.putln("char *__pyx_temp_pointer_%d;" % i) + code.putln("Py_ssize_t __pyx_temp_idx_%d;" % i) + + code.putln("__pyx_temp_pointer_0 = %s.data;" % self.slice_result) + + for i in range(self.ndim): + if i > 0: + code.putln("__pyx_temp_pointer_%d = __pyx_temp_pointer_%d;" % (i, i - 1)) + + code.putln("for (__pyx_temp_idx_%d = 0; " + "__pyx_temp_idx_%d < __pyx_temp_extent_%d; " + "__pyx_temp_idx_%d++) {" % (i, i, i, i)) + + return "__pyx_temp_pointer_%d" % (self.ndim - 1) + + def end_loops(self): + code = self.code + for i in range(self.ndim - 1, -1, -1): + code.putln("__pyx_temp_pointer_%d += __pyx_temp_stride_%d;" % (i, i)) + code.putln("}") + + code.end_block() + + +def copy_c_or_fortran_cname(memview): + if memview.is_c_contig: + c_or_f = 'c' + else: + c_or_f = 'f' + + return "__pyx_memoryview_copy_slice_%s_%s" % ( + memview.specialization_suffix(), c_or_f) + + +def get_copy_new_utility(pos, from_memview, to_memview): + if (from_memview.dtype != to_memview.dtype and + not (from_memview.dtype.is_cv_qualified and from_memview.dtype.cv_base_type == to_memview.dtype)): + error(pos, "dtypes must be the same!") + return + if len(from_memview.axes) != len(to_memview.axes): + error(pos, "number of dimensions must be same") + return + if not (to_memview.is_c_contig or to_memview.is_f_contig): + error(pos, "to_memview must be c or f contiguous.") + return + + for (access, packing) in from_memview.axes: + if access != 'direct': + error(pos, "cannot handle 'full' or 'ptr' access at this time.") + return + + if to_memview.is_c_contig: + mode = 'c' + contig_flag = memview_c_contiguous + else: + assert to_memview.is_f_contig + mode = 'fortran' + contig_flag = memview_f_contiguous + + return load_memview_c_utility( + 
"CopyContentsUtility", + context=dict( + context, + mode=mode, + dtype_decl=to_memview.dtype.empty_declaration_code(), + contig_flag=contig_flag, + ndim=to_memview.ndim, + func_cname=copy_c_or_fortran_cname(to_memview), + dtype_is_object=int(to_memview.dtype.is_pyobject)), + requires=[copy_contents_new_utility]) + + +def get_axes_specs(env, axes): + ''' + get_axes_specs(env, axes) -> list of (access, packing) specs for each axis. + access is one of 'full', 'ptr' or 'direct' + packing is one of 'contig', 'strided' or 'follow' + ''' + + cythonscope = env.global_scope().context.cython_scope + cythonscope.load_cythonscope() + viewscope = cythonscope.viewscope + + access_specs = tuple([viewscope.lookup(name) + for name in ('full', 'direct', 'ptr')]) + packing_specs = tuple([viewscope.lookup(name) + for name in ('contig', 'strided', 'follow')]) + + is_f_contig, is_c_contig = False, False + default_access, default_packing = 'direct', 'strided' + cf_access, cf_packing = default_access, 'follow' + + axes_specs = [] + # analyse all axes. + for idx, axis in enumerate(axes): + if not axis.start.is_none: + raise CompileError(axis.start.pos, START_ERR) + + if not axis.stop.is_none: + raise CompileError(axis.stop.pos, STOP_ERR) + + if axis.step.is_none: + axes_specs.append((default_access, default_packing)) + + elif isinstance(axis.step, IntNode): + # the packing for the ::1 axis is contiguous, + # all others are cf_packing. 
+ if axis.step.compile_time_value(env) != 1: + raise CompileError(axis.step.pos, STEP_ERR) + + axes_specs.append((cf_access, 'cfcontig')) + + elif isinstance(axis.step, (NameNode, AttributeNode)): + entry = _get_resolved_spec(env, axis.step) + if entry.name in view_constant_to_access_packing: + axes_specs.append(view_constant_to_access_packing[entry.name]) + else: + raise CompileError(axis.step.pos, INVALID_ERR) + + else: + raise CompileError(axis.step.pos, INVALID_ERR) + + # First, find out if we have a ::1 somewhere + contig_dim = 0 + is_contig = False + for idx, (access, packing) in enumerate(axes_specs): + if packing == 'cfcontig': + if is_contig: + raise CompileError(axis.step.pos, BOTH_CF_ERR) + + contig_dim = idx + axes_specs[idx] = (access, 'contig') + is_contig = True + + if is_contig: + # We have a ::1 somewhere, see if we're C or Fortran contiguous + if contig_dim == len(axes) - 1: + is_c_contig = True + else: + is_f_contig = True + + if contig_dim and not axes_specs[contig_dim - 1][0] in ('full', 'ptr'): + raise CompileError(axes[contig_dim].pos, + "Fortran contiguous specifier must follow an indirect dimension") + + if is_c_contig: + # Contiguous in the last dimension, find the last indirect dimension + contig_dim = -1 + for idx, (access, packing) in enumerate(reversed(axes_specs)): + if access in ('ptr', 'full'): + contig_dim = len(axes) - idx - 1 + + # Replace 'strided' with 'follow' for any dimension following the last + # indirect dimension, the first dimension or the dimension following + # the ::1. 
+ # int[::indirect, ::1, :, :] + # ^ ^ + # int[::indirect, :, :, ::1] + # ^ ^ + start = contig_dim + 1 + stop = len(axes) - is_c_contig + for idx, (access, packing) in enumerate(axes_specs[start:stop]): + idx = contig_dim + 1 + idx + if access != 'direct': + raise CompileError(axes[idx].pos, + "Indirect dimension may not follow " + "Fortran contiguous dimension") + if packing == 'contig': + raise CompileError(axes[idx].pos, + "Dimension may not be contiguous") + axes_specs[idx] = (access, cf_packing) + + if is_c_contig: + # For C contiguity, we need to fix the 'contig' dimension + # after the loop + a, p = axes_specs[-1] + axes_specs[-1] = a, 'contig' + + validate_axes_specs([axis.start.pos for axis in axes], + axes_specs, + is_c_contig, + is_f_contig) + + return axes_specs + + +def validate_axes(pos, axes): + if len(axes) >= Options.buffer_max_dims: + error(pos, "More dimensions than the maximum number" + " of buffer dimensions were used.") + return False + + return True + + +def is_cf_contig(specs): + is_c_contig = is_f_contig = False + + if len(specs) == 1 and specs == [('direct', 'contig')]: + is_c_contig = True + + elif (specs[-1] == ('direct','contig') and + all(axis == ('direct','follow') for axis in specs[:-1])): + # c_contiguous: 'follow', 'follow', ..., 'follow', 'contig' + is_c_contig = True + + elif (len(specs) > 1 and + specs[0] == ('direct','contig') and + all(axis == ('direct','follow') for axis in specs[1:])): + # f_contiguous: 'contig', 'follow', 'follow', ..., 'follow' + is_f_contig = True + + return is_c_contig, is_f_contig + + +def get_mode(specs): + is_c_contig, is_f_contig = is_cf_contig(specs) + + if is_c_contig: + return 'c' + elif is_f_contig: + return 'fortran' + + for access, packing in specs: + if access in ('ptr', 'full'): + return 'full' + + return 'strided' + +view_constant_to_access_packing = { + 'generic': ('full', 'strided'), + 'strided': ('direct', 'strided'), + 'indirect': ('ptr', 'strided'), + 'generic_contiguous': ('full', 
'contig'), + 'contiguous': ('direct', 'contig'), + 'indirect_contiguous': ('ptr', 'contig'), +} + +def validate_axes_specs(positions, specs, is_c_contig, is_f_contig): + + packing_specs = ('contig', 'strided', 'follow') + access_specs = ('direct', 'ptr', 'full') + + # is_c_contig, is_f_contig = is_cf_contig(specs) + + has_contig = has_follow = has_strided = has_generic_contig = False + + last_indirect_dimension = -1 + for idx, (access, packing) in enumerate(specs): + if access == 'ptr': + last_indirect_dimension = idx + + for idx, (pos, (access, packing)) in enumerate(zip(positions, specs)): + + if not (access in access_specs and + packing in packing_specs): + raise CompileError(pos, "Invalid axes specification.") + + if packing == 'strided': + has_strided = True + elif packing == 'contig': + if has_contig: + raise CompileError(pos, "Only one direct contiguous " + "axis may be specified.") + + valid_contig_dims = last_indirect_dimension + 1, len(specs) - 1 + if idx not in valid_contig_dims and access != 'ptr': + if last_indirect_dimension + 1 != len(specs) - 1: + dims = "dimensions %d and %d" % valid_contig_dims + else: + dims = "dimension %d" % valid_contig_dims[0] + + raise CompileError(pos, "Only %s may be contiguous and direct" % dims) + + has_contig = access != 'ptr' + elif packing == 'follow': + if has_strided: + raise CompileError(pos, "A memoryview cannot have both follow and strided axis specifiers.") + if not (is_c_contig or is_f_contig): + raise CompileError(pos, "Invalid use of the follow specifier.") + + if access in ('ptr', 'full'): + has_strided = False + +def _get_resolved_spec(env, spec): + # spec must be a NameNode or an AttributeNode + if isinstance(spec, NameNode): + return _resolve_NameNode(env, spec) + elif isinstance(spec, AttributeNode): + return _resolve_AttributeNode(env, spec) + else: + raise CompileError(spec.pos, INVALID_ERR) + +def _resolve_NameNode(env, node): + try: + resolved_name = env.lookup(node.name).name + except 
AttributeError: + raise CompileError(node.pos, INVALID_ERR) + + viewscope = env.global_scope().context.cython_scope.viewscope + entry = viewscope.lookup(resolved_name) + if entry is None: + raise CompileError(node.pos, NOT_CIMPORTED_ERR) + + return entry + +def _resolve_AttributeNode(env, node): + path = [] + while isinstance(node, AttributeNode): + path.insert(0, node.attribute) + node = node.obj + if isinstance(node, NameNode): + path.insert(0, node.name) + else: + raise CompileError(node.pos, EXPR_ERR) + modnames = path[:-1] + # must be at least 1 module name, o/w not an AttributeNode. + assert modnames + + scope = env + for modname in modnames: + mod = scope.lookup(modname) + if not mod or not mod.as_module: + raise CompileError( + node.pos, "undeclared name not builtin: %s" % modname) + scope = mod.as_module + + entry = scope.lookup(path[-1]) + if not entry: + raise CompileError(node.pos, "No such attribute '%s'" % path[-1]) + + return entry + +# +### Utility loading +# + +def load_memview_cy_utility(util_code_name, context=None, **kwargs): + return CythonUtilityCode.load(util_code_name, "MemoryView.pyx", + context=context, **kwargs) + +def load_memview_c_utility(util_code_name, context=None, **kwargs): + if context is None: + return UtilityCode.load(util_code_name, "MemoryView_C.c", **kwargs) + else: + return TempitaUtilityCode.load(util_code_name, "MemoryView_C.c", + context=context, **kwargs) + +def use_cython_array_utility_code(env): + cython_scope = env.global_scope().context.cython_scope + cython_scope.load_cythonscope() + cython_scope.viewscope.lookup('array_cwrapper').used = True + +context = { + 'memview_struct_name': memview_objstruct_cname, + 'max_dims': Options.buffer_max_dims, + 'memviewslice_name': memviewslice_cname, + 'memslice_init': PyrexTypes.MemoryViewSliceType.default_value, + 'THREAD_LOCKS_PREALLOCATED': 8, +} +memviewslice_declare_code = load_memview_c_utility( + "MemviewSliceStruct", + context=context, + requires=[]) + +atomic_utility = 
load_memview_c_utility("Atomics", context) + +memviewslice_init_code = load_memview_c_utility( + "MemviewSliceInit", + context=dict(context, BUF_MAX_NDIMS=Options.buffer_max_dims), + requires=[memviewslice_declare_code, + atomic_utility], +) + +memviewslice_index_helpers = load_memview_c_utility("MemviewSliceIndex") + +typeinfo_to_format_code = load_memview_cy_utility( + "BufferFormatFromTypeInfo", requires=[Buffer._typeinfo_to_format_code]) + +is_contig_utility = load_memview_c_utility("MemviewSliceIsContig", context) +overlapping_utility = load_memview_c_utility("OverlappingSlices", context) +copy_contents_new_utility = load_memview_c_utility( + "MemviewSliceCopyTemplate", + context, + requires=[], # require cython_array_utility_code +) + +view_utility_code = load_memview_cy_utility( + "View.MemoryView", + context=context, + requires=[Buffer.GetAndReleaseBufferUtilityCode(), + Buffer.buffer_struct_declare_code, + Buffer.buffer_formats_declare_code, + memviewslice_init_code, + is_contig_utility, + overlapping_utility, + copy_contents_new_utility, + ], +) +view_utility_allowlist = ('array', 'memoryview', 'array_cwrapper', + 'generic', 'strided', 'indirect', 'contiguous', + 'indirect_contiguous') + +memviewslice_declare_code.requires.append(view_utility_code) +copy_contents_new_utility.requires.append(view_utility_code) diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/ModuleNode.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/ModuleNode.py new file mode 100644 index 0000000000000000000000000000000000000000..43c6b5f074e50a540730bb2e162d47abd9669866 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/ModuleNode.py @@ -0,0 +1,4029 @@ +# +# Module parse tree node +# + +from __future__ import absolute_import + +import cython +cython.declare(Naming=object, Options=object, PyrexTypes=object, TypeSlots=object, + error=object, warning=object, 
py_object_type=object, UtilityCode=object, + EncodedString=object, re=object) + +from collections import defaultdict +import json +import operator +import os +import re +import sys + +from .PyrexTypes import CPtrType +from . import Future +from . import Annotate +from . import Code +from . import Naming +from . import Nodes +from . import Options +from . import TypeSlots +from . import PyrexTypes +from . import Pythran + +from .Errors import error, warning, CompileError +from .PyrexTypes import py_object_type +from ..Utils import open_new_file, replace_suffix, decode_filename, build_hex_version, is_cython_generated_file +from .Code import UtilityCode, IncludeCode, TempitaUtilityCode +from .StringEncoding import EncodedString, encoded_string_or_bytes_literal +from .Pythran import has_np_pythran + + +def replace_suffix_encoded(path, newsuf): + # calls replace suffix and returns a EncodedString or BytesLiteral with the encoding set + newpath = replace_suffix(path, newsuf) + return as_encoded_filename(newpath) + +def as_encoded_filename(path): + # wraps the path with either EncodedString or BytesLiteral (depending on its input type) + # and sets the encoding to the file system encoding + return encoded_string_or_bytes_literal(path, sys.getfilesystemencoding()) + + +def check_c_declarations_pxd(module_node): + module_node.scope.check_c_classes_pxd() + return module_node + + +def check_c_declarations(module_node): + module_node.scope.check_c_classes() + module_node.scope.check_c_functions() + return module_node + + +def generate_c_code_config(env, options): + if Options.annotate or options.annotate: + emit_linenums = False + else: + emit_linenums = options.emit_linenums + + if hasattr(options, "emit_code_comments"): + print('Warning: option emit_code_comments is deprecated. 
' + 'Instead, use compiler directive emit_code_comments.') + + return Code.CCodeConfig( + emit_linenums=emit_linenums, + emit_code_comments=env.directives['emit_code_comments'], + c_line_in_traceback=options.c_line_in_traceback) + +# The code required to generate one comparison from another. +# The keys are (from, to). +# The comparison operator always goes first, with equality possibly second. +# The first value specifies if the comparison is inverted. The second is the +# logic op to use, and the third is if the equality is inverted or not. +TOTAL_ORDERING = { + # a > b from (not a < b) and (a != b) + ('__lt__', '__gt__'): (True, '&&', True), + # a <= b from (a < b) or (a == b) + ('__lt__', '__le__'): (False, '||', False), + # a >= b from (not a < b). + ('__lt__', '__ge__'): (True, '', None), + + # a >= b from (not a <= b) or (a == b) + ('__le__', '__ge__'): (True, '||', False), + # a < b, from (a <= b) and (a != b) + ('__le__', '__lt__'): (False, '&&', True), + # a > b from (not a <= b) + ('__le__', '__gt__'): (True, '', None), + + # a < b from (not a > b) and (a != b) + ('__gt__', '__lt__'): (True, '&&', True), + # a >= b from (a > b) or (a == b) + ('__gt__', '__ge__'): (False, '||', False), + # a <= b from (not a > b) + ('__gt__', '__le__'): (True, '', None), + + # Return a <= b from (not a >= b) or (a == b) + ('__ge__', '__le__'): (True, '||', False), + # a > b from (a >= b) and (a != b) + ('__ge__', '__gt__'): (False, '&&', True), + # a < b from (not a >= b) + ('__ge__', '__lt__'): (True, '', None), +} + + +class ModuleNode(Nodes.Node, Nodes.BlockNode): + # doc string or None + # body StatListNode + # + # referenced_modules [ModuleScope] + # full_module_name string + # + # scope The module scope. 
+ # compilation_source A CompilationSource (see Main) + # directives Top-level compiler directives + + child_attrs = ["body"] + directives = None + # internal - used in merging + pxd_stats = None + utility_code_stats = None + + + def merge_in(self, tree, scope, stage, merge_scope=False): + # Merges in the contents of another tree, and possibly scope. With the + # current implementation below, this must be done right prior + # to code generation. + # Stage is one of "pxd" or "utility" to indicate pxd file or utility + # code. This helps define the order. + # + # Note: This way of doing it seems strange -- I believe the + # right concept is to split ModuleNode into a ModuleNode and a + # CodeGenerator, and tell that CodeGenerator to generate code + # from multiple sources. + assert isinstance(self.body, Nodes.StatListNode) + assert stage in ('pxd', 'utility') + + if self.pxd_stats is None: + self.pxd_stats = Nodes.StatListNode(self.body.pos, stats=[]) + self.utility_code_stats = Nodes.StatListNode(self.body.pos, stats=[]) + self.body.stats.insert(0, self.pxd_stats) + self.body.stats.insert(0, self.utility_code_stats) + + if scope.directives != self.scope.directives: + # merged in nodes should keep their original compiler directives + # (for example inline cdef functions) + tree = Nodes.CompilerDirectivesNode(tree.pos, body=tree, directives=scope.directives) + + target_stats = self.pxd_stats if stage == "pxd" else self.utility_code_stats + if isinstance(tree, Nodes.StatListNode): + target_stats.stats.extend(tree.stats) + else: + target_stats.stats.append(tree) + + self.scope.utility_code_list.extend(scope.utility_code_list) + + for inc in scope.c_includes.values(): + self.scope.process_include(inc) + + def extend_if_not_in(L1, L2): + for x in L2: + if x not in L1: + L1.append(x) + + extend_if_not_in(self.scope.included_files, scope.included_files) + + if merge_scope: + # Ensure that we don't generate import code for these entries! 
+ for entry in scope.c_class_entries: + entry.type.module_name = self.full_module_name + entry.type.scope.directives["internal"] = True + + self.scope.merge_in(scope) + + def with_compiler_directives(self): + # When merging a utility code module into the user code we need to preserve + # the original compiler directives. This returns the body of the module node, + # wrapped in its set of directives. + body = Nodes.CompilerDirectivesNode(self.pos, directives=self.directives, body=self.body) + return body + + def analyse_declarations(self, env): + if has_np_pythran(env): + Pythran.include_pythran_generic(env) + if self.directives: + env.old_style_globals = self.directives['old_style_globals'] + if not Options.docstrings: + env.doc = self.doc = None + elif Options.embed_pos_in_docstring: + env.doc = EncodedString(u'File: %s (starting at line %s)' % Nodes.relative_position(self.pos)) + if self.doc is not None: + env.doc = EncodedString(env.doc + u'\n' + self.doc) + env.doc.encoding = self.doc.encoding + else: + env.doc = self.doc + env.directives = self.directives + + self.body.analyse_declarations(env) + + def prepare_utility_code(self): + # prepare any utility code that must be created before code generation + # specifically: CythonUtilityCode + env = self.scope + if env.has_import_star: + self.create_import_star_conversion_utility_code(env) + for name, entry in sorted(env.entries.items()): + if (entry.create_wrapper and entry.scope is env + and entry.is_type and (entry.type.is_enum or entry.type.is_cpp_enum)): + entry.type.create_type_wrapper(env) + + def process_implementation(self, options, result): + env = self.scope + env.return_type = PyrexTypes.c_void_type + self.referenced_modules = [] + self.find_referenced_modules(env, self.referenced_modules, {}) + self.sort_cdef_classes(env) + self.generate_c_code(env, options, result) + self.generate_h_code(env, options, result) + self.generate_api_code(env, options, result) + + def has_imported_c_functions(self): + for 
module in self.referenced_modules: + for entry in module.cfunc_entries: + if entry.defined_in_pxd: + return 1 + return 0 + + def assure_safe_target(self, path, allow_failed=False): + # Check for a common gotcha for new users: naming your .pyx file after the .c file you want to wrap + if not is_cython_generated_file(path, allow_failed=allow_failed, if_not_found=True): + # Raising a fatal CompileError instead of calling error() to prevent castrating an existing file. + raise CompileError( + self.pos, 'The output file already exists and does not look like it was generated by Cython: "%s"' % + os.path.basename(path)) + + def generate_h_code(self, env, options, result): + def h_entries(entries, api=0, pxd=0): + return [entry for entry in entries + if ((entry.visibility == 'public') or + (api and entry.api) or + (pxd and entry.defined_in_pxd))] + h_types = h_entries(env.type_entries, api=1) + h_vars = h_entries(env.var_entries) + h_funcs = h_entries(env.cfunc_entries) + h_extension_types = h_entries(env.c_class_entries) + + if h_types or h_vars or h_funcs or h_extension_types: + result.h_file = replace_suffix_encoded(result.c_file, ".h") + self.assure_safe_target(result.h_file) + + h_code_writer = Code.CCodeWriter() + c_code_config = generate_c_code_config(env, options) + globalstate = Code.GlobalState(h_code_writer, self, c_code_config) + globalstate.initialize_main_h_code() # in-case utility code is used in the header + h_code_start = globalstate.parts['h_code'] + h_code_main = globalstate.parts['type_declarations'] + h_code_end = globalstate.parts['end'] + if options.generate_pxi: + result.i_file = replace_suffix_encoded(result.c_file, ".pxi") + i_code = Code.PyrexCodeWriter(result.i_file) + else: + i_code = None + + h_code_start.put_generated_by() + h_guard = self.api_name(Naming.h_guard_prefix, env) + h_code_start.put_h_guard(h_guard) + h_code_start.putln("") + h_code_start.putln('#include "Python.h"') + self.generate_type_header_code(h_types, h_code_start) + if 
options.capi_reexport_cincludes: + self.generate_includes(env, [], h_code_start) + h_code_start.putln("") + api_guard = self.api_name(Naming.api_guard_prefix, env) + h_code_start.putln("#ifndef %s" % api_guard) + h_code_start.putln("") + self.generate_extern_c_macro_definition(h_code_start, env.is_cpp()) + h_code_start.putln("") + self.generate_dl_import_macro(h_code_start) + if h_extension_types: + h_code_main.putln("") + for entry in h_extension_types: + self.generate_cclass_header_code(entry.type, h_code_main) + if i_code: + self.generate_cclass_include_code(entry.type, i_code) + if h_funcs: + h_code_main.putln("") + for entry in h_funcs: + self.generate_public_declaration(entry, h_code_main, i_code) + if h_vars: + h_code_main.putln("") + for entry in h_vars: + self.generate_public_declaration(entry, h_code_main, i_code) + h_code_main.putln("") + h_code_main.putln("#endif /* !%s */" % api_guard) + h_code_main.putln("") + h_code_main.putln("/* WARNING: the interface of the module init function changed in CPython 3.5. */") + h_code_main.putln("/* It now returns a PyModuleDef instance instead of a PyModule instance. */") + h_code_main.putln("") + h_code_main.putln("#if PY_MAJOR_VERSION < 3") + if env.module_name.isascii(): + py2_mod_name = env.module_name + else: + py2_mod_name = env.module_name.encode("ascii", errors="ignore").decode("utf-8") + h_code_main.putln('#error "Unicode module names are not supported in Python 2";') + h_code_main.putln("PyMODINIT_FUNC init%s(void);" % py2_mod_name) + h_code_main.putln("#else") + py3_mod_func_name = self.mod_init_func_cname('PyInit', env) + warning_string = EncodedString('Use PyImport_AppendInittab("%s", %s) instead of calling %s directly.' 
% ( + py2_mod_name, py3_mod_func_name, py3_mod_func_name)) + h_code_main.putln('/* WARNING: %s from Python 3.5 */' % warning_string.rstrip('.')) + h_code_main.putln("PyMODINIT_FUNC %s(void);" % py3_mod_func_name) + h_code_main.putln("") + h_code_main.putln("#if PY_VERSION_HEX >= 0x03050000 " + "&& (defined(__GNUC__) || defined(__clang__) || defined(_MSC_VER) " + "|| (defined(__cplusplus) && __cplusplus >= 201402L))") + h_code_main.putln("#if defined(__cplusplus) && __cplusplus >= 201402L") + h_code_main.putln("[[deprecated(%s)]] inline" % warning_string.as_c_string_literal()) + h_code_main.putln("#elif defined(__GNUC__) || defined(__clang__)") + h_code_main.putln('__attribute__ ((__deprecated__(%s), __unused__)) __inline__' % ( + warning_string.as_c_string_literal())) + h_code_main.putln("#elif defined(_MSC_VER)") + h_code_main.putln('__declspec(deprecated(%s)) __inline' % ( + warning_string.as_c_string_literal())) + h_code_main.putln('#endif') + h_code_main.putln("static PyObject* __PYX_WARN_IF_%s_INIT_CALLED(PyObject* res) {" % py3_mod_func_name) + h_code_main.putln("return res;") + h_code_main.putln("}") + # Function call is converted to warning macro; uncalled (pointer) is not + h_code_main.putln('#define %s() __PYX_WARN_IF_%s_INIT_CALLED(%s())' % ( + py3_mod_func_name, py3_mod_func_name, py3_mod_func_name)) + h_code_main.putln('#endif') + h_code_main.putln('#endif') + + h_code_end.putln("") + h_code_end.putln("#endif /* !%s */" % h_guard) + + with open_new_file(result.h_file) as f: + h_code_writer.copyto(f) + + def generate_public_declaration(self, entry, h_code, i_code): + h_code.putln("%s %s;" % ( + Naming.extern_c_macro, + entry.type.declaration_code(entry.cname))) + if i_code: + i_code.putln("cdef extern %s" % ( + entry.type.declaration_code(entry.cname, pyrex=1))) + + def api_name(self, prefix, env): + api_name = self.punycode_module_name(prefix, env.qualified_name) + return api_name.replace(".", "__") + + def generate_api_code(self, env, options, 
result): + def api_entries(entries, pxd=0): + return [entry for entry in entries + if entry.api or (pxd and entry.defined_in_pxd)] + api_vars = api_entries(env.var_entries) + api_funcs = api_entries(env.cfunc_entries) + api_extension_types = api_entries(env.c_class_entries) + + if api_vars or api_funcs or api_extension_types: + result.api_file = replace_suffix_encoded(result.c_file, "_api.h") + self.assure_safe_target(result.api_file) + + h_code = Code.CCodeWriter() + c_code_config = generate_c_code_config(env, options) + Code.GlobalState(h_code, self, c_code_config) + h_code.put_generated_by() + api_guard = self.api_name(Naming.api_guard_prefix, env) + h_code.put_h_guard(api_guard) + # Work around https://bugs.python.org/issue4709 + h_code.putln('#ifdef __MINGW64__') + h_code.putln('#define MS_WIN64') + h_code.putln('#endif') + + h_code.putln('#include "Python.h"') + if result.h_file: + h_filename = os.path.basename(result.h_file) + h_filename = as_encoded_filename(h_filename) + h_code.putln('#include %s' % h_filename.as_c_string_literal()) + if api_extension_types: + h_code.putln("") + for entry in api_extension_types: + type = entry.type + h_code.putln("static PyTypeObject *%s = 0;" % type.typeptr_cname) + h_code.putln("#define %s (*%s)" % ( + type.typeobj_cname, type.typeptr_cname)) + if api_funcs: + h_code.putln("") + for entry in api_funcs: + type = CPtrType(entry.type) + cname = env.mangle(Naming.func_prefix_api, entry.name) + h_code.putln("static %s = 0;" % type.declaration_code(cname)) + h_code.putln("#define %s %s" % (entry.name, cname)) + if api_vars: + h_code.putln("") + for entry in api_vars: + type = CPtrType(entry.type) + cname = env.mangle(Naming.varptr_prefix_api, entry.name) + h_code.putln("static %s = 0;" % type.declaration_code(cname)) + h_code.putln("#define %s (*%s)" % (entry.name, cname)) + if api_vars: + h_code.put(UtilityCode.load_as_string("VoidPtrImport", "ImportExport.c")[1]) + if api_funcs: + 
h_code.put(UtilityCode.load_as_string("FunctionImport", "ImportExport.c")[1]) + if api_extension_types: + h_code.put(UtilityCode.load_as_string("TypeImport", "ImportExport.c")[0]) + h_code.put(UtilityCode.load_as_string("TypeImport", "ImportExport.c")[1]) + h_code.putln("") + h_code.putln("static int %s(void) {" % self.api_name("import", env)) + h_code.putln("PyObject *module = 0;") + h_code.putln('module = PyImport_ImportModule(%s);' % env.qualified_name.as_c_string_literal()) + h_code.putln("if (!module) goto bad;") + for entry in api_funcs: + cname = env.mangle(Naming.func_prefix_api, entry.name) + sig = entry.type.signature_string() + h_code.putln( + 'if (__Pyx_ImportFunction_%s(module, %s, (void (**)(void))&%s, "%s") < 0) goto bad;' + % (Naming.cyversion, entry.name.as_c_string_literal(), cname, sig)) + for entry in api_vars: + cname = env.mangle(Naming.varptr_prefix_api, entry.name) + sig = entry.type.empty_declaration_code() + h_code.putln( + 'if (__Pyx_ImportVoidPtr_%s(module, %s, (void **)&%s, "%s") < 0) goto bad;' + % (Naming.cyversion, entry.name.as_c_string_literal(), cname, sig)) + with ModuleImportGenerator(h_code, imported_modules={env.qualified_name: 'module'}) as import_generator: + for entry in api_extension_types: + self.generate_type_import_call(entry.type, h_code, import_generator, error_code="goto bad;") + h_code.putln("Py_DECREF(module); module = 0;") + h_code.putln("return 0;") + h_code.putln("bad:") + h_code.putln("Py_XDECREF(module);") + h_code.putln("return -1;") + h_code.putln("}") + h_code.putln("") + h_code.putln("#endif /* !%s */" % api_guard) + + f = open_new_file(result.api_file) + try: + h_code.copyto(f) + finally: + f.close() + + def generate_cclass_header_code(self, type, h_code): + h_code.putln("%s %s %s;" % ( + Naming.extern_c_macro, + PyrexTypes.public_decl("PyTypeObject", "DL_IMPORT"), + type.typeobj_cname)) + + def generate_cclass_include_code(self, type, i_code): + i_code.putln("cdef extern class %s.%s:" % ( + 
type.module_name, type.name)) + i_code.indent() + var_entries = type.scope.var_entries + if var_entries: + for entry in var_entries: + i_code.putln("cdef %s" % ( + entry.type.declaration_code(entry.cname, pyrex=1))) + else: + i_code.putln("pass") + i_code.dedent() + + def generate_c_code(self, env, options, result): + self.assure_safe_target(result.c_file, allow_failed=True) + modules = self.referenced_modules + + if Options.annotate or options.annotate: + show_entire_c_code = Options.annotate == "fullc" or options.annotate == "fullc" + rootwriter = Annotate.AnnotationCCodeWriter( + show_entire_c_code=show_entire_c_code, + source_desc=self.compilation_source.source_desc, + ) + else: + rootwriter = Code.CCodeWriter() + + c_code_config = generate_c_code_config(env, options) + + globalstate = Code.GlobalState( + rootwriter, self, + code_config=c_code_config, + common_utility_include_dir=options.common_utility_include_dir, + ) + globalstate.initialize_main_c_code() + h_code = globalstate['h_code'] + + self.generate_module_preamble(env, options, modules, result.embedded_metadata, h_code) + + globalstate.module_pos = self.pos + globalstate.directives = self.directives + + globalstate.use_utility_code(refnanny_utility_code) + + code = globalstate['before_global_var'] + code.putln('#define __Pyx_MODULE_NAME %s' % + self.full_module_name.as_c_string_literal()) + module_is_main = self.is_main_module_flag_cname() + code.putln("extern int %s;" % module_is_main) + code.putln("int %s = 0;" % module_is_main) + code.putln("") + code.putln("/* Implementation of %s */" % env.qualified_name.as_c_string_literal()) + + code = globalstate['late_includes'] + self.generate_includes(env, modules, code, early=False) + + code = globalstate['module_code'] + + self.generate_cached_builtins_decls(env, code) + + # generate normal variable and function definitions + self.generate_lambda_definitions(env, code) + self.generate_variable_definitions(env, code) + 
self.body.generate_function_definitions(env, code) + + code.mark_pos(None) + self.generate_typeobj_definitions(env, code) + self.generate_method_table(env, code) + if env.has_import_star: + self.generate_import_star(env, code) + + # initialise the macro to reduce the code size of one-time functionality + code.putln(UtilityCode.load_as_string("SmallCodeConfig", "ModuleSetupCode.c")[0].strip()) + + self.generate_module_state_start(env, globalstate['module_state']) + self.generate_module_state_defines(env, globalstate['module_state_defines']) + self.generate_module_state_clear(env, globalstate['module_state_clear']) + self.generate_module_state_traverse(env, globalstate['module_state_traverse']) + + # init_globals is inserted before this + self.generate_module_init_func(modules[:-1], env, globalstate['init_module']) + self.generate_module_cleanup_func(env, globalstate['cleanup_module']) + if Options.embed: + self.generate_main_method(env, globalstate['main_method']) + self.generate_filename_table(globalstate['filename_table']) + + self.generate_declarations_for_modules(env, modules, globalstate) + h_code.write('\n') + + for utilcode in env.utility_code_list[:]: + globalstate.use_utility_code(utilcode) + globalstate.finalize_main_c_code() + + self.generate_module_state_end(env, modules, globalstate) + + f = open_new_file(result.c_file) + try: + rootwriter.copyto(f) + finally: + f.close() + result.c_file_generated = 1 + if options.gdb_debug: + self._serialize_lineno_map(env, rootwriter) + if Options.annotate or options.annotate: + self._generate_annotations(rootwriter, result, options) + + def _generate_annotations(self, rootwriter, result, options): + self.annotate(rootwriter) + + coverage_xml_filename = Options.annotate_coverage_xml or options.annotate_coverage_xml + if coverage_xml_filename and os.path.exists(coverage_xml_filename): + try: + import xml.etree.cElementTree as ET + except ImportError: + import xml.etree.ElementTree as ET + coverage_xml = 
ET.parse(coverage_xml_filename).getroot() + for el in coverage_xml.iter(): + el.tail = None # save some memory + else: + coverage_xml = None + + rootwriter.save_annotation(result.main_source_file, result.c_file, coverage_xml=coverage_xml) + + # if we included files, additionally generate one annotation file for each + if not self.scope.included_files: + return + + search_include_file = self.scope.context.search_include_directories + target_dir = os.path.abspath(os.path.dirname(result.c_file)) + for included_file in self.scope.included_files: + target_file = os.path.abspath(os.path.join(target_dir, included_file)) + target_file_dir = os.path.dirname(target_file) + if not target_file_dir.startswith(target_dir): + # any other directories may not be writable => avoid trying + continue + source_file = search_include_file(included_file, source_pos=self.pos, include=True) + if not source_file: + continue + if target_file_dir != target_dir and not os.path.exists(target_file_dir): + try: + os.makedirs(target_file_dir) + except OSError as e: + import errno + if e.errno != errno.EEXIST: + raise + rootwriter.save_annotation(source_file, target_file, coverage_xml=coverage_xml) + + def _serialize_lineno_map(self, env, ccodewriter): + tb = env.context.gdb_debug_outputwriter + markers = ccodewriter.buffer.allmarkers() + + d = defaultdict(list) + for c_lineno, (src_desc, src_lineno) in enumerate(markers): + if src_lineno > 0 and src_desc.filename is not None: + d[src_desc, src_lineno].append(c_lineno + 1) + + tb.start('LineNumberMapping') + for (src_desc, src_lineno), c_linenos in sorted(d.items()): + assert src_desc.filename is not None + tb.add_entry( + 'LineNumber', + c_linenos=' '.join(map(str, c_linenos)), + src_path=src_desc.filename, + src_lineno=str(src_lineno), + ) + tb.end('LineNumberMapping') + tb.serialize() + + def find_referenced_modules(self, env, module_list, modules_seen): + if env not in modules_seen: + modules_seen[env] = 1 + for imported_module in 
env.cimported_modules: + self.find_referenced_modules(imported_module, module_list, modules_seen) + module_list.append(env) + + def sort_types_by_inheritance(self, type_dict, type_order, getkey): + subclasses = defaultdict(list) # maps type key to list of subclass keys + for key in type_order: + new_entry = type_dict[key] + # collect all base classes to check for children + base = new_entry.type.base_type + while base: + base_key = getkey(base) + subclasses[base_key].append(key) + base_entry = type_dict.get(base_key) + if base_entry is None: + break + base = base_entry.type.base_type + + # Simple topological sort using recursive DFS, based on + # https://en.wikipedia.org/wiki/Topological_sorting#Depth-first_search + seen = set() + result = [] + def dfs(u): + if u in seen: + return + seen.add(u) + for v in subclasses[getkey(u.type)]: + dfs(type_dict[v]) + result.append(u) + + for key in reversed(type_order): + dfs(type_dict[key]) + + result.reverse() + return result + + def sort_type_hierarchy(self, module_list, env): + # poor developer's OrderedDict + vtab_dict, vtab_dict_order = {}, [] + vtabslot_dict, vtabslot_dict_order = {}, [] + + for module in module_list: + for entry in module.c_class_entries: + if entry.used and not entry.in_cinclude: + type = entry.type + key = type.vtabstruct_cname + if not key: + continue + if key in vtab_dict: + # FIXME: this should *never* happen, but apparently it does + # for Cython generated utility code + from .UtilityCode import NonManglingModuleScope + assert isinstance(entry.scope, NonManglingModuleScope), str(entry.scope) + assert isinstance(vtab_dict[key].scope, NonManglingModuleScope), str(vtab_dict[key].scope) + else: + vtab_dict[key] = entry + vtab_dict_order.append(key) + all_defined_here = module is env + for entry in module.type_entries: + if entry.used and (all_defined_here or entry.defined_in_pxd): + type = entry.type + if type.is_extension_type and not entry.in_cinclude: + type = entry.type + key = 
type.objstruct_cname + assert key not in vtabslot_dict, key + vtabslot_dict[key] = entry + vtabslot_dict_order.append(key) + + def vtabstruct_cname(entry_type): + return entry_type.vtabstruct_cname + vtab_list = self.sort_types_by_inheritance( + vtab_dict, vtab_dict_order, vtabstruct_cname) + + def objstruct_cname(entry_type): + return entry_type.objstruct_cname + vtabslot_list = self.sort_types_by_inheritance( + vtabslot_dict, vtabslot_dict_order, objstruct_cname) + + return (vtab_list, vtabslot_list) + + def sort_cdef_classes(self, env): + key_func = operator.attrgetter('objstruct_cname') + entry_dict, entry_order = {}, [] + for entry in env.c_class_entries: + key = key_func(entry.type) + assert key not in entry_dict, key + entry_dict[key] = entry + entry_order.append(key) + env.c_class_entries[:] = self.sort_types_by_inheritance( + entry_dict, entry_order, key_func) + + def generate_type_definitions(self, env, modules, vtab_list, vtabslot_list, code): + # TODO: Why are these separated out? 
+ for entry in vtabslot_list: + self.generate_objstruct_predeclaration(entry.type, code) + vtabslot_entries = set(vtabslot_list) + ctuple_names = set() + for module in modules: + definition = module is env + type_entries = [] + for entry in module.type_entries: + if entry.type.is_ctuple and entry.used: + if entry.name not in ctuple_names: + ctuple_names.add(entry.name) + type_entries.append(entry) + elif definition or entry.defined_in_pxd: + type_entries.append(entry) + type_entries = [t for t in type_entries if t not in vtabslot_entries] + self.generate_type_header_code(type_entries, code) + for entry in vtabslot_list: + self.generate_objstruct_definition(entry.type, code) + self.generate_typeobj_predeclaration(entry, code) + for entry in vtab_list: + self.generate_typeobj_predeclaration(entry, code) + self.generate_exttype_vtable_struct(entry, code) + self.generate_exttype_vtabptr_declaration(entry, code) + self.generate_exttype_final_methods_declaration(entry, code) + + def generate_declarations_for_modules(self, env, modules, globalstate): + typecode = globalstate['type_declarations'] + typecode.putln("") + typecode.putln("/*--- Type declarations ---*/") + # This is to work around the fact that array.h isn't part of the C-API, + # but we need to declare it earlier than utility code. 
+ if 'cpython.array' in [m.qualified_name for m in modules]: + typecode.putln('#ifndef _ARRAYARRAY_H') + typecode.putln('struct arrayobject;') + typecode.putln('typedef struct arrayobject arrayobject;') + typecode.putln('#endif') + vtab_list, vtabslot_list = self.sort_type_hierarchy(modules, env) + self.generate_type_definitions( + env, modules, vtab_list, vtabslot_list, typecode) + modulecode = globalstate['module_declarations'] + for module in modules: + defined_here = module is env + modulecode.putln("") + modulecode.putln("/* Module declarations from %s */" % module.qualified_name.as_c_string_literal()) + self.generate_c_class_declarations(module, modulecode, defined_here, globalstate) + self.generate_cvariable_declarations(module, modulecode, defined_here) + self.generate_cfunction_declarations(module, modulecode, defined_here) + + @staticmethod + def _put_setup_code(code, name): + code.put(UtilityCode.load_as_string(name, "ModuleSetupCode.c")[1]) + + def generate_module_preamble(self, env, options, cimported_modules, metadata, code): + code.put_generated_by() + if metadata: + code.putln("/* BEGIN: Cython Metadata") + code.putln(json.dumps(metadata, indent=4, sort_keys=True)) + code.putln("END: Cython Metadata */") + code.putln("") + + code.putln("#ifndef PY_SSIZE_T_CLEAN") + code.putln("#define PY_SSIZE_T_CLEAN") + code.putln("#endif /* PY_SSIZE_T_CLEAN */") + self._put_setup_code(code, "InitLimitedAPI") + + for inc in sorted(env.c_includes.values(), key=IncludeCode.sortkey): + if inc.location == inc.INITIAL: + inc.write(code) + code.putln("#ifndef Py_PYTHON_H") + code.putln(" #error Python headers needed to compile C extensions, " + "please install development version of Python.") + code.putln("#elif PY_VERSION_HEX < 0x02070000 || " + "(0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03030000)") + code.putln(" #error Cython requires Python 2.7+ or Python 3.3+.") + code.putln("#else") + code.globalstate["end"].putln("#endif /* Py_PYTHON_H */") + + from .. 
import __version__ + code.putln('#if defined(CYTHON_LIMITED_API) && CYTHON_LIMITED_API') # CYTHON_COMPILING_IN_LIMITED_API not yet defined + # The limited API makes some significant changes to data structures, so we don't + # want to shared implementation compiled with and without the limited API. + code.putln('#define __PYX_EXTRA_ABI_MODULE_NAME "limited"') + code.putln('#else') + code.putln('#define __PYX_EXTRA_ABI_MODULE_NAME ""') + code.putln('#endif') + code.putln('#define CYTHON_ABI "%s" __PYX_EXTRA_ABI_MODULE_NAME' % + __version__.replace('.', '_')) + code.putln('#define __PYX_ABI_MODULE_NAME "_cython_" CYTHON_ABI') + code.putln('#define __PYX_TYPE_MODULE_PREFIX __PYX_ABI_MODULE_NAME "."') + code.putln('#define CYTHON_HEX_VERSION %s' % build_hex_version(__version__)) + code.putln("#define CYTHON_FUTURE_DIVISION %d" % ( + Future.division in env.context.future_directives)) + + self._put_setup_code(code, "CModulePreamble") + if env.context.options.cplus: + self._put_setup_code(code, "CppInitCode") + else: + self._put_setup_code(code, "CInitCode") + self._put_setup_code(code, "PythonCompatibility") + self._put_setup_code(code, "MathInitCode") + + if options.c_line_in_traceback: + cinfo = "%s = %s; " % (Naming.clineno_cname, Naming.line_c_macro) + else: + cinfo = "" + code.putln("#define __PYX_MARK_ERR_POS(f_index, lineno) \\") + # Using "(void)cname" to prevent "unused" warnings. 
+ code.putln(" { %s = %s[f_index]; (void)%s; %s = lineno; (void)%s; %s (void)%s; }" % ( + Naming.filename_cname, Naming.filetable_cname, Naming.filename_cname, + Naming.lineno_cname, Naming.lineno_cname, + cinfo, + Naming.clineno_cname, + )) + code.putln("#define __PYX_ERR(f_index, lineno, Ln_error) \\") + code.putln(" { __PYX_MARK_ERR_POS(f_index, lineno) goto Ln_error; }") + + code.putln("") + self.generate_extern_c_macro_definition(code, env.is_cpp()) + code.putln("") + + code.putln("#define %s" % self.api_name(Naming.h_guard_prefix, env)) + code.putln("#define %s" % self.api_name(Naming.api_guard_prefix, env)) + code.putln("/* Early includes */") + self.generate_includes(env, cimported_modules, code, late=False) + code.putln("") + code.putln("#if defined(PYREX_WITHOUT_ASSERTIONS) && !defined(CYTHON_WITHOUT_ASSERTIONS)") + code.putln("#define CYTHON_WITHOUT_ASSERTIONS") + code.putln("#endif") + code.putln("") + + if env.directives['ccomplex']: + code.putln("") + code.putln("#if !defined(CYTHON_CCOMPLEX)") + code.putln("#define CYTHON_CCOMPLEX 1") + code.putln("#endif") + code.putln("") + code.put(UtilityCode.load_as_string("UtilityFunctionPredeclarations", "ModuleSetupCode.c")[0]) + + c_string_type = env.directives['c_string_type'] + c_string_encoding = env.directives['c_string_encoding'] + if c_string_type not in ('bytes', 'bytearray') and not c_string_encoding: + error(self.pos, "a default encoding must be provided if c_string_type is not a byte type") + code.putln('#define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII %s' % int(c_string_encoding == 'ascii')) + code.putln('#define __PYX_DEFAULT_STRING_ENCODING_IS_UTF8 %s' % + int(c_string_encoding.replace('-', '').lower() == 'utf8')) + if c_string_encoding == 'default': + code.putln('#define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT 1') + else: + code.putln('#define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT ' + '(PY_MAJOR_VERSION >= 3 && __PYX_DEFAULT_STRING_ENCODING_IS_UTF8)') + code.putln('#define 
__PYX_DEFAULT_STRING_ENCODING "%s"' % c_string_encoding) + if c_string_type == 'bytearray': + c_string_func_name = 'ByteArray' + else: + c_string_func_name = c_string_type.title() + code.putln('#define __Pyx_PyObject_FromString __Pyx_Py%s_FromString' % c_string_func_name) + code.putln('#define __Pyx_PyObject_FromStringAndSize __Pyx_Py%s_FromStringAndSize' % c_string_func_name) + code.put(UtilityCode.load_as_string("TypeConversions", "TypeConversion.c")[0]) + env.use_utility_code(UtilityCode.load_cached("FormatTypeName", "ObjectHandling.c")) + + # These utility functions are assumed to exist and used elsewhere. + PyrexTypes.c_long_type.create_to_py_utility_code(env) + PyrexTypes.c_long_type.create_from_py_utility_code(env) + PyrexTypes.c_int_type.create_from_py_utility_code(env) + + code.put(Nodes.branch_prediction_macros) + code.putln('static CYTHON_INLINE void __Pyx_pretend_to_initialize(void* ptr) { (void)ptr; }') + code.putln('') + code.putln('#if !CYTHON_USE_MODULE_STATE') + code.putln('static PyObject *%s = NULL;' % env.module_cname) + if Options.pre_import is not None: + code.putln('static PyObject *%s;' % Naming.preimport_cname) + code.putln('#endif') + + code.putln('static int %s;' % Naming.lineno_cname) + code.putln('static int %s = 0;' % Naming.clineno_cname) + code.putln('static const char * %s = %s;' % (Naming.cfilenm_cname, Naming.file_c_macro)) + code.putln('static const char *%s;' % Naming.filename_cname) + + env.use_utility_code(UtilityCode.load_cached("FastTypeChecks", "ModuleSetupCode.c")) + if has_np_pythran(env): + env.use_utility_code(UtilityCode.load_cached("PythranConversion", "CppSupport.cpp")) + + def generate_extern_c_macro_definition(self, code, is_cpp): + name = Naming.extern_c_macro + code.putln("#ifdef CYTHON_EXTERN_C") + # make sure that user overrides always take precedence + code.putln(' #undef %s' % name) + code.putln(' #define %s CYTHON_EXTERN_C' % name) + code.putln("#elif defined(%s)" % name) + code.putln(" #ifdef _MSC_VER") + 
code.putln(" #pragma message (\"Please do not define the '%s' macro externally. Use 'CYTHON_EXTERN_C' instead.\")" % name) + code.putln(" #else") + code.putln(" #warning Please do not define the '%s' macro externally. Use 'CYTHON_EXTERN_C' instead." % name) + code.putln(" #endif") + code.putln("#else") + if is_cpp: + code.putln(' #define %s extern "C++"' % name) + else: + code.putln(" #ifdef __cplusplus") + code.putln(' #define %s extern "C"' % name) + code.putln(" #else") + code.putln(" #define %s extern" % name) + code.putln(" #endif") + code.putln("#endif") + + def generate_dl_import_macro(self, code): + code.putln("#ifndef DL_IMPORT") + code.putln(" #define DL_IMPORT(_T) _T") + code.putln("#endif") + + def generate_includes(self, env, cimported_modules, code, early=True, late=True): + for inc in sorted(env.c_includes.values(), key=IncludeCode.sortkey): + if inc.location == inc.EARLY: + if early: + inc.write(code) + elif inc.location == inc.LATE: + if late: + inc.write(code) + if early: + code.putln_openmp("#include ") + + def generate_filename_table(self, code): + from os.path import isabs, basename + code.putln("") + code.putln("static const char *%s[] = {" % Naming.filetable_cname) + if code.globalstate.filename_list: + for source_desc in code.globalstate.filename_list: + file_path = source_desc.get_filenametable_entry() + if isabs(file_path): + file_path = basename(file_path) # never include absolute paths + escaped_filename = file_path.replace("\\", "\\\\").replace('"', r'\"') + escaped_filename = as_encoded_filename(escaped_filename) + code.putln('%s,' % escaped_filename.as_c_string_literal()) + else: + # Some C compilers don't like an empty array + code.putln("0") + code.putln("};") + + def generate_type_predeclarations(self, env, code): + pass + + def generate_type_header_code(self, type_entries, code): + # Generate definitions of structs/unions/enums/typedefs/objstructs. + #self.generate_gcc33_hack(env, code) # Is this still needed? 
+ # Forward declarations + for entry in type_entries: + if not entry.in_cinclude: + #print "generate_type_header_code:", entry.name, repr(entry.type) ### + type = entry.type + if type.is_typedef: # Must test this first! + pass + elif type.is_struct_or_union or type.is_cpp_class: + self.generate_struct_union_predeclaration(entry, code) + elif type.is_ctuple and not type.is_fused and entry.used: + self.generate_struct_union_predeclaration(entry.type.struct_entry, code) + elif type.is_extension_type: + self.generate_objstruct_predeclaration(type, code) + # Actual declarations + for entry in type_entries: + if not entry.in_cinclude: + #print "generate_type_header_code:", entry.name, repr(entry.type) ### + type = entry.type + if type.is_typedef: # Must test this first! + self.generate_typedef(entry, code) + elif type.is_enum or type.is_cpp_enum: + self.generate_enum_definition(entry, code) + elif type.is_struct_or_union: + self.generate_struct_union_definition(entry, code) + elif type.is_ctuple and not type.is_fused and entry.used: + self.generate_struct_union_definition(entry.type.struct_entry, code) + elif type.is_cpp_class: + self.generate_cpp_class_definition(entry, code) + elif type.is_extension_type: + self.generate_objstruct_definition(type, code) + + def generate_gcc33_hack(self, env, code): + # Workaround for spurious warning generation in gcc 3.3 + code.putln("") + for entry in env.c_class_entries: + type = entry.type + if not type.typedef_flag: + name = type.objstruct_cname + if name.startswith("__pyx_"): + tail = name[6:] + else: + tail = name + code.putln("typedef struct %s __pyx_gcc33_%s;" % ( + name, tail)) + + def generate_typedef(self, entry, code): + base_type = entry.type.typedef_base_type + enclosing_scope = entry.scope + if base_type.is_numeric and not enclosing_scope.is_cpp_class_scope: + try: + writer = code.globalstate['numeric_typedefs'] + except KeyError: + writer = code + else: + writer = code + writer.mark_pos(entry.pos) + 
writer.putln("typedef %s;" % base_type.declaration_code(entry.cname)) + + def sue_predeclaration(self, type, kind, name): + if type.typedef_flag: + return "%s %s;\ntypedef %s %s %s;" % ( + kind, name, + kind, name, name) + else: + return "%s %s;" % (kind, name) + + def generate_struct_union_predeclaration(self, entry, code): + type = entry.type + if type.is_cpp_class and type.templates: + code.putln("template " % ", typename ".join( + [T.empty_declaration_code() for T in type.templates])) + code.putln(self.sue_predeclaration(type, type.kind, type.cname)) + + def sue_header_footer(self, type, kind, name): + header = "%s %s {" % (kind, name) + footer = "};" + return header, footer + + def generate_struct_union_definition(self, entry, code): + code.mark_pos(entry.pos) + type = entry.type + scope = type.scope + if scope: + kind = type.kind + packed = type.is_struct and type.packed + if packed: + kind = "%s %s" % (type.kind, "__Pyx_PACKED") + code.globalstate.use_utility_code(packed_struct_utility_code) + header, footer = \ + self.sue_header_footer(type, kind, type.cname) + if packed: + code.putln("#if defined(__SUNPRO_C)") + code.putln(" #pragma pack(1)") + code.putln("#elif !defined(__GNUC__)") + code.putln(" #pragma pack(push, 1)") + code.putln("#endif") + code.putln(header) + var_entries = scope.var_entries + for attr in var_entries: + code.putln( + "%s;" % attr.type.declaration_code(attr.cname)) + code.putln(footer) + if packed: + code.putln("#if defined(__SUNPRO_C)") + code.putln(" #pragma pack()") + code.putln("#elif !defined(__GNUC__)") + code.putln(" #pragma pack(pop)") + code.putln("#endif") + + def generate_cpp_class_definition(self, entry, code): + code.mark_pos(entry.pos) + type = entry.type + scope = type.scope + if scope: + if type.templates: + code.putln("template " % ", class ".join( + [T.empty_declaration_code() for T in type.templates])) + # Just let everything be public. 
+ code.put("struct %s" % type.cname) + if type.base_classes: + base_class_decl = ", public ".join( + [base_class.empty_declaration_code() for base_class in type.base_classes]) + code.put(" : public %s" % base_class_decl) + code.putln(" {") + self.generate_type_header_code(scope.type_entries, code) + py_attrs = [e for e in scope.entries.values() + if e.type.is_pyobject and not e.is_inherited] + has_virtual_methods = False + constructor = None + destructor = None + for attr in scope.var_entries: + if attr.type.is_cfunction and attr.type.is_static_method: + code.put("static ") + elif attr.name == "": + constructor = attr + elif attr.name == "": + destructor = attr + elif attr.type.is_cfunction: + code.put("virtual ") + has_virtual_methods = True + code.putln("%s;" % attr.type.declaration_code(attr.cname)) + is_implementing = 'init_module' in code.globalstate.parts + if constructor or py_attrs: + if constructor: + arg_decls = [] + arg_names = [] + for arg in constructor.type.original_args[ + :len(constructor.type.args)-constructor.type.optional_arg_count]: + arg_decls.append(arg.declaration_code()) + arg_names.append(arg.cname) + if constructor.type.optional_arg_count: + arg_decls.append(constructor.type.op_arg_struct.declaration_code(Naming.optional_args_cname)) + arg_names.append(Naming.optional_args_cname) + if not arg_decls: + arg_decls = ["void"] + else: + arg_decls = ["void"] + arg_names = [] + if is_implementing: + code.putln("%s(%s) {" % (type.cname, ", ".join(arg_decls))) + if py_attrs: + code.put_ensure_gil() + for attr in py_attrs: + code.put_init_var_to_py_none(attr, nanny=False) + if constructor: + code.putln("%s(%s);" % (constructor.cname, ", ".join(arg_names))) + if py_attrs: + code.put_release_ensured_gil() + code.putln("}") + else: + code.putln("%s(%s);" % (type.cname, ", ".join(arg_decls))) + if destructor or py_attrs or has_virtual_methods: + if has_virtual_methods: + code.put("virtual ") + if is_implementing: + code.putln("~%s() {" % type.cname) + 
if py_attrs: + code.put_ensure_gil() + if destructor: + code.putln("%s();" % destructor.cname) + if py_attrs: + for attr in py_attrs: + code.put_var_xdecref(attr, nanny=False) + code.put_release_ensured_gil() + code.putln("}") + else: + code.putln("~%s();" % type.cname) + if py_attrs: + # Also need copy constructor and assignment operators. + if is_implementing: + code.putln("%s(const %s& __Pyx_other) {" % (type.cname, type.cname)) + code.put_ensure_gil() + for attr in scope.var_entries: + if not attr.type.is_cfunction: + code.putln("%s = __Pyx_other.%s;" % (attr.cname, attr.cname)) + code.put_var_incref(attr, nanny=False) + code.put_release_ensured_gil() + code.putln("}") + code.putln("%s& operator=(const %s& __Pyx_other) {" % (type.cname, type.cname)) + code.putln("if (this != &__Pyx_other) {") + code.put_ensure_gil() + for attr in scope.var_entries: + if not attr.type.is_cfunction: + code.put_var_xdecref(attr, nanny=False) + code.putln("%s = __Pyx_other.%s;" % (attr.cname, attr.cname)) + code.put_var_incref(attr, nanny=False) + code.put_release_ensured_gil() + code.putln("}") + code.putln("return *this;") + code.putln("}") + else: + code.putln("%s(const %s& __Pyx_other);" % (type.cname, type.cname)) + code.putln("%s& operator=(const %s& __Pyx_other);" % (type.cname, type.cname)) + code.putln("};") + + def generate_enum_definition(self, entry, code): + code.mark_pos(entry.pos) + type = entry.type + name = entry.cname or entry.name or "" + + kind = "enum class" if entry.type.is_cpp_enum else "enum" + header, footer = self.sue_header_footer(type, kind, name) + code.putln(header) + enum_values = entry.enum_values + if not enum_values: + error(entry.pos, "Empty enum definition not allowed outside a 'cdef extern from' block") + else: + last_entry = enum_values[-1] + # this does not really generate code, just builds the result value + for value_entry in enum_values: + if value_entry.value_node is not None: + value_entry.value_node.generate_evaluation_code(code) + + for 
value_entry in enum_values: + if value_entry.value_node is None: + value_code = value_entry.cname.split("::")[-1] + else: + value_code = ("%s = %s" % ( + value_entry.cname.split("::")[-1], + value_entry.value_node.result())) + if value_entry is not last_entry: + value_code += "," + code.putln(value_code) + code.putln(footer) + + if entry.type.is_enum: + if entry.type.typedef_flag: + # Not pre-declared. + code.putln("typedef enum %s %s;" % (name, name)) + + def generate_typeobj_predeclaration(self, entry, code): + code.putln("") + name = entry.type.typeobj_cname + if name: + if entry.visibility == 'extern' and not entry.in_cinclude: + code.putln("%s %s %s;" % ( + Naming.extern_c_macro, + PyrexTypes.public_decl("PyTypeObject", "DL_IMPORT"), + name)) + elif entry.visibility == 'public': + code.putln("%s %s %s;" % ( + Naming.extern_c_macro, + PyrexTypes.public_decl("PyTypeObject", "DL_EXPORT"), + name)) + # ??? Do we really need the rest of this? ??? + #else: + # code.putln("static PyTypeObject %s;" % name) + + def generate_exttype_vtable_struct(self, entry, code): + if not entry.used: + return + + code.mark_pos(entry.pos) + # Generate struct declaration for an extension type's vtable. + type = entry.type + scope = type.scope + + self.specialize_fused_types(scope) + + if type.vtabstruct_cname: + code.putln("") + code.putln("struct %s {" % type.vtabstruct_cname) + if type.base_type and type.base_type.vtabstruct_cname: + code.putln("struct %s %s;" % ( + type.base_type.vtabstruct_cname, + Naming.obj_base_cname)) + for method_entry in scope.cfunc_entries: + if not method_entry.is_inherited: + code.putln("%s;" % method_entry.type.declaration_code("(*%s)" % method_entry.cname)) + code.putln("};") + + def generate_exttype_vtabptr_declaration(self, entry, code): + if not entry.used: + return + + code.mark_pos(entry.pos) + # Generate declaration of pointer to an extension type's vtable. 
+ type = entry.type + if type.vtabptr_cname: + code.putln("static struct %s *%s;" % ( + type.vtabstruct_cname, + type.vtabptr_cname)) + + def generate_exttype_final_methods_declaration(self, entry, code): + if not entry.used: + return + + code.mark_pos(entry.pos) + # Generate final methods prototypes + type = entry.type + for method_entry in entry.type.scope.cfunc_entries: + if not method_entry.is_inherited and method_entry.final_func_cname: + declaration = method_entry.type.declaration_code( + method_entry.final_func_cname) + modifiers = code.build_function_modifiers(method_entry.func_modifiers) + code.putln("static %s%s;" % (modifiers, declaration)) + + def generate_objstruct_predeclaration(self, type, code): + if not type.scope: + return + code.putln(self.sue_predeclaration(type, "struct", type.objstruct_cname)) + + def generate_objstruct_definition(self, type, code): + code.mark_pos(type.pos) + # Generate object struct definition for an + # extension type. + if not type.scope: + return # Forward declared but never defined + header, footer = \ + self.sue_header_footer(type, "struct", type.objstruct_cname) + code.putln(header) + base_type = type.base_type + if base_type: + basestruct_cname = base_type.objstruct_cname + if basestruct_cname == "PyTypeObject": + # User-defined subclasses of type are heap allocated. 
+ basestruct_cname = "PyHeapTypeObject" + code.putln( + "%s%s %s;" % ( + ("struct ", "")[base_type.typedef_flag], + basestruct_cname, + Naming.obj_base_cname)) + else: + code.putln( + "PyObject_HEAD") + if type.vtabslot_cname and not (type.base_type and type.base_type.vtabslot_cname): + code.putln( + "struct %s *%s;" % ( + type.vtabstruct_cname, + type.vtabslot_cname)) + for attr in type.scope.var_entries: + if attr.is_declared_generic: + attr_type = py_object_type + else: + attr_type = attr.type + if attr.is_cpp_optional: + decl = attr_type.cpp_optional_declaration_code(attr.cname) + else: + decl = attr_type.declaration_code(attr.cname) + type.scope.use_entry_utility_code(attr) + code.putln("%s;" % decl) + code.putln(footer) + if type.objtypedef_cname is not None: + # Only for exposing public typedef name. + code.putln("typedef struct %s %s;" % (type.objstruct_cname, type.objtypedef_cname)) + + def generate_c_class_declarations(self, env, code, definition, globalstate): + module_state = globalstate['module_state'] + module_state_defines = globalstate['module_state_defines'] + module_state_clear = globalstate['module_state_clear'] + module_state_traverse = globalstate['module_state_traverse'] + module_state_typeobj = module_state.insertion_point() + module_state_defines_typeobj = module_state_defines.insertion_point() + for writer in [module_state_typeobj, module_state_defines_typeobj]: + writer.putln("#if CYTHON_USE_MODULE_STATE") + for entry in env.c_class_entries: + if definition or entry.defined_in_pxd: + module_state.putln("PyTypeObject *%s;" % entry.type.typeptr_cname) + module_state_defines.putln("#define %s %s->%s" % ( + entry.type.typeptr_cname, + Naming.modulestateglobal_cname, + entry.type.typeptr_cname)) + module_state_clear.putln( + "Py_CLEAR(clear_module_state->%s);" % + entry.type.typeptr_cname) + module_state_traverse.putln( + "Py_VISIT(traverse_module_state->%s);" % + entry.type.typeptr_cname) + if entry.type.typeobj_cname is not None: + 
module_state_typeobj.putln("PyObject *%s;" % entry.type.typeobj_cname) + module_state_defines_typeobj.putln("#define %s %s->%s" % ( + entry.type.typeobj_cname, + Naming.modulestateglobal_cname, + entry.type.typeobj_cname)) + module_state_clear.putln( + "Py_CLEAR(clear_module_state->%s);" % ( + entry.type.typeobj_cname)) + module_state_traverse.putln( + "Py_VISIT(traverse_module_state->%s);" % ( + entry.type.typeobj_cname)) + for writer in [module_state_typeobj, module_state_defines_typeobj]: + writer.putln("#endif") + + def generate_cvariable_declarations(self, env, code, definition): + if env.is_cython_builtin: + return + for entry in env.var_entries: + if (entry.in_cinclude or entry.in_closure or + (entry.visibility == 'private' and not (entry.defined_in_pxd or entry.used))): + continue + + storage_class = None + dll_linkage = None + init = None + + if entry.visibility == 'extern': + storage_class = Naming.extern_c_macro + dll_linkage = "DL_IMPORT" + elif entry.visibility == 'public': + storage_class = Naming.extern_c_macro + if definition: + dll_linkage = "DL_EXPORT" + else: + dll_linkage = "DL_IMPORT" + elif entry.visibility == 'private': + storage_class = "static" + dll_linkage = None + if entry.init is not None: + init = entry.type.literal_code(entry.init) + type = entry.type + cname = entry.cname + + if entry.defined_in_pxd and not definition: + storage_class = "static" + dll_linkage = None + type = CPtrType(type) + cname = env.mangle(Naming.varptr_prefix, entry.name) + init = 0 + + if storage_class: + code.put("%s " % storage_class) + if entry.is_cpp_optional: + code.put(type.cpp_optional_declaration_code( + cname, dll_linkage=dll_linkage)) + else: + code.put(type.declaration_code( + cname, dll_linkage=dll_linkage)) + if init is not None: + code.put_safe(" = %s" % init) + code.putln(";") + if entry.cname != cname: + code.putln("#define %s (*%s)" % (entry.cname, cname)) + env.use_entry_utility_code(entry) + + def generate_cfunction_declarations(self, env, 
code, definition): + for entry in env.cfunc_entries: + from_pyx = Options.cimport_from_pyx and not entry.visibility == 'extern' + if (entry.used + or entry.visibility == 'public' + or entry.api + or from_pyx): + generate_cfunction_declaration(entry, env, code, definition) + + def generate_variable_definitions(self, env, code): + for entry in env.var_entries: + if not entry.in_cinclude and entry.visibility == "public": + code.put(entry.type.declaration_code(entry.cname)) + if entry.init is not None: + init = entry.type.literal_code(entry.init) + code.put_safe(" = %s" % init) + code.putln(";") + + def generate_typeobj_definitions(self, env, code): + full_module_name = env.qualified_name + for entry in env.c_class_entries: + #print "generate_typeobj_definitions:", entry.name + #print "...visibility =", entry.visibility + if entry.visibility != 'extern': + type = entry.type + scope = type.scope + if scope: # could be None if there was an error + self.generate_exttype_vtable(scope, code) + self.generate_new_function(scope, code, entry) + self.generate_del_function(scope, code) + self.generate_dealloc_function(scope, code) + + if scope.needs_gc(): + self.generate_traverse_function(scope, code, entry) + if scope.needs_tp_clear(): + self.generate_clear_function(scope, code, entry) + if scope.defines_any_special(["__getitem__"]): + self.generate_getitem_int_function(scope, code) + if scope.defines_any_special(["__setitem__", "__delitem__"]): + self.generate_ass_subscript_function(scope, code) + if scope.defines_any_special(["__getslice__", "__setslice__", "__delslice__"]): + warning(self.pos, + "__getslice__, __setslice__, and __delslice__ are not supported by Python 3, " + "use __getitem__, __setitem__, and __delitem__ instead", 1) + code.putln("#if PY_MAJOR_VERSION >= 3") + code.putln("#error __getslice__, __setslice__, and __delslice__ not supported in Python 3.") + code.putln("#endif") + if scope.defines_any_special(["__setslice__", "__delslice__"]): + 
self.generate_ass_slice_function(scope, code) + if scope.defines_any_special(["__getattr__", "__getattribute__"]): + self.generate_getattro_function(scope, code) + if scope.defines_any_special(["__setattr__", "__delattr__"]): + self.generate_setattro_function(scope, code) + if scope.defines_any_special(["__get__"]): + self.generate_descr_get_function(scope, code) + if scope.defines_any_special(["__set__", "__delete__"]): + self.generate_descr_set_function(scope, code) + if not scope.is_closure_class_scope and scope.defines_any(["__dict__"]): + self.generate_dict_getter_function(scope, code) + + if scope.defines_any_special(TypeSlots.richcmp_special_methods): + self.generate_richcmp_function(scope, code) + elif 'total_ordering' in scope.directives: + # Warn if this is used when it can't have any effect. + warning(scope.parent_type.pos, + "total_ordering directive used, but no comparison and equality methods defined") + + for slot in TypeSlots.get_slot_table(code.globalstate.directives).PyNumberMethods: + if slot.is_binop and scope.defines_any_special(slot.user_methods): + self.generate_binop_function(scope, slot, code, entry.pos) + + self.generate_property_accessors(scope, code) + self.generate_method_table(scope, code) + self.generate_getset_table(scope, code) + code.putln("#if CYTHON_USE_TYPE_SPECS") + self.generate_typeobj_spec(entry, code) + code.putln("#else") + self.generate_typeobj_definition(full_module_name, entry, code) + code.putln("#endif") + + def generate_exttype_vtable(self, scope, code): + # Generate the definition of an extension type's vtable. 
+ type = scope.parent_type + if type.vtable_cname: + code.putln("static struct %s %s;" % ( + type.vtabstruct_cname, + type.vtable_cname)) + + def generate_self_cast(self, scope, code): + type = scope.parent_type + code.putln( + "%s = (%s)o;" % ( + type.declaration_code("p"), + type.empty_declaration_code())) + + def generate_new_function(self, scope, code, cclass_entry): + tp_slot = TypeSlots.ConstructorSlot("tp_new", "__cinit__") + slot_func = scope.mangle_internal("tp_new") + if tp_slot.slot_code(scope) != slot_func: + return # never used + + type = scope.parent_type + base_type = type.base_type + + have_entries, (py_attrs, py_buffers, memoryview_slices) = \ + scope.get_refcounted_entries() + is_final_type = scope.parent_type.is_final_type + if scope.is_internal: + # internal classes (should) never need None inits, normal zeroing will do + py_attrs = [] + cpp_constructable_attrs = [entry for entry in scope.var_entries if entry.type.needs_cpp_construction] + + cinit_func_entry = scope.lookup_here("__cinit__") + if cinit_func_entry and not cinit_func_entry.is_special: + cinit_func_entry = None + + if base_type or (cinit_func_entry and not cinit_func_entry.trivial_signature): + unused_marker = '' + else: + unused_marker = 'CYTHON_UNUSED ' + + if base_type: + freelist_size = 0 # not currently supported + else: + freelist_size = scope.directives.get('freelist', 0) + freelist_name = scope.mangle_internal(Naming.freelist_name) + freecount_name = scope.mangle_internal(Naming.freecount_name) + + decls = code.globalstate['decls'] + decls.putln("static PyObject *%s(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/" % + slot_func) + code.putln("") + if freelist_size: + code.putln("#if CYTHON_USE_FREELISTS") + code.putln("static %s[%d];" % ( + scope.parent_type.declaration_code(freelist_name), + freelist_size)) + code.putln("static int %s = 0;" % freecount_name) + code.putln("#endif") + code.putln("") + code.putln( + "static PyObject *%s(PyTypeObject *t, %sPyObject *a, 
%sPyObject *k) {" % ( + slot_func, unused_marker, unused_marker)) + + need_self_cast = (type.vtabslot_cname or + (py_buffers or memoryview_slices or py_attrs) or + cpp_constructable_attrs) + if need_self_cast: + code.putln("%s;" % scope.parent_type.declaration_code("p")) + if base_type: + tp_new = TypeSlots.get_base_slot_function(scope, tp_slot) + if tp_new is None: + tp_new = "__Pyx_PyType_GetSlot(%s, tp_new, newfunc)" % base_type.typeptr_cname + code.putln("PyObject *o = %s(t, a, k);" % tp_new) + else: + code.putln("PyObject *o;") + code.putln("#if CYTHON_COMPILING_IN_LIMITED_API") + code.putln("allocfunc alloc_func = (allocfunc)PyType_GetSlot(t, Py_tp_alloc);") + code.putln("o = alloc_func(t, 0);") + code.putln("#else") + if freelist_size: + code.globalstate.use_utility_code( + UtilityCode.load_cached("IncludeStringH", "StringTools.c")) + if is_final_type: + type_safety_check = '' + else: + type_safety_check = ' & (int)(!__Pyx_PyType_HasFeature(t, (Py_TPFLAGS_IS_ABSTRACT | Py_TPFLAGS_HEAPTYPE)))' + obj_struct = type.declaration_code("", deref=True) + code.putln("#if CYTHON_USE_FREELISTS") + code.putln( + "if (likely((int)(%s > 0) & (int)(t->tp_basicsize == sizeof(%s))%s)) {" % ( + freecount_name, obj_struct, type_safety_check)) + code.putln("o = (PyObject*)%s[--%s];" % ( + freelist_name, freecount_name)) + code.putln("memset(o, 0, sizeof(%s));" % obj_struct) + code.putln("(void) PyObject_INIT(o, t);") + if scope.needs_gc(): + code.putln("PyObject_GC_Track(o);") + code.putln("} else") + code.putln("#endif") + code.putln("{") + if not is_final_type: + code.putln("if (likely(!__Pyx_PyType_HasFeature(t, Py_TPFLAGS_IS_ABSTRACT))) {") + code.putln("o = (*t->tp_alloc)(t, 0);") + if not is_final_type: + code.putln("} else {") + code.putln("o = (PyObject *) PyBaseObject_Type.tp_new(t, %s, 0);" % Naming.empty_tuple) + code.putln("}") + code.putln("if (unlikely(!o)) return 0;") + if freelist_size and not base_type: + code.putln('}') + if not base_type: + 
code.putln("#endif") + if need_self_cast: + code.putln("p = %s;" % type.cast_code("o")) + #if need_self_cast: + # self.generate_self_cast(scope, code) + + # from this point on, ensure DECREF(o) on failure + needs_error_cleanup = False + + if type.vtabslot_cname: + vtab_base_type = type + while vtab_base_type.base_type and vtab_base_type.base_type.vtabstruct_cname: + vtab_base_type = vtab_base_type.base_type + if vtab_base_type is not type: + struct_type_cast = "(struct %s*)" % vtab_base_type.vtabstruct_cname + else: + struct_type_cast = "" + code.putln("p->%s = %s%s;" % ( + type.vtabslot_cname, + struct_type_cast, type.vtabptr_cname)) + + for entry in cpp_constructable_attrs: + if entry.is_cpp_optional: + decl_code = entry.type.cpp_optional_declaration_code("") + else: + decl_code = entry.type.empty_declaration_code() + code.putln("new((void*)&(p->%s)) %s();" % ( + entry.cname, decl_code)) + + for entry in py_attrs: + if entry.name == "__dict__": + needs_error_cleanup = True + code.put("p->%s = PyDict_New(); if (unlikely(!p->%s)) goto bad;" % ( + entry.cname, entry.cname)) + else: + code.put_init_var_to_py_none(entry, "p->%s", nanny=False) + + for entry in memoryview_slices: + code.putln("p->%s.data = NULL;" % entry.cname) + code.putln("p->%s.memview = NULL;" % entry.cname) + + for entry in py_buffers: + code.putln("p->%s.obj = NULL;" % entry.cname) + + if cclass_entry.cname == '__pyx_memoryviewslice': + code.putln("p->from_slice.memview = NULL;") + + if cinit_func_entry: + if cinit_func_entry.trivial_signature: + cinit_args = "o, %s, NULL" % Naming.empty_tuple + else: + cinit_args = "o, a, k" + needs_error_cleanup = True + code.putln("if (unlikely(%s(%s) < 0)) goto bad;" % ( + cinit_func_entry.func_cname, cinit_args)) + + code.putln( + "return o;") + if needs_error_cleanup: + code.putln("bad:") + code.put_decref_clear("o", py_object_type, nanny=False) + code.putln("return NULL;") + code.putln( + "}") + + def generate_del_function(self, scope, code): + tp_slot = 
TypeSlots.get_slot_by_name("tp_finalize", scope.directives) + slot_func_cname = scope.mangle_internal("tp_finalize") + if tp_slot.slot_code(scope) != slot_func_cname: + return # never used + + entry = scope.lookup_here("__del__") + if entry is None or not entry.is_special: + return # nothing to wrap + code.putln("") + + if tp_slot.used_ifdef: + code.putln("#if %s" % tp_slot.used_ifdef) + code.putln("static void %s(PyObject *o) {" % slot_func_cname) + code.putln("PyObject *etype, *eval, *etb;") + code.putln("PyErr_Fetch(&etype, &eval, &etb);") + code.putln("%s(o);" % entry.func_cname) + code.putln("PyErr_Restore(etype, eval, etb);") + code.putln("}") + if tp_slot.used_ifdef: + code.putln("#endif") + + def generate_dealloc_function(self, scope, code): + tp_slot = TypeSlots.ConstructorSlot("tp_dealloc", '__dealloc__') + slot_func = scope.mangle_internal("tp_dealloc") + base_type = scope.parent_type.base_type + if tp_slot.slot_code(scope) != slot_func: + return # never used + + slot_func_cname = scope.mangle_internal("tp_dealloc") + code.putln("") + code.putln( + "static void %s(PyObject *o) {" % slot_func_cname) + + is_final_type = scope.parent_type.is_final_type + needs_gc = scope.needs_gc() + needs_trashcan = scope.needs_trashcan() + + weakref_slot = scope.lookup_here("__weakref__") if not scope.is_closure_class_scope else None + if weakref_slot not in scope.var_entries: + weakref_slot = None + + dict_slot = scope.lookup_here("__dict__") if not scope.is_closure_class_scope else None + if dict_slot not in scope.var_entries: + dict_slot = None + + _, (py_attrs, _, memoryview_slices) = scope.get_refcounted_entries() + cpp_destructable_attrs = [entry for entry in scope.var_entries + if entry.type.needs_cpp_construction] + + if py_attrs or cpp_destructable_attrs or memoryview_slices or weakref_slot or dict_slot: + self.generate_self_cast(scope, code) + + if not is_final_type or scope.may_have_finalize(): + # in Py3.4+, call tp_finalize() as early as possible + 
code.putln("#if CYTHON_USE_TP_FINALIZE") + if needs_gc: + finalised_check = '!__Pyx_PyObject_GC_IsFinalized(o)' + else: + finalised_check = ( + '(!PyType_IS_GC(Py_TYPE(o)) || !__Pyx_PyObject_GC_IsFinalized(o))') + code.putln( + "if (unlikely(" + "(PY_VERSION_HEX >= 0x03080000 || __Pyx_PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE))" + " && __Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && %s) {" % finalised_check) + + code.putln("if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == %s) {" % slot_func_cname) + # if instance was resurrected by finaliser, return + code.putln("if (PyObject_CallFinalizerFromDealloc(o)) return;") + code.putln("}") + code.putln("}") + code.putln("#endif") + + if needs_gc: + # We must mark this object as (gc) untracked while tearing + # it down, lest the garbage collection is invoked while + # running this destructor. + code.putln("PyObject_GC_UnTrack(o);") + + if needs_trashcan: + code.globalstate.use_utility_code( + UtilityCode.load_cached("PyTrashcan", "ExtensionTypes.c")) + code.putln("__Pyx_TRASHCAN_BEGIN(o, %s)" % slot_func_cname) + + if weakref_slot: + # We must clean the weakreferences before calling the user's __dealloc__ + # because if the __dealloc__ releases the GIL, a weakref can be + # dereferenced accessing the object in an inconsistent state or + # resurrecting it. 
+ code.putln("if (p->__weakref__) PyObject_ClearWeakRefs(o);") + + # call the user's __dealloc__ + self.generate_usr_dealloc_call(scope, code) + + if dict_slot: + code.putln("if (p->__dict__) PyDict_Clear(p->__dict__);") + + for entry in cpp_destructable_attrs: + code.putln("__Pyx_call_destructor(p->%s);" % entry.cname) + + for entry in (py_attrs + memoryview_slices): + code.put_xdecref_clear("p->%s" % entry.cname, entry.type, nanny=False, + clear_before_decref=True, have_gil=True) + + if base_type: + base_cname = base_type.typeptr_cname + tp_dealloc = TypeSlots.get_base_slot_function(scope, tp_slot) + if tp_dealloc is not None: + if needs_gc and base_type.scope and base_type.scope.needs_gc(): + # We know that the base class uses GC, so probably expects it to be tracked. + # Undo the untracking above. + code.putln("PyObject_GC_Track(o);") + code.putln("%s(o);" % tp_dealloc) + elif base_type.is_builtin_type: + if needs_gc and base_type.scope and base_type.scope.needs_gc(): + # We know that the base class uses GC, so probably expects it to be tracked. + # Undo the untracking above. + code.putln("PyObject_GC_Track(o);") + code.putln("__Pyx_PyType_GetSlot(%s, tp_dealloc, destructor)(o);" % base_cname) + else: + if needs_gc: + # We don't know if the base class uses GC or not, so must find out at runtime + # whether we should undo the untracking above or not. + code.putln("#if PY_MAJOR_VERSION < 3") + # Py2 lacks guarantees that the type pointer is still valid if we dealloc the object + # at system exit time. Thus, we need an extra NULL check. + code.putln("if (!(%s) || PyType_IS_GC(%s)) PyObject_GC_Track(o);" % (base_cname, base_cname)) + code.putln("#else") + code.putln("if (PyType_IS_GC(%s)) PyObject_GC_Track(o);" % base_cname) + code.putln("#endif") + # This is an externally defined type. Calling through the + # cimported base type pointer directly interacts badly with + # the module cleanup, which may already have cleared it. 
+ # In that case, fall back to traversing the type hierarchy. + code.putln("if (likely(%s)) __Pyx_PyType_GetSlot(%s, tp_dealloc, destructor)(o); " + "else __Pyx_call_next_tp_dealloc(o, %s);" % ( + base_cname, base_cname, slot_func_cname)) + code.globalstate.use_utility_code( + UtilityCode.load_cached("CallNextTpDealloc", "ExtensionTypes.c")) + else: + freelist_size = scope.directives.get('freelist', 0) + if freelist_size: + freelist_name = scope.mangle_internal(Naming.freelist_name) + freecount_name = scope.mangle_internal(Naming.freecount_name) + + if is_final_type: + type_safety_check = '' + else: + type_safety_check = ( + ' & (int)(!__Pyx_PyType_HasFeature(Py_TYPE(o), (Py_TPFLAGS_IS_ABSTRACT | Py_TPFLAGS_HEAPTYPE)))') + + type = scope.parent_type + code.putln("#if CYTHON_USE_FREELISTS") + code.putln( + "if (((int)(%s < %d) & (int)(Py_TYPE(o)->tp_basicsize == sizeof(%s))%s)) {" % ( + freecount_name, + freelist_size, + type.declaration_code("", deref=True), + type_safety_check)) + code.putln("%s[%s++] = %s;" % ( + freelist_name, freecount_name, type.cast_code("o"))) + code.putln("} else") + code.putln("#endif") + code.putln("{") + code.putln("#if CYTHON_USE_TYPE_SLOTS || CYTHON_COMPILING_IN_PYPY") + # Asking for PyType_GetSlot(..., Py_tp_free) seems to cause an error in pypy + code.putln("(*Py_TYPE(o)->tp_free)(o);") + code.putln("#else") + code.putln("{") + code.putln("freefunc tp_free = (freefunc)PyType_GetSlot(Py_TYPE(o), Py_tp_free);") + code.putln("if (tp_free) tp_free(o);") + code.putln("}") + code.putln("#endif") + if freelist_size: + code.putln("}") + + if needs_trashcan: + code.putln("__Pyx_TRASHCAN_END") + + code.putln( + "}") + + def generate_usr_dealloc_call(self, scope, code): + entry = scope.lookup_here("__dealloc__") + if not entry or not entry.is_special: + return + + code.putln("{") + code.putln("PyObject *etype, *eval, *etb;") + code.putln("PyErr_Fetch(&etype, &eval, &etb);") + # increase the refcount while we are calling into user code + # to 
prevent recursive deallocation + code.putln("__Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);") + code.putln("%s(o);" % entry.func_cname) + code.putln("__Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);") + code.putln("PyErr_Restore(etype, eval, etb);") + code.putln("}") + + def generate_traverse_function(self, scope, code, cclass_entry): + tp_slot = TypeSlots.GCDependentSlot("tp_traverse") + slot_func = scope.mangle_internal("tp_traverse") + base_type = scope.parent_type.base_type + if tp_slot.slot_code(scope) != slot_func: + return # never used + code.putln("") + code.putln( + "static int %s(PyObject *o, visitproc v, void *a) {" % slot_func) + + have_entries, (py_attrs, py_buffers, memoryview_slices) = ( + scope.get_refcounted_entries(include_gc_simple=False)) + + if base_type or py_attrs: + code.putln("int e;") + + if py_attrs or py_buffers: + self.generate_self_cast(scope, code) + + if base_type: + # want to call it explicitly if possible so inlining can be performed + static_call = TypeSlots.get_base_slot_function(scope, tp_slot) + if static_call: + code.putln("e = %s(o, v, a); if (e) return e;" % static_call) + elif base_type.is_builtin_type: + base_cname = base_type.typeptr_cname + code.putln("if (!%s->tp_traverse); else { e = %s->tp_traverse(o,v,a); if (e) return e; }" % ( + base_cname, base_cname)) + else: + # This is an externally defined type. Calling through the + # cimported base type pointer directly interacts badly with + # the module cleanup, which may already have cleared it. + # In that case, fall back to traversing the type hierarchy. + base_cname = base_type.typeptr_cname + code.putln( + "e = ((likely(%s)) ? ((%s->tp_traverse) ? 
%s->tp_traverse(o, v, a) : 0) : " + "__Pyx_call_next_tp_traverse(o, v, a, %s)); if (e) return e;" % ( + base_cname, base_cname, base_cname, slot_func)) + code.globalstate.use_utility_code( + UtilityCode.load_cached("CallNextTpTraverse", "ExtensionTypes.c")) + + for entry in py_attrs: + var_code = "p->%s" % entry.cname + var_as_pyobject = PyrexTypes.typecast(py_object_type, entry.type, var_code) + code.putln("if (%s) {" % var_code) + code.putln("e = (*v)(%s, a); if (e) return e;" % var_as_pyobject) + code.putln("}") + + # Traverse buffer exporting objects. + # Note: not traversing memoryview attributes of memoryview slices! + # When triggered by the GC, it would cause multiple visits (gc_refs + # subtractions which is not matched by its reference count!) + for entry in py_buffers: + cname = entry.cname + ".obj" + code.putln("if (p->%s) {" % cname) + code.putln("e = (*v)(p->%s, a); if (e) return e;" % cname) + code.putln("}") + + code.putln("return 0;") + code.putln("}") + + def generate_clear_function(self, scope, code, cclass_entry): + tp_slot = TypeSlots.get_slot_by_name("tp_clear", scope.directives) + slot_func = scope.mangle_internal("tp_clear") + base_type = scope.parent_type.base_type + if tp_slot.slot_code(scope) != slot_func: + return # never used + + have_entries, (py_attrs, py_buffers, memoryview_slices) = ( + scope.get_refcounted_entries(include_gc_simple=False)) + + if py_attrs or py_buffers or base_type: + unused = '' + else: + unused = 'CYTHON_UNUSED ' + + code.putln("") + code.putln("static int %s(%sPyObject *o) {" % (slot_func, unused)) + + if py_attrs and Options.clear_to_none: + code.putln("PyObject* tmp;") + + if py_attrs or py_buffers: + self.generate_self_cast(scope, code) + + if base_type: + # want to call it explicitly if possible so inlining can be performed + static_call = TypeSlots.get_base_slot_function(scope, tp_slot) + if static_call: + code.putln("%s(o);" % static_call) + elif base_type.is_builtin_type: + base_cname = 
base_type.typeptr_cname + code.putln("if (!%s->tp_clear); else %s->tp_clear(o);" % ( + base_cname, base_cname)) + else: + # This is an externally defined type. Calling through the + # cimported base type pointer directly interacts badly with + # the module cleanup, which may already have cleared it. + # In that case, fall back to traversing the type hierarchy. + base_cname = base_type.typeptr_cname + code.putln( + "if (likely(%s)) { if (%s->tp_clear) %s->tp_clear(o); } else __Pyx_call_next_tp_clear(o, %s);" % ( + base_cname, base_cname, base_cname, slot_func)) + code.globalstate.use_utility_code( + UtilityCode.load_cached("CallNextTpClear", "ExtensionTypes.c")) + + if Options.clear_to_none: + for entry in py_attrs: + name = "p->%s" % entry.cname + code.putln("tmp = ((PyObject*)%s);" % name) + if entry.is_declared_generic: + code.put_init_to_py_none(name, py_object_type, nanny=False) + else: + code.put_init_to_py_none(name, entry.type, nanny=False) + code.putln("Py_XDECREF(tmp);") + else: + for entry in py_attrs: + code.putln("Py_CLEAR(p->%s);" % entry.cname) + + for entry in py_buffers: + # Note: shouldn't this call __Pyx_ReleaseBuffer ?? + code.putln("Py_CLEAR(p->%s.obj);" % entry.cname) + + if cclass_entry.cname == '__pyx_memoryviewslice': + code.putln("__PYX_XCLEAR_MEMVIEW(&p->from_slice, 1);") + + code.putln("return 0;") + code.putln("}") + + def generate_getitem_int_function(self, scope, code): + # This function is put into the sq_item slot when + # a __getitem__ method is present. It converts its + # argument to a Python integer and calls mp_subscript. 
+ code.putln( + "static PyObject *%s(PyObject *o, Py_ssize_t i) {" % ( + scope.mangle_internal("sq_item"))) + code.putln( + "PyObject *r;") + code.putln( + "PyObject *x = PyInt_FromSsize_t(i); if(!x) return 0;") + code.putln( + "r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x);") + code.putln( + "Py_DECREF(x);") + code.putln( + "return r;") + code.putln( + "}") + + def generate_ass_subscript_function(self, scope, code): + # Setting and deleting an item are both done through + # the ass_subscript method, so we dispatch to user's __setitem__ + # or __delitem__, or raise an exception. + base_type = scope.parent_type.base_type + set_entry = scope.lookup_here("__setitem__") + del_entry = scope.lookup_here("__delitem__") + code.putln("") + code.putln( + "static int %s(PyObject *o, PyObject *i, PyObject *v) {" % ( + scope.mangle_internal("mp_ass_subscript"))) + code.putln( + "if (v) {") + if set_entry: + code.putln("return %s(o, i, v);" % set_entry.func_cname) + else: + code.putln( + "__Pyx_TypeName o_type_name;") + self.generate_guarded_basetype_call( + base_type, "tp_as_mapping", "mp_ass_subscript", "o, i, v", code) + code.putln( + "o_type_name = __Pyx_PyType_GetName(Py_TYPE(o));") + code.putln( + "PyErr_Format(PyExc_NotImplementedError,") + code.putln( + ' "Subscript assignment not supported by " __Pyx_FMT_TYPENAME, o_type_name);') + code.putln( + "__Pyx_DECREF_TypeName(o_type_name);") + code.putln( + "return -1;") + code.putln( + "}") + code.putln( + "else {") + if del_entry: + code.putln( + "return %s(o, i);" % ( + del_entry.func_cname)) + else: + code.putln( + "__Pyx_TypeName o_type_name;") + self.generate_guarded_basetype_call( + base_type, "tp_as_mapping", "mp_ass_subscript", "o, i, v", code) + code.putln( + "o_type_name = __Pyx_PyType_GetName(Py_TYPE(o));") + code.putln( + "PyErr_Format(PyExc_NotImplementedError,") + code.putln( + ' "Subscript deletion not supported by " __Pyx_FMT_TYPENAME, o_type_name);') + code.putln( + "__Pyx_DECREF_TypeName(o_type_name);") + 
code.putln( + "return -1;") + code.putln( + "}") + code.putln( + "}") + + def generate_guarded_basetype_call( + self, base_type, substructure, slot, args, code): + if base_type: + base_tpname = base_type.typeptr_cname + if substructure: + code.putln( + "if (%s->%s && %s->%s->%s)" % ( + base_tpname, substructure, base_tpname, substructure, slot)) + code.putln( + " return %s->%s->%s(%s);" % ( + base_tpname, substructure, slot, args)) + else: + code.putln( + "if (%s->%s)" % ( + base_tpname, slot)) + code.putln( + " return %s->%s(%s);" % ( + base_tpname, slot, args)) + + def generate_ass_slice_function(self, scope, code): + # Setting and deleting a slice are both done through + # the ass_slice method, so we dispatch to user's __setslice__ + # or __delslice__, or raise an exception. + base_type = scope.parent_type.base_type + set_entry = scope.lookup_here("__setslice__") + del_entry = scope.lookup_here("__delslice__") + code.putln("") + code.putln( + "static int %s(PyObject *o, Py_ssize_t i, Py_ssize_t j, PyObject *v) {" % ( + scope.mangle_internal("sq_ass_slice"))) + code.putln( + "if (v) {") + if set_entry: + code.putln( + "return %s(o, i, j, v);" % ( + set_entry.func_cname)) + else: + code.putln( + "__Pyx_TypeName o_type_name;") + self.generate_guarded_basetype_call( + base_type, "tp_as_sequence", "sq_ass_slice", "o, i, j, v", code) + code.putln( + "o_type_name = __Pyx_PyType_GetName(Py_TYPE(o));") + code.putln( + "PyErr_Format(PyExc_NotImplementedError,") + code.putln( + ' "2-element slice assignment not supported by " __Pyx_FMT_TYPENAME, o_type_name);') + code.putln( + "__Pyx_DECREF_TypeName(o_type_name);") + code.putln( + "return -1;") + code.putln( + "}") + code.putln( + "else {") + if del_entry: + code.putln( + "return %s(o, i, j);" % ( + del_entry.func_cname)) + else: + code.putln( + "__Pyx_TypeName o_type_name;") + self.generate_guarded_basetype_call( + base_type, "tp_as_sequence", "sq_ass_slice", "o, i, j, v", code) + code.putln( + "o_type_name = 
__Pyx_PyType_GetName(Py_TYPE(o));") + code.putln( + "PyErr_Format(PyExc_NotImplementedError,") + code.putln( + ' "2-element slice deletion not supported by " __Pyx_FMT_TYPENAME, o_type_name);') + code.putln( + "__Pyx_DECREF_TypeName(o_type_name);") + code.putln( + "return -1;") + code.putln( + "}") + code.putln( + "}") + + def generate_richcmp_function(self, scope, code): + if scope.lookup_here("__richcmp__"): + # user implemented, nothing to do + return + # otherwise, we have to generate it from the Python special methods + richcmp_cfunc = scope.mangle_internal("tp_richcompare") + code.putln("") + code.putln("static PyObject *%s(PyObject *o1, PyObject *o2, int op) {" % richcmp_cfunc) + code.putln("switch (op) {") + + class_scopes = [] + cls = scope.parent_type + while cls is not None and not cls.entry.visibility == 'extern': + class_scopes.append(cls.scope) + cls = cls.scope.parent_type.base_type + assert scope in class_scopes + + extern_parent = None + if cls and cls.entry.visibility == 'extern': + # need to call up into base classes as we may not know all implemented comparison methods + extern_parent = cls if cls.typeptr_cname else scope.parent_type.base_type + + total_ordering = 'total_ordering' in scope.directives + + comp_entry = {} + + for cmp_method in TypeSlots.richcmp_special_methods: + for class_scope in class_scopes: + entry = class_scope.lookup_here(cmp_method) + if entry is not None: + comp_entry[cmp_method] = entry + break + + if total_ordering: + # Check this is valid - we must have at least 1 operation defined. 
+ comp_names = [from_name for from_name, to_name in TOTAL_ORDERING if from_name in comp_entry] + if not comp_names: + if '__eq__' not in comp_entry and '__ne__' not in comp_entry: + warning(scope.parent_type.pos, + "total_ordering directive used, but no comparison and equality methods defined") + else: + warning(scope.parent_type.pos, + "total_ordering directive used, but no comparison methods defined") + total_ordering = False + else: + if '__eq__' not in comp_entry and '__ne__' not in comp_entry: + warning(scope.parent_type.pos, "total_ordering directive used, but no equality method defined") + total_ordering = False + + # Same priority as functools, prefers + # __lt__ to __le__ to __gt__ to __ge__ + ordering_source = max(comp_names) + + for cmp_method in TypeSlots.richcmp_special_methods: + cmp_type = cmp_method.strip('_').upper() # e.g. "__eq__" -> EQ + entry = comp_entry.get(cmp_method) + if entry is None and (not total_ordering or cmp_type in ('NE', 'EQ')): + # No definition, fall back to superclasses. + # eq/ne methods shouldn't use the total_ordering code. + continue + + code.putln("case Py_%s: {" % cmp_type) + if entry is None: + assert total_ordering + # We need to generate this from the other methods. + invert_comp, comp_op, invert_equals = TOTAL_ORDERING[ordering_source, cmp_method] + + # First we always do the comparison. + code.putln("PyObject *ret;") + code.putln("ret = %s(o1, o2);" % comp_entry[ordering_source].func_cname) + code.putln("if (likely(ret && ret != Py_NotImplemented)) {") + code.putln("int order_res = __Pyx_PyObject_IsTrue(ret);") + code.putln("Py_DECREF(ret);") + code.putln("if (unlikely(order_res < 0)) return NULL;") + # We may need to check equality too. For some combos it's never required. + if invert_equals is not None: + # Implement the and/or check with an if. + if comp_op == '&&': + code.putln("if (%s order_res) {" % ('!!' 
if invert_comp else '!')) + code.putln("ret = __Pyx_NewRef(Py_False);") + code.putln("} else {") + elif comp_op == '||': + code.putln("if (%s order_res) {" % ('!' if invert_comp else '')) + code.putln("ret = __Pyx_NewRef(Py_True);") + code.putln("} else {") + else: + raise AssertionError('Unknown op %s' % (comp_op, )) + if '__eq__' in comp_entry: + eq_func = '__eq__' + else: + # Fall back to NE, which is defined here. + eq_func = '__ne__' + invert_equals = not invert_equals + + code.putln("ret = %s(o1, o2);" % comp_entry[eq_func].func_cname) + code.putln("if (likely(ret && ret != Py_NotImplemented)) {") + code.putln("int eq_res = __Pyx_PyObject_IsTrue(ret);") + code.putln("Py_DECREF(ret);") + code.putln("if (unlikely(eq_res < 0)) return NULL;") + if invert_equals: + code.putln("ret = eq_res ? Py_False : Py_True;") + else: + code.putln("ret = eq_res ? Py_True : Py_False;") + code.putln("Py_INCREF(ret);") + code.putln("}") # equals success + code.putln("}") # Needs to try equals + else: + # Convert direct to a boolean. + if invert_comp: + code.putln("ret = order_res ? Py_False : Py_True;") + else: + code.putln("ret = order_res ? Py_True : Py_False;") + code.putln("Py_INCREF(ret);") + code.putln("}") # comp_op + code.putln("return ret;") + else: + code.putln("return %s(o1, o2);" % entry.func_cname) + code.putln("}") # Case + + if '__eq__' in comp_entry and '__ne__' not in comp_entry and not extern_parent: + code.putln("case Py_NE: {") + code.putln("PyObject *ret;") + # Python itself does not do this optimisation, it seems... + #code.putln("if (o1 == o2) return __Pyx_NewRef(Py_False);") + code.putln("ret = %s(o1, o2);" % comp_entry['__eq__'].func_cname) + code.putln("if (likely(ret && ret != Py_NotImplemented)) {") + code.putln("int b = __Pyx_PyObject_IsTrue(ret);") + code.putln("Py_DECREF(ret);") + code.putln("if (unlikely(b < 0)) return NULL;") + code.putln("ret = (b) ? 
Py_False : Py_True;") + code.putln("Py_INCREF(ret);") + code.putln("}") + code.putln("return ret;") + code.putln("}") + + code.putln("default: {") + if extern_parent and extern_parent.typeptr_cname: + code.putln("if (likely(%s->tp_richcompare)) return %s->tp_richcompare(o1, o2, op);" % ( + extern_parent.typeptr_cname, extern_parent.typeptr_cname)) + code.putln("return __Pyx_NewRef(Py_NotImplemented);") + code.putln("}") + + code.putln("}") # switch + code.putln("}") + + def generate_binop_function(self, scope, slot, code, pos): + func_name = scope.mangle_internal(slot.slot_name) + if scope.directives['c_api_binop_methods']: + code.putln('#define %s %s' % (func_name, slot.left_slot.slot_code(scope))) + return + + code.putln() + preprocessor_guard = slot.preprocessor_guard_code() + if preprocessor_guard: + code.putln(preprocessor_guard) + + if slot.left_slot.signature in (TypeSlots.binaryfunc, TypeSlots.ibinaryfunc): + slot_type = 'binaryfunc' + extra_arg = extra_arg_decl = '' + elif slot.left_slot.signature in (TypeSlots.powternaryfunc, TypeSlots.ipowternaryfunc): + slot_type = 'ternaryfunc' + extra_arg = ', extra_arg' + extra_arg_decl = ', PyObject* extra_arg' + else: + error(pos, "Unexpected type slot signature: %s" % slot) + return + + def get_slot_method_cname(method_name): + entry = scope.lookup(method_name) + return entry.func_cname if entry and entry.is_special else None + + def call_slot_method(method_name, reverse): + func_cname = get_slot_method_cname(method_name) + if func_cname: + return "%s(%s%s)" % ( + func_cname, + "right, left" if reverse else "left, right", + extra_arg) + else: + return '%s_maybe_call_slot(__Pyx_PyType_GetSlot(%s, tp_base, PyTypeObject*), left, right %s)' % ( + func_name, + scope.parent_type.typeptr_cname, + extra_arg) + + if get_slot_method_cname(slot.left_slot.method_name) and not get_slot_method_cname(slot.right_slot.method_name): + warning(pos, "Extension type implements %s() but not %s(). 
" + "The behaviour has changed from previous Cython versions to match Python semantics. " + "You can implement both special methods in a backwards compatible way." % ( + slot.left_slot.method_name, + slot.right_slot.method_name, + )) + + overloads_left = int(bool(get_slot_method_cname(slot.left_slot.method_name))) + overloads_right = int(bool(get_slot_method_cname(slot.right_slot.method_name))) + code.putln( + TempitaUtilityCode.load_as_string( + "BinopSlot", "ExtensionTypes.c", + context={ + "func_name": func_name, + "slot_name": slot.slot_name, + "overloads_left": overloads_left, + "overloads_right": overloads_right, + "call_left": call_slot_method(slot.left_slot.method_name, reverse=False), + "call_right": call_slot_method(slot.right_slot.method_name, reverse=True), + "type_cname": scope.parent_type.typeptr_cname, + "slot_type": slot_type, + "extra_arg": extra_arg, + "extra_arg_decl": extra_arg_decl, + })[1]) + if preprocessor_guard: + code.putln("#endif") + + def generate_getattro_function(self, scope, code): + # First try to get the attribute using __getattribute__, if defined, or + # PyObject_GenericGetAttr. + # + # If that raises an AttributeError, call the __getattr__ if defined. + # + # In both cases, defined can be in this class, or any base class. 
+        def lookup_here_or_base(n, tp=None, extern_return=None):
+            # Recursive lookup of name `n`, first in this type's scope and
+            # then up the known base-type chain.  `extern_return` is handed
+            # back as a sentinel when an extern base type is reached, since
+            # an extern type's members are not fully known here.
+            if tp is None:
+                tp = scope.parent_type
+            r = tp.scope.lookup_here(n)
+            if r is None:
+                if tp.is_external and extern_return is not None:
+                    return extern_return
+                if tp.base_type is not None:
+                    return lookup_here_or_base(n, tp.base_type)
+            return r
+
+        # has_instance_dict is an entry, None, or the sentinel string "extern".
+        has_instance_dict = lookup_here_or_base("__dict__", extern_return="extern")
+        getattr_entry = lookup_here_or_base("__getattr__")
+        getattribute_entry = lookup_here_or_base("__getattribute__")
+        code.putln("")
+        code.putln(
+            "static PyObject *%s(PyObject *o, PyObject *n) {" % (
+                scope.mangle_internal("tp_getattro")))
+        if getattribute_entry is not None:
+            # A user-defined __getattribute__ replaces the generic lookup entirely.
+            code.putln(
+                "PyObject *v = %s(o, n);" % (
+                    getattribute_entry.func_cname))
+        else:
+            if not has_instance_dict and scope.parent_type.is_final_type:
+                # Final with no dict => use faster type attribute lookup.
+                code.globalstate.use_utility_code(
+                    UtilityCode.load_cached("PyObject_GenericGetAttrNoDict", "ObjectHandling.c"))
+                generic_getattr_cfunc = "__Pyx_PyObject_GenericGetAttrNoDict"
+            elif not has_instance_dict or has_instance_dict == "extern":
+                # No dict in the known ancestors, but don't know about extern ancestors or subtypes.
+                code.globalstate.use_utility_code(
+                    UtilityCode.load_cached("PyObject_GenericGetAttr", "ObjectHandling.c"))
+                generic_getattr_cfunc = "__Pyx_PyObject_GenericGetAttr"
+            else:
+                # An instance dict is known to exist: the plain CPython
+                # generic lookup is the right implementation.
+                generic_getattr_cfunc = "PyObject_GenericGetAttr"
+            code.putln(
+                "PyObject *v = %s(o, n);" % generic_getattr_cfunc)
+        if getattr_entry is not None:
+            # __getattr__ is only tried after the primary lookup failed
+            # with an AttributeError, matching Python class semantics.
+            code.putln(
+                "if (!v && PyErr_ExceptionMatches(PyExc_AttributeError)) {")
+            code.putln(
+                "PyErr_Clear();")
+            code.putln(
+                "v = %s(o, n);" % (
+                    getattr_entry.func_cname))
+            code.putln(
+                "}")
+        code.putln(
+            "return v;")
+        code.putln(
+            "}")
+
+    def generate_setattro_function(self, scope, code):
+        # Setting and deleting an attribute are both done through
+        # the setattro method, so we dispatch to user's __setattr__
+        # or __delattr__ or fall back on PyObject_GenericSetAttr.
+        # CPython passes v == NULL to tp_setattro to request deletion,
+        # hence the generated "if (v)" dispatch below.
+        base_type = scope.parent_type.base_type
+        set_entry = scope.lookup_here("__setattr__")
+        del_entry = scope.lookup_here("__delattr__")
+        code.putln("")
+        code.putln(
+            "static int %s(PyObject *o, PyObject *n, PyObject *v) {" % (
+                scope.mangle_internal("tp_setattro")))
+        code.putln(
+            "if (v) {")
+        if set_entry:
+            code.putln(
+                "return %s(o, n, v);" % (
+                    set_entry.func_cname))
+        else:
+            # No user __setattr__: give a base type's tp_setattro a
+            # (guarded) chance first, then fall back to the generic one.
+            self.generate_guarded_basetype_call(
+                base_type, None, "tp_setattro", "o, n, v", code)
+            code.putln(
+                "return PyObject_GenericSetAttr(o, n, v);")
+        code.putln(
+            "}")
+        code.putln(
+            "else {")
+        if del_entry:
+            code.putln(
+                "return %s(o, n);" % (
+                    del_entry.func_cname))
+        else:
+            self.generate_guarded_basetype_call(
+                base_type, None, "tp_setattro", "o, n, v", code)
+            # Deletion through the generic implementation: value == 0.
+            code.putln(
+                "return PyObject_GenericSetAttr(o, n, 0);")
+        code.putln(
+            "}")
+        code.putln(
+            "}")
+
+    def generate_descr_get_function(self, scope, code):
+        # The __get__ function of a descriptor object can be
+        # called with NULL for the second or third arguments
+        # under some circumstances, so we replace them with
+        # None in that case.
+        user_get_entry = scope.lookup_here("__get__")
+        code.putln("")
+        code.putln(
+            "static PyObject *%s(PyObject *o, PyObject *i, PyObject *c) {" % (
+                scope.mangle_internal("tp_descr_get")))
+        code.putln(
+            "PyObject *r = 0;")
+        # Normalise NULL instance/owner arguments to None before calling
+        # the user's __get__.
+        code.putln(
+            "if (!i) i = Py_None;")
+        code.putln(
+            "if (!c) c = Py_None;")
+        #code.put_incref("i", py_object_type)
+        #code.put_incref("c", py_object_type)
+        code.putln(
+            "r = %s(o, i, c);" % (
+                user_get_entry.func_cname))
+        #code.put_decref("i", py_object_type)
+        #code.put_decref("c", py_object_type)
+        code.putln(
+            "return r;")
+        code.putln(
+            "}")
+
+    def generate_descr_set_function(self, scope, code):
+        # Setting and deleting are both done through the __set__
+        # method of a descriptor, so we dispatch to user's __set__
+        # or __delete__ or raise an exception.
+        # As with tp_setattro, v == NULL requests deletion.
+        base_type = scope.parent_type.base_type
+        user_set_entry = scope.lookup_here("__set__")
+        user_del_entry = scope.lookup_here("__delete__")
+        code.putln("")
+        code.putln(
+            "static int %s(PyObject *o, PyObject *i, PyObject *v) {" % (
+                scope.mangle_internal("tp_descr_set")))
+        code.putln(
+            "if (v) {")
+        if user_set_entry:
+            code.putln(
+                "return %s(o, i, v);" % (
+                    user_set_entry.func_cname))
+        else:
+            # No user __set__: try a base type's slot (guarded), otherwise
+            # the generated code raises NotImplementedError.
+            self.generate_guarded_basetype_call(
+                base_type, None, "tp_descr_set", "o, i, v", code)
+            code.putln(
+                'PyErr_SetString(PyExc_NotImplementedError, "__set__");')
+            code.putln(
+                "return -1;")
+        code.putln(
+            "}")
+        code.putln(
+            "else {")
+        if user_del_entry:
+            code.putln(
+                "return %s(o, i);" % (
+                    user_del_entry.func_cname))
+        else:
+            self.generate_guarded_basetype_call(
+                base_type, None, "tp_descr_set", "o, i, v", code)
+            code.putln(
+                'PyErr_SetString(PyExc_NotImplementedError, "__delete__");')
+            code.putln(
+                "return -1;")
+        code.putln(
+            "}")
+        code.putln(
+            "}")
+
+    def generate_property_accessors(self, cclass_scope, code):
+        # Emit a C getter/setter wrapper for each property of the class
+        # that defines the corresponding special method(s).
+        for entry in cclass_scope.property_entries:
+            property_scope = entry.scope
+            if property_scope.defines_any(["__get__"]):
+                self.generate_property_get_function(entry, code)
+            if property_scope.defines_any(["__set__", "__del__"]):
+                self.generate_property_set_function(entry, code)
+
+    def generate_property_get_function(self, property_entry, code):
+        # Thin C wrapper with the PyGetSetDef getter signature that
+        # forwards to the property's __get__ implementation.
+        property_scope = property_entry.scope
+        property_entry.getter_cname = property_scope.parent_scope.mangle(
+            Naming.prop_get_prefix, property_entry.name)
+        get_entry = property_scope.lookup_here("__get__")
+        code.putln("")
+        code.putln(
+            "static PyObject *%s(PyObject *o, CYTHON_UNUSED void *x) {" % (
+                property_entry.getter_cname))
+        code.putln(
+            "return %s(o);" % (
+                get_entry.func_cname))
+        code.putln(
+            "}")
+
+    def generate_property_set_function(self, property_entry, code):
+        # PyGetSetDef setter wrapper: dispatches to the property's __set__
+        # (v != NULL) or __del__ (v == NULL), raising NotImplementedError
+        # for whichever of the two is missing.
+        property_scope = property_entry.scope
+        property_entry.setter_cname = property_scope.parent_scope.mangle(
+            Naming.prop_set_prefix, property_entry.name)
+        set_entry = property_scope.lookup_here("__set__")
+        del_entry = property_scope.lookup_here("__del__")
+        code.putln("")
+        code.putln(
+            "static int %s(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {" % (
+                property_entry.setter_cname))
+        code.putln(
+            "if (v) {")
+        if set_entry:
+            code.putln(
+                "return %s(o, v);" % (
+                    set_entry.func_cname))
+        else:
+            code.putln(
+                'PyErr_SetString(PyExc_NotImplementedError, "__set__");')
+            code.putln(
+                "return -1;")
+        code.putln(
+            "}")
+        code.putln(
+            "else {")
+        if del_entry:
+            code.putln(
+                "return %s(o);" % (
+                    del_entry.func_cname))
+        else:
+            code.putln(
+                'PyErr_SetString(PyExc_NotImplementedError, "__del__");')
+            code.putln(
+                "return -1;")
+        code.putln(
+            "}")
+        code.putln(
+            "}")
+
+    def generate_typeobj_spec(self, entry, code):
+        # Emit a PyType_Slot table plus a PyType_Spec for spec-based type
+        # creation (the alternative to the static PyTypeObject emitted by
+        # generate_typeobj_definition below).
+        ext_type = entry.type
+        scope = ext_type.scope
+
+        members_slot = TypeSlots.get_slot_by_name("tp_members", code.globalstate.directives)
+        members_slot.generate_substructure_spec(scope, code)
+
+        buffer_slot = TypeSlots.get_slot_by_name("tp_as_buffer", code.globalstate.directives)
+        if not buffer_slot.is_empty(scope):
+            # The buffer substructure is not available under the Limited API.
+            code.putln("#if !CYTHON_COMPILING_IN_LIMITED_API")
+            buffer_slot.generate_substructure(scope, code)
+            code.putln("#endif")
+
+        code.putln("static PyType_Slot %s_slots[] = {" % ext_type.typeobj_cname)
+        for slot in TypeSlots.get_slot_table(code.globalstate.directives):
+            slot.generate_spec(scope, code)
+        code.putln("{0, 0},")
+        code.putln("};")
+
+        if ext_type.typedef_flag:
+            objstruct = ext_type.objstruct_cname
+        else:
+            objstruct = "struct %s" % ext_type.objstruct_cname
+        classname = scope.class_name.as_c_string_literal()
+        code.putln("static PyType_Spec %s_spec = {" % ext_type.typeobj_cname)
+        code.putln('"%s.%s",' % (self.full_module_name, classname.replace('"', '')))
+        code.putln("sizeof(%s)," % objstruct)
+        code.putln("0,")
+        code.putln("%s," % TypeSlots.get_slot_by_name("tp_flags", scope.directives).slot_code(scope))
+        code.putln("%s_slots," % ext_type.typeobj_cname)
+        code.putln("};")
+
+    def generate_typeobj_definition(self, modname, entry, code):
+        # Emit the classic statically-initialised PyTypeObject definition
+        # (slot substructures first, then the type object itself).
+        type = entry.type
+        scope = type.scope
+        for suite in TypeSlots.get_slot_table(code.globalstate.directives).substructures:
+            suite.generate_substructure(scope, code)
+        code.putln("")
+        if entry.visibility == 'public':
+            header = "DL_EXPORT(PyTypeObject) %s = {"
+        else:
+            header = "static PyTypeObject %s = {"
+        #code.putln(header % scope.parent_type.typeobj_cname)
+        code.putln(header % type.typeobj_cname)
+        code.putln(
+            "PyVarObject_HEAD_INIT(0, 0)")
+        classname = scope.class_name.as_c_string_literal()
+        code.putln(
+            '"%s."%s, /*tp_name*/' % (
+                self.full_module_name,
+                classname))
+        if type.typedef_flag:
+            objstruct = type.objstruct_cname
+        else:
+            objstruct = "struct %s" % type.objstruct_cname
+        code.putln(
+            "sizeof(%s), /*tp_basicsize*/" % objstruct)
+        code.putln(
+            "0, /*tp_itemsize*/")
+        for slot in TypeSlots.get_slot_table(code.globalstate.directives):
+            slot.generate(scope, code)
+        code.putln(
+            "};")
+
+    def generate_method_table(self, env, code):
+        # Emit the PyMethodDef table for the scope; fused cfunctions and,
+        # in binding mode, overridable entries are registered elsewhere
+        # and therefore excluded here.
+        if env.is_c_class_scope and not env.pyfunc_entries:
+            return
+        binding = env.directives['binding']
+
+        code.putln("")
+        wrapper_code_writer = code.insertion_point()
+
+        code.putln(
+            "static PyMethodDef %s[] = {" % (
+                env.method_table_cname))
+        for entry in env.pyfunc_entries:
+            if not entry.fused_cfunction and not (binding and entry.is_overridable):
+                code.put_pymethoddef(entry, ",", wrapper_code_writer=wrapper_code_writer)
+        code.putln(
+            "{0, 0, 0, 0}")
+        code.putln(
+            "};")
+
+        if wrapper_code_writer.getvalue():
+            wrapper_code_writer.putln("")
+
+    def generate_dict_getter_function(self, scope, code):
+        # Getter for the __dict__ attribute that lazily creates the
+        # instance dict on first access.
+        dict_attr = scope.lookup_here("__dict__")
+        if not dict_attr or not dict_attr.is_variable:
+            return
+        func_name = scope.mangle_internal("__dict__getter")
+        dict_name = dict_attr.cname
+        code.putln("")
+        code.putln("static PyObject *%s(PyObject *o, CYTHON_UNUSED void *x) {" % func_name)
+        self.generate_self_cast(scope, code)
+        code.putln("if (unlikely(!p->%s)){" % dict_name)
+        code.putln("p->%s = PyDict_New();" % dict_name)
+        code.putln("}")
+        code.putln("Py_XINCREF(p->%s);" % dict_name)
+        code.putln("return p->%s;" % dict_name)
+        code.putln("}")
+
+    def generate_getset_table(self, env, code):
+        # Emit the PyGetSetDef table for the scope's properties,
+        # terminated by an all-zero sentinel entry.
+        if env.property_entries:
+            code.putln("")
+            code.putln(
+                "static struct PyGetSetDef %s[] = {" %
+                env.getset_table_cname)
+            for entry in env.property_entries:
+                doc = entry.doc
+                if doc:
+                    if doc.is_unicode:
+                        doc = doc.as_utf8_string()
+                    doc_code = "PyDoc_STR(%s)" % doc.as_c_string_literal()
+                else:
+                    doc_code = "0"
+                code.putln(
+                    '{(char *)%s, %s, %s, (char *)%s, 0},' % (
+                        entry.name.as_c_string_literal(),
+                        entry.getter_cname or "0",
+                        entry.setter_cname or "0",
+                        doc_code))
+            code.putln(
+                "{0, 0, 0, 0, 0}")
+            code.putln(
+                "};")
+
+    def create_import_star_conversion_utility_code(self, env):
+        # Create all conversion helpers that are needed for "import *" assignments.
+        # Must be done before code generation to support CythonUtilityCode.
+        for name, entry in sorted(env.entries.items()):
+            if entry.is_cglobal and entry.used:
+                if not entry.type.is_pyobject:
+                    entry.type.create_from_py_utility_code(env)
+
+    def generate_import_star(self, env, code):
+        # Generate the C helper that assigns objects received via
+        # "from module import *" into this module's C globals (with type
+        # checks / conversions), falling back to a generic setattr.
+        env.use_utility_code(UtilityCode.load_cached("CStringEquals", "StringTools.c"))
+        code.putln()
+        code.enter_cfunc_scope()  # as we need labels
+        code.putln("static int %s(PyObject *o, PyObject* py_name, char *name) {" % Naming.import_star_set)
+
+        # C-level type names must not be overwritten by an import; the
+        # generated code rejects them by name.
+        code.putln("static const char* internal_type_names[] = {")
+        for name, entry in sorted(env.entries.items()):
+            if entry.is_type:
+                code.putln('"%s",' % name)
+        code.putln("0")
+        code.putln("};")
+
+        code.putln("const char** type_name = internal_type_names;")
+        code.putln("while (*type_name) {")
+        code.putln("if (__Pyx_StrEq(name, *type_name)) {")
+        code.putln('PyErr_Format(PyExc_TypeError, "Cannot overwrite C type %s", name);')
+        code.putln('goto bad;')
+        code.putln("}")
+        code.putln("type_name++;")
+        code.putln("}")
+
+        old_error_label = code.new_error_label()
+        code.putln("if (0);")  # so the first one can be "else if"
+        msvc_count = 0
+        for name, entry in sorted(env.entries.items()):
+            if entry.is_cglobal and entry.used and not entry.type.is_const:
+                msvc_count += 1
+                if msvc_count % 100 == 0:
+                    # Break up the else-if chain periodically to avoid
+                    # MSVC's nesting limit (error C1061).
+                    code.putln("#ifdef _MSC_VER")
+                    code.putln("if (0); /* Workaround for MSVC C1061. */")
+                    code.putln("#endif")
+                code.putln('else if (__Pyx_StrEq(name, "%s")) {' % name)
+                if entry.type.is_pyobject:
+                    if entry.type.is_extension_type or entry.type.is_builtin_type:
+                        code.putln("if (!(%s)) %s;" % (
+                            entry.type.type_test_code("o"),
+                            code.error_goto(entry.pos)))
+                    code.putln("Py_INCREF(o);")
+                    code.put_decref(entry.cname, entry.type, nanny=False)
+                    code.putln("%s = %s;" % (
+                        entry.cname,
+                        PyrexTypes.typecast(entry.type, py_object_type, "o")))
+                elif entry.type.create_from_py_utility_code(env):
+                    # if available, utility code was already created in self.prepare_utility_code()
+                    code.putln(entry.type.from_py_call_code(
+                        'o', entry.cname, entry.pos, code))
+                else:
+                    code.putln('PyErr_Format(PyExc_TypeError, "Cannot convert Python object %s to %s");' % (
+                        name, entry.type))
+                    code.putln(code.error_goto(entry.pos))
+                code.putln("}")
+        code.putln("else {")
+        code.putln("if (PyObject_SetAttr(%s, py_name, o) < 0) goto bad;" % Naming.module_cname)
+        code.putln("}")
+        code.putln("return 0;")
+        if code.label_used(code.error_label):
+            code.put_label(code.error_label)
+            # This helps locate the offending name.
+            code.put_add_traceback(EncodedString(self.full_module_name))
+        code.error_label = old_error_label
+        code.putln("bad:")
+        code.putln("return -1;")
+        code.putln("}")
+        code.putln("")
+        code.putln(UtilityCode.load_as_string("ImportStar", "ImportExport.c")[1])
+        code.exit_cfunc_scope()  # done with labels
+
+    def generate_module_state_start(self, env, code):
+        # Opens the "typedef struct { ..." declaration of the per-module
+        # state; it is closed later by generate_module_state_end().
+        # TODO: Refactor to move module state struct decl closer to the static decl
+        code.putln('typedef struct {')
+        code.putln('PyObject *%s;' % env.module_dict_cname)
+        code.putln('PyObject *%s;' % Naming.builtins_cname)
+        code.putln('PyObject *%s;' % Naming.cython_runtime_cname)
+        code.putln('PyObject *%s;' % Naming.empty_tuple)
+        code.putln('PyObject *%s;' % Naming.empty_bytes)
+        code.putln('PyObject *%s;' % Naming.empty_unicode)
+        if Options.pre_import is not None:
+            code.putln('PyObject *%s;' % Naming.preimport_cname)
+        for type_cname, used_name in Naming.used_types_and_macros:
+            code.putln('#ifdef %s' % used_name)
+            code.putln('PyTypeObject *%s;' % type_cname)
+            code.putln('#endif')
+
+    def generate_module_state_end(self, env, modules, globalstate):
+        # Closes the module-state struct and emits the accessor macros:
+        # under CYTHON_USE_MODULE_STATE the state is looked up via
+        # PyState_FindModule(); otherwise a static instance is used.
+        module_state = globalstate['module_state']
+        module_state_defines = globalstate['module_state_defines']
+        module_state_clear = globalstate['module_state_clear']
+        module_state_traverse = globalstate['module_state_traverse']
+        module_state.putln('} %s;' % Naming.modulestate_cname)
+        module_state.putln('')
+        module_state.putln("#if CYTHON_USE_MODULE_STATE")
+        module_state.putln('#ifdef __cplusplus')
+        module_state.putln('namespace {')
+        module_state.putln('extern struct PyModuleDef %s;' % Naming.pymoduledef_cname)
+        module_state.putln('} /* anonymous namespace */')
+        module_state.putln('#else')
+        module_state.putln('static struct PyModuleDef %s;' % Naming.pymoduledef_cname)
+        module_state.putln('#endif')
+        module_state.putln('')
+        module_state.putln('#define %s(o) ((%s *)__Pyx_PyModule_GetState(o))' % (
+            Naming.modulestate_cname,
+            Naming.modulestate_cname))
+        module_state.putln('')
+        module_state.putln('#define %s (%s(PyState_FindModule(&%s)))' % (
+            Naming.modulestateglobal_cname,
+            Naming.modulestate_cname,
+            Naming.pymoduledef_cname))
+        module_state.putln('')
+        module_state.putln('#define %s (PyState_FindModule(&%s))' % (
+            env.module_cname,
+            Naming.pymoduledef_cname))
+        module_state.putln("#else")
+        module_state.putln('static %s %s_static =' % (
+            Naming.modulestate_cname,
+            Naming.modulestateglobal_cname
+        ))
+        module_state.putln('#ifdef __cplusplus')
+        # C++ likes to be initialized with {} to avoid "missing initializer" warnings
+        # but it isn't valid C
+        module_state.putln('    {};')
+        module_state.putln('#else')
+        module_state.putln('    {0};')
+        module_state.putln('#endif')
+        module_state.putln('static %s *%s = &%s_static;' % (
+            Naming.modulestate_cname,
+            Naming.modulestateglobal_cname,
+            Naming.modulestateglobal_cname
+        ))
+        module_state.putln("#endif")
+        module_state_clear.putln("return 0;")
+        module_state_clear.putln("}")
+        module_state_clear.putln("#endif")
+        module_state_traverse.putln("return 0;")
+        module_state_traverse.putln("}")
+        module_state_traverse.putln("#endif")
+
+    def generate_module_state_defines(self, env, code):
+        # #define each well-known global so that references go through the
+        # (global) module-state pointer.
+        code.putln('#define %s %s->%s' % (
+            env.module_dict_cname,
+            Naming.modulestateglobal_cname,
+            env.module_dict_cname))
+        code.putln('#define %s %s->%s' % (
+            Naming.builtins_cname,
+            Naming.modulestateglobal_cname,
+            Naming.builtins_cname))
+        code.putln('#define %s %s->%s' % (
+            Naming.cython_runtime_cname,
+            Naming.modulestateglobal_cname,
+            Naming.cython_runtime_cname))
+        code.putln('#define %s %s->%s' % (
+            Naming.empty_tuple,
+            Naming.modulestateglobal_cname,
+            Naming.empty_tuple))
+        code.putln('#define %s %s->%s' % (
+            Naming.empty_bytes,
+            Naming.modulestateglobal_cname,
+            Naming.empty_bytes))
+        code.putln('#define %s %s->%s' % (
+            Naming.empty_unicode,
+            Naming.modulestateglobal_cname,
+            Naming.empty_unicode))
+        if Options.pre_import is not None:
+            code.putln('#define %s %s->%s' % (
+                Naming.preimport_cname,
+                Naming.modulestateglobal_cname,
+                Naming.preimport_cname))
+        for cname, used_name in Naming.used_types_and_macros:
+            code.putln('#ifdef %s' % used_name)
+            code.putln('#define %s %s->%s' % (
+                cname,
+                Naming.modulestateglobal_cname,
+                cname))
+            code.putln('#endif')
+
+    def generate_module_state_clear(self, env, code):
+        # Emits (the open part of) the m_clear GC hook: Py_CLEAR every
+        # object held in the module state.  The function is closed in
+        # generate_module_state_end().
+        code.putln("#if CYTHON_USE_MODULE_STATE")
+        code.putln("static int %s_clear(PyObject *m) {" % Naming.module_cname)
+        code.putln("%s *clear_module_state = %s(m);" % (
+            Naming.modulestate_cname,
+            Naming.modulestate_cname))
+        code.putln("if (!clear_module_state) return 0;")
+        code.putln('Py_CLEAR(clear_module_state->%s);' %
+            env.module_dict_cname)
+        code.putln('Py_CLEAR(clear_module_state->%s);' %
+            Naming.builtins_cname)
+        code.putln('Py_CLEAR(clear_module_state->%s);' %
+            Naming.cython_runtime_cname)
+        code.putln('Py_CLEAR(clear_module_state->%s);' %
+            Naming.empty_tuple)
+        code.putln('Py_CLEAR(clear_module_state->%s);' %
+            Naming.empty_bytes)
+        code.putln('Py_CLEAR(clear_module_state->%s);' %
+            Naming.empty_unicode)
+        code.putln('#ifdef __Pyx_CyFunction_USED')
+        code.putln('Py_CLEAR(clear_module_state->%s);' %
+            Naming.cyfunction_type_cname)
+        code.putln('#endif')
+        code.putln('#ifdef __Pyx_FusedFunction_USED')
+        code.putln('Py_CLEAR(clear_module_state->%s);' %
+            Naming.fusedfunction_type_cname)
+        code.putln('#endif')
+
+    def generate_module_state_traverse(self, env, code):
+        # Emits (the open part of) the m_traverse GC hook, mirroring
+        # generate_module_state_clear with Py_VISIT.
+        code.putln("#if CYTHON_USE_MODULE_STATE")
+        code.putln("static int %s_traverse(PyObject *m, visitproc visit, void *arg) {" % Naming.module_cname)
+        code.putln("%s *traverse_module_state = %s(m);" % (
+            Naming.modulestate_cname,
+            Naming.modulestate_cname))
+        code.putln("if (!traverse_module_state) return 0;")
+        code.putln('Py_VISIT(traverse_module_state->%s);' %
+            env.module_dict_cname)
+        code.putln('Py_VISIT(traverse_module_state->%s);' %
+            Naming.builtins_cname)
+        code.putln('Py_VISIT(traverse_module_state->%s);' %
+            Naming.cython_runtime_cname)
+        code.putln('Py_VISIT(traverse_module_state->%s);' %
+            Naming.empty_tuple)
+        code.putln('Py_VISIT(traverse_module_state->%s);' %
+            Naming.empty_bytes)
+        code.putln('Py_VISIT(traverse_module_state->%s);' %
+            Naming.empty_unicode)
+        code.putln('#ifdef __Pyx_CyFunction_USED')
+        code.putln('Py_VISIT(traverse_module_state->%s);' %
+            Naming.cyfunction_type_cname)
+        code.putln('#endif')
+        code.putln('#ifdef __Pyx_FusedFunction_USED')
+        code.putln('Py_VISIT(traverse_module_state->%s);' %
+            Naming.fusedfunction_type_cname)
+        code.putln('#endif')
+
+    def generate_module_init_func(self, imported_modules, env, code):
+        subfunction = self.mod_init_subfunction(self.pos, self.scope, code)
+
+        self.generate_pymoduledef_struct(env, code)
+
+        code.enter_cfunc_scope(self.scope)
+        code.putln("")
+        code.putln(UtilityCode.load_as_string("PyModInitFuncType", "ModuleSetupCode.c")[0])
+        if env.module_name.isascii():
+            py2_mod_name = env.module_name
+            fail_compilation_in_py2 = False
+        else:
+            fail_compilation_in_py2 = True
+            # at this point py2_mod_name is largely a placeholder and the value doesn't matter
+            py2_mod_name = env.module_name.encode("ascii", errors="ignore").decode("utf8")
+
+        header2 = "__Pyx_PyMODINIT_FUNC init%s(void)" % py2_mod_name
+        header3 = "__Pyx_PyMODINIT_FUNC %s(void)" % self.mod_init_func_cname('PyInit', env)
+        header3 = EncodedString(header3)
+        code.putln("#if PY_MAJOR_VERSION < 3")
+        # Optimise for small code size as the module init function is only executed once.
+ code.putln("%s CYTHON_SMALL_CODE; /*proto*/" % header2) + if fail_compilation_in_py2: + code.putln('#error "Unicode module names are not supported in Python 2";') + if self.scope.is_package: + code.putln("#if !defined(CYTHON_NO_PYINIT_EXPORT) && (defined(_WIN32) || defined(WIN32) || defined(MS_WINDOWS))") + code.putln("__Pyx_PyMODINIT_FUNC init__init__(void) { init%s(); }" % py2_mod_name) + code.putln("#endif") + code.putln(header2) + code.putln("#else") + code.putln("%s CYTHON_SMALL_CODE; /*proto*/" % header3) + if self.scope.is_package: + code.putln("#if !defined(CYTHON_NO_PYINIT_EXPORT) && (defined(_WIN32) || defined(WIN32) || defined(MS_WINDOWS))") + code.putln("__Pyx_PyMODINIT_FUNC PyInit___init__(void) { return %s(); }" % ( + self.mod_init_func_cname('PyInit', env))) + code.putln("#endif") + # Hack for a distutils bug - https://bugs.python.org/issue39432 + # distutils attempts to make visible a slightly wrong PyInitU module name. Just create a dummy + # function to keep it quiet + wrong_punycode_module_name = self.wrong_punycode_module_name(env.module_name) + if wrong_punycode_module_name: + code.putln("#if !defined(CYTHON_NO_PYINIT_EXPORT) && (defined(_WIN32) || defined(WIN32) || defined(MS_WINDOWS))") + code.putln("void %s(void) {} /* workaround for https://bugs.python.org/issue39432 */" % wrong_punycode_module_name) + code.putln("#endif") + code.putln(header3) + + # CPython 3.5+ supports multi-phase module initialisation (gives access to __spec__, __file__, etc.) 
+ code.putln("#if CYTHON_PEP489_MULTI_PHASE_INIT") + code.putln("{") + code.putln("return PyModuleDef_Init(&%s);" % Naming.pymoduledef_cname) + code.putln("}") + + mod_create_func = UtilityCode.load_as_string("ModuleCreationPEP489", "ModuleSetupCode.c")[1] + code.put(mod_create_func) + + code.putln("") + # main module init code lives in Py_mod_exec function, not in PyInit function + code.putln("static CYTHON_SMALL_CODE int %s(PyObject *%s)" % ( + self.module_init_func_cname(), + Naming.pymodinit_module_arg)) + code.putln("#endif") # PEP489 + + code.putln("#endif") # Py3 + + # start of module init/exec function (pre/post PEP 489) + code.putln("{") + code.putln('int stringtab_initialized = 0;') + code.putln("#if CYTHON_USE_MODULE_STATE") + code.putln('int pystate_addmodule_run = 0;') + code.putln("#endif") + + tempdecl_code = code.insertion_point() + + profile = code.globalstate.directives['profile'] + linetrace = code.globalstate.directives['linetrace'] + if profile or linetrace: + if linetrace: + code.use_fast_gil_utility_code() + code.globalstate.use_utility_code(UtilityCode.load_cached("Profile", "Profile.c")) + + code.put_declare_refcount_context() + code.putln("#if CYTHON_PEP489_MULTI_PHASE_INIT") + # Most extension modules simply can't deal with it, and Cython isn't ready either. + # See issues listed here: https://docs.python.org/3/c-api/init.html#sub-interpreter-support + code.putln("if (%s) {" % Naming.module_cname) + # Hack: enforce single initialisation. + code.putln("if (%s == %s) return 0;" % ( + Naming.module_cname, + Naming.pymodinit_module_arg, + )) + code.putln('PyErr_SetString(PyExc_RuntimeError,' + ' "Module \'%s\' has already been imported. Re-initialisation is not supported.");' % + env.module_name.as_c_string_literal()[1:-1]) + code.putln("return -1;") + code.putln("}") + code.putln("#elif PY_MAJOR_VERSION >= 3") + # Hack: enforce single initialisation also on reimports under different names on Python 3 (with PEP 3121/489). 
+ code.putln("if (%s) return __Pyx_NewRef(%s);" % ( + Naming.module_cname, + Naming.module_cname, + )) + code.putln("#endif") + + code.putln("/*--- Module creation code ---*/") + self.generate_module_creation_code(env, code) + + if profile or linetrace: + tempdecl_code.put_trace_declarations() + code.put_trace_frame_init() + + refnanny_import_code = UtilityCode.load_as_string("ImportRefnannyAPI", "ModuleSetupCode.c")[1] + code.putln(refnanny_import_code.rstrip()) + code.put_setup_refcount_context(header3) + + env.use_utility_code(UtilityCode.load("CheckBinaryVersion", "ModuleSetupCode.c")) + code.put_error_if_neg(self.pos, "__Pyx_check_binary_version(" + "__PYX_LIMITED_VERSION_HEX, " + "__Pyx_get_runtime_version(), " + "CYTHON_COMPILING_IN_LIMITED_API)" + ) + + code.putln("#ifdef __Pxy_PyFrame_Initialize_Offsets") + code.putln("__Pxy_PyFrame_Initialize_Offsets();") + code.putln("#endif") + code.putln("%s = PyTuple_New(0); %s" % ( + Naming.empty_tuple, code.error_goto_if_null(Naming.empty_tuple, self.pos))) + code.putln("%s = PyBytes_FromStringAndSize(\"\", 0); %s" % ( + Naming.empty_bytes, code.error_goto_if_null(Naming.empty_bytes, self.pos))) + code.putln("%s = PyUnicode_FromStringAndSize(\"\", 0); %s" % ( + Naming.empty_unicode, code.error_goto_if_null(Naming.empty_unicode, self.pos))) + + for ext_type in ('CyFunction', 'FusedFunction', 'Coroutine', 'Generator', 'AsyncGen', 'StopAsyncIteration'): + code.putln("#ifdef __Pyx_%s_USED" % ext_type) + code.put_error_if_neg(self.pos, "__pyx_%s_init(%s)" % (ext_type, env.module_cname)) + code.putln("#endif") + + code.putln("/*--- Library function declarations ---*/") + if env.directives['np_pythran']: + code.put_error_if_neg(self.pos, "_import_array()") + + code.putln("/*--- Threads initialization code ---*/") + code.putln("#if defined(WITH_THREAD) && PY_VERSION_HEX < 0x030700F0 " + "&& defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS") + code.putln("PyEval_InitThreads();") + code.putln("#endif") + + 
code.putln("/*--- Initialize various global constants etc. ---*/") + code.put_error_if_neg(self.pos, "__Pyx_InitConstants()") + code.putln("stringtab_initialized = 1;") + code.put_error_if_neg(self.pos, "__Pyx_InitGlobals()") # calls any utility code + + + code.putln("#if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || " + "__PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT)") + code.put_error_if_neg(self.pos, "__Pyx_init_sys_getdefaultencoding_params()") + code.putln("#endif") + + code.putln("if (%s) {" % self.is_main_module_flag_cname()) + code.put_error_if_neg(self.pos, 'PyObject_SetAttr(%s, %s, %s)' % ( + env.module_cname, + code.intern_identifier(EncodedString("__name__")), + code.intern_identifier(EncodedString("__main__")))) + code.putln("}") + + # set up __file__ and __path__, then add the module to sys.modules + self.generate_module_import_setup(env, code) + + if Options.cache_builtins: + code.putln("/*--- Builtin init code ---*/") + code.put_error_if_neg(self.pos, "__Pyx_InitCachedBuiltins()") + + code.putln("/*--- Constants init code ---*/") + code.put_error_if_neg(self.pos, "__Pyx_InitCachedConstants()") + + code.putln("/*--- Global type/function init code ---*/") + + with subfunction("Global init code") as inner_code: + self.generate_global_init_code(env, inner_code) + + with subfunction("Variable export code") as inner_code: + self.generate_c_variable_export_code(env, inner_code) + + with subfunction("Function export code") as inner_code: + self.generate_c_function_export_code(env, inner_code) + + with subfunction("Type init code") as inner_code: + self.generate_type_init_code(env, inner_code) + + with subfunction("Type import code") as inner_code: + for module in imported_modules: + self.generate_type_import_code_for_module(module, env, inner_code) + + with subfunction("Variable import code") as inner_code: + for module in imported_modules: + self.generate_c_variable_import_code_for_module(module, env, inner_code) + + with 
subfunction("Function import code") as inner_code: + for module in imported_modules: + self.specialize_fused_types(module) + self.generate_c_function_import_code_for_module(module, env, inner_code) + + code.putln("/*--- Execution code ---*/") + code.mark_pos(None) + + code.putln("#if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED)") + code.put_error_if_neg(self.pos, "__Pyx_patch_abc()") + code.putln("#endif") + + if profile or linetrace: + code.put_trace_call(header3, self.pos, nogil=not code.funcstate.gil_owned) + code.funcstate.can_trace = True + + code.mark_pos(None) + self.body.generate_execution_code(code) + code.mark_pos(None) + + if profile or linetrace: + code.funcstate.can_trace = False + code.put_trace_return("Py_None", nogil=not code.funcstate.gil_owned) + + code.putln() + code.putln("/*--- Wrapped vars code ---*/") + self.generate_wrapped_entries_code(env, code) + code.putln() + + if Options.generate_cleanup_code: + code.globalstate.use_utility_code( + UtilityCode.load_cached("RegisterModuleCleanup", "ModuleSetupCode.c")) + code.putln("if (__Pyx_RegisterCleanup()) %s" % code.error_goto(self.pos)) + + code.put_goto(code.return_label) + code.put_label(code.error_label) + for cname, type in code.funcstate.all_managed_temps(): + code.put_xdecref(cname, type) + code.putln('if (%s) {' % env.module_cname) + code.putln('if (%s && stringtab_initialized) {' % env.module_dict_cname) + # We can run into errors before the module or stringtab are initialized. + # In this case it is not safe to add a traceback (because it uses the stringtab) + code.put_add_traceback(EncodedString("init %s" % env.qualified_name)) + code.globalstate.use_utility_code(Nodes.traceback_utility_code) + # Module reference and module dict are in global variables which might still be needed + # for cleanup, atexit code, etc., so leaking is better than crashing. 
+ # At least clearing the module dict here might be a good idea, but could still break + # user code in atexit or other global registries. + ##code.put_decref_clear(env.module_dict_cname, py_object_type, nanny=False) + code.putln('}') + code.putln("#if !CYTHON_USE_MODULE_STATE") + code.put_decref_clear(env.module_cname, py_object_type, nanny=False, clear_before_decref=True) + code.putln("#else") + # This section is mainly for the limited API. env.module_cname still owns a reference so + # decrement that + code.put_decref(env.module_cname, py_object_type, nanny=False) + # Also remove the failed module from the module state lookup + # fetch/restore the error indicator because PyState_RemvoeModule might fail itself + code.putln("if (pystate_addmodule_run) {") + code.putln("PyObject *tp, *value, *tb;") + code.putln("PyErr_Fetch(&tp, &value, &tb);") + code.putln("PyState_RemoveModule(&%s);" % Naming.pymoduledef_cname) + code.putln("PyErr_Restore(tp, value, tb);") + code.putln("}") + code.putln("#endif") + code.putln('} else if (!PyErr_Occurred()) {') + code.putln('PyErr_SetString(PyExc_ImportError, "init %s");' % + env.qualified_name.as_c_string_literal()[1:-1]) + code.putln('}') + code.put_label(code.return_label) + + code.put_finish_refcount_context() + + code.putln("#if CYTHON_PEP489_MULTI_PHASE_INIT") + code.putln("return (%s != NULL) ? 0 : -1;" % env.module_cname) + code.putln("#elif PY_MAJOR_VERSION >= 3") + code.putln("return %s;" % env.module_cname) + code.putln("#else") + code.putln("return;") + code.putln("#endif") + code.putln('}') + + tempdecl_code.put_temp_declarations(code.funcstate) + + code.exit_cfunc_scope() + + def mod_init_subfunction(self, pos, scope, orig_code): + """ + Return a context manager that allows deviating the module init code generation + into a separate function and instead inserts a call to it. + + Can be reused sequentially to create multiple functions. + The functions get inserted at the point where the context manager was created. 
+ The call gets inserted where the context manager is used (on entry). + """ + prototypes = orig_code.insertion_point() + prototypes.putln("") + function_code = orig_code.insertion_point() + function_code.putln("") + + class ModInitSubfunction(object): + def __init__(self, code_type): + cname = '_'.join(code_type.lower().split()) + assert re.match("^[a-z0-9_]+$", cname) + self.cfunc_name = "__Pyx_modinit_%s" % cname + self.description = code_type + self.tempdecl_code = None + self.call_code = None + + def __enter__(self): + self.call_code = orig_code.insertion_point() + code = function_code + code.enter_cfunc_scope(scope) + prototypes.putln("static CYTHON_SMALL_CODE int %s(void); /*proto*/" % self.cfunc_name) + code.putln("static int %s(void) {" % self.cfunc_name) + code.put_declare_refcount_context() + self.tempdecl_code = code.insertion_point() + code.put_setup_refcount_context(EncodedString(self.cfunc_name)) + # Leave a grepable marker that makes it easy to find the generator source. 
+ code.putln("/*--- %s ---*/" % self.description) + return code + + def __exit__(self, *args): + code = function_code + code.put_finish_refcount_context() + code.putln("return 0;") + + self.tempdecl_code.put_temp_declarations(code.funcstate) + self.tempdecl_code = None + + needs_error_handling = code.label_used(code.error_label) + if needs_error_handling: + code.put_label(code.error_label) + for cname, type in code.funcstate.all_managed_temps(): + code.put_xdecref(cname, type) + code.put_finish_refcount_context() + code.putln("return -1;") + code.putln("}") + code.exit_cfunc_scope() + code.putln("") + + if needs_error_handling: + self.call_code.putln( + self.call_code.error_goto_if_neg("%s()" % self.cfunc_name, pos)) + else: + self.call_code.putln("(void)%s();" % self.cfunc_name) + self.call_code = None + + return ModInitSubfunction + + def generate_module_import_setup(self, env, code): + module_path = env.directives['set_initial_path'] + if module_path == 'SOURCEFILE': + module_path = self.pos[0].filename + + if module_path: + code.putln('if (!CYTHON_PEP489_MULTI_PHASE_INIT) {') + code.putln('if (PyObject_SetAttrString(%s, "__file__", %s) < 0) %s;' % ( + env.module_cname, + code.globalstate.get_py_string_const( + EncodedString(decode_filename(module_path))).cname, + code.error_goto(self.pos))) + code.putln("}") + + if env.is_package: + # set __path__ to mark the module as package + code.putln('if (!CYTHON_PEP489_MULTI_PHASE_INIT) {') + temp = code.funcstate.allocate_temp(py_object_type, True) + code.putln('%s = Py_BuildValue("[O]", %s); %s' % ( + temp, + code.globalstate.get_py_string_const( + EncodedString(decode_filename( + os.path.dirname(module_path)))).cname, + code.error_goto_if_null(temp, self.pos))) + code.put_gotref(temp, py_object_type) + code.putln( + 'if (PyObject_SetAttrString(%s, "__path__", %s) < 0) %s;' % ( + env.module_cname, temp, code.error_goto(self.pos))) + code.put_decref_clear(temp, py_object_type) + code.funcstate.release_temp(temp) + 
code.putln("}") + + elif env.is_package: + # packages require __path__, so all we can do is try to figure + # out the module path at runtime by rerunning the import lookup + code.putln("if (!CYTHON_PEP489_MULTI_PHASE_INIT) {") + code.globalstate.use_utility_code(UtilityCode.load( + "SetPackagePathFromImportLib", "ImportExport.c")) + code.putln(code.error_goto_if_neg( + '__Pyx_SetPackagePathFromImportLib(%s)' % ( + code.globalstate.get_py_string_const( + EncodedString(self.full_module_name)).cname), + self.pos)) + code.putln("}") + + # CPython may not have put us into sys.modules yet, but relative imports and reimports require it + fq_module_name = self.full_module_name + if fq_module_name.endswith('.__init__'): + fq_module_name = EncodedString(fq_module_name[:-len('.__init__')]) + fq_module_name_cstring = fq_module_name.as_c_string_literal() + code.putln("#if PY_MAJOR_VERSION >= 3") + code.putln("{") + code.putln("PyObject *modules = PyImport_GetModuleDict(); %s" % + code.error_goto_if_null("modules", self.pos)) + code.putln('if (!PyDict_GetItemString(modules, %s)) {' % fq_module_name_cstring) + code.putln(code.error_goto_if_neg('PyDict_SetItemString(modules, %s, %s)' % ( + fq_module_name_cstring, env.module_cname), self.pos)) + code.putln("}") + code.putln("}") + code.putln("#endif") + + def generate_module_cleanup_func(self, env, code): + if not Options.generate_cleanup_code: + return + + code.putln('static void %s(CYTHON_UNUSED PyObject *self) {' % + Naming.cleanup_cname) + code.enter_cfunc_scope(env) + + if Options.generate_cleanup_code >= 2: + code.putln("/*--- Global cleanup code ---*/") + rev_entries = list(env.var_entries) + rev_entries.reverse() + for entry in rev_entries: + if entry.visibility != 'extern': + if entry.type.is_pyobject and entry.used: + code.put_xdecref_clear( + entry.cname, entry.type, + clear_before_decref=True, + nanny=False) + code.putln("__Pyx_CleanupGlobals();") + if Options.generate_cleanup_code >= 3: + code.putln("/*--- Type import 
cleanup code ---*/") + for ext_type in sorted(env.types_imported, key=operator.attrgetter('typeptr_cname')): + code.put_xdecref_clear( + ext_type.typeptr_cname, ext_type, + clear_before_decref=True, + nanny=False) + if Options.cache_builtins: + code.putln("/*--- Builtin cleanup code ---*/") + for entry in env.cached_builtins: + code.put_xdecref_clear( + entry.cname, PyrexTypes.py_object_type, + clear_before_decref=True, + nanny=False) + code.putln("/*--- Intern cleanup code ---*/") + code.put_decref_clear(Naming.empty_tuple, + PyrexTypes.py_object_type, + clear_before_decref=True, + nanny=False) + for entry in env.c_class_entries: + cclass_type = entry.type + if cclass_type.is_external or cclass_type.base_type: + continue + if cclass_type.scope.directives.get('freelist', 0): + scope = cclass_type.scope + freelist_name = scope.mangle_internal(Naming.freelist_name) + freecount_name = scope.mangle_internal(Naming.freecount_name) + code.putln('#if CYTHON_USE_FREELISTS') + code.putln("while (%s > 0) {" % freecount_name) + code.putln("PyObject* o = (PyObject*)%s[--%s];" % ( + freelist_name, freecount_name)) + code.putln("#if CYTHON_USE_TYPE_SLOTS || CYTHON_COMPILING_IN_PYPY") + code.putln("(*Py_TYPE(o)->tp_free)(o);") + code.putln("#else") + # Asking for PyType_GetSlot(..., Py_tp_free) seems to cause an error in pypy + code.putln("freefunc tp_free = (freefunc)PyType_GetSlot(Py_TYPE(o), Py_tp_free);") + code.putln("if (tp_free) tp_free(o);") + code.putln("#endif") + code.putln("}") + code.putln('#endif') # CYTHON_USE_FREELISTS +# for entry in env.pynum_entries: +# code.put_decref_clear(entry.cname, +# PyrexTypes.py_object_type, +# nanny=False) +# for entry in env.all_pystring_entries: +# if entry.is_interned: +# code.put_decref_clear(entry.pystring_cname, +# PyrexTypes.py_object_type, +# nanny=False) +# for entry in env.default_entries: +# if entry.type.is_pyobject and entry.used: +# code.putln("Py_DECREF(%s); %s = 0;" % ( +# code.entry_as_pyobject(entry), entry.cname)) + 
if Options.pre_import is not None: + code.put_decref_clear(Naming.preimport_cname, py_object_type, + nanny=False, clear_before_decref=True) + for cname in [Naming.cython_runtime_cname, Naming.builtins_cname]: + code.put_decref_clear(cname, py_object_type, nanny=False, clear_before_decref=True) + code.put_decref_clear(env.module_dict_cname, py_object_type, nanny=False, clear_before_decref=True) + + def generate_main_method(self, env, code): + module_is_main = self.is_main_module_flag_cname() + if Options.embed == "main": + wmain = "wmain" + else: + wmain = Options.embed + main_method = UtilityCode.load_cached("MainFunction", "Embed.c") + code.globalstate.use_utility_code( + main_method.specialize( + module_name=env.module_name, + module_is_main=module_is_main, + main_method=Options.embed, + wmain_method=wmain)) + + def punycode_module_name(self, prefix, name): + # adapted from PEP483 + try: + name = '_' + name.encode('ascii').decode('ascii') + except UnicodeEncodeError: + name = 'U_' + name.encode('punycode').replace(b'-', b'_').decode('ascii') + return "%s%s" % (prefix, name) + + def wrong_punycode_module_name(self, name): + # to work around a distutils bug by also generating an incorrect symbol... + try: + name.encode("ascii") + return None # workaround is not needed + except UnicodeEncodeError: + return "PyInitU" + (u"_"+name).encode('punycode').replace(b'-', b'_').decode('ascii') + + def mod_init_func_cname(self, prefix, env): + # from PEP483 + return self.punycode_module_name(prefix, env.module_name) + + # Returns the name of the C-function that corresponds to the module initialisation. + # (module initialisation == the cython code outside of functions) + # Note that this should never be the name of a wrapper and always the name of the + # function containing the actual code. Otherwise, cygdb will experience problems. 
+ def module_init_func_cname(self): + env = self.scope + return self.mod_init_func_cname(Naming.pymodule_exec_func_cname, env) + + def generate_pymoduledef_struct(self, env, code): + if env.doc: + doc = "%s" % code.get_string_const(env.doc) + else: + doc = "0" + if Options.generate_cleanup_code: + cleanup_func = "(freefunc)%s" % Naming.cleanup_cname + else: + cleanup_func = 'NULL' + + code.putln("") + code.putln("#if PY_MAJOR_VERSION >= 3") + code.putln("#if CYTHON_PEP489_MULTI_PHASE_INIT") + exec_func_cname = self.module_init_func_cname() + code.putln("static PyObject* %s(PyObject *spec, PyModuleDef *def); /*proto*/" % + Naming.pymodule_create_func_cname) + code.putln("static int %s(PyObject* module); /*proto*/" % exec_func_cname) + + code.putln("static PyModuleDef_Slot %s[] = {" % Naming.pymoduledef_slots_cname) + code.putln("{Py_mod_create, (void*)%s}," % Naming.pymodule_create_func_cname) + code.putln("{Py_mod_exec, (void*)%s}," % exec_func_cname) + code.putln("{0, NULL}") + code.putln("};") + if not env.module_name.isascii(): + code.putln("#else /* CYTHON_PEP489_MULTI_PHASE_INIT */") + code.putln('#error "Unicode module names are only supported with multi-phase init' + ' as per PEP489"') + code.putln("#endif") + + code.putln("") + code.putln('#ifdef __cplusplus') + code.putln('namespace {') + code.putln("struct PyModuleDef %s =" % Naming.pymoduledef_cname) + code.putln('#else') + code.putln("static struct PyModuleDef %s =" % Naming.pymoduledef_cname) + code.putln('#endif') + code.putln('{') + code.putln(" PyModuleDef_HEAD_INIT,") + code.putln(' %s,' % env.module_name.as_c_string_literal()) + code.putln(" %s, /* m_doc */" % doc) + code.putln("#if CYTHON_PEP489_MULTI_PHASE_INIT") + code.putln(" 0, /* m_size */") + code.putln("#elif CYTHON_USE_MODULE_STATE") # FIXME: should allow combination with PEP-489 + code.putln(" sizeof(%s), /* m_size */" % Naming.modulestate_cname) + code.putln("#else") + code.putln(" -1, /* m_size */") + code.putln("#endif") + 
code.putln(" %s /* m_methods */," % env.method_table_cname) + code.putln("#if CYTHON_PEP489_MULTI_PHASE_INIT") + code.putln(" %s, /* m_slots */" % Naming.pymoduledef_slots_cname) + code.putln("#else") + code.putln(" NULL, /* m_reload */") + code.putln("#endif") + code.putln("#if CYTHON_USE_MODULE_STATE") + code.putln(" %s_traverse, /* m_traverse */" % Naming.module_cname) + code.putln(" %s_clear, /* m_clear */" % Naming.module_cname) + code.putln(" %s /* m_free */" % cleanup_func) + code.putln("#else") + code.putln(" NULL, /* m_traverse */") + code.putln(" NULL, /* m_clear */") + code.putln(" %s /* m_free */" % cleanup_func) + code.putln("#endif") + code.putln("};") + code.putln('#ifdef __cplusplus') + code.putln('} /* anonymous namespace */') + code.putln('#endif') + code.putln("#endif") + + def generate_module_creation_code(self, env, code): + # Generate code to create the module object and + # install the builtins. + if env.doc: + doc = "%s" % code.get_string_const(env.doc) + else: + doc = "0" + + code.putln("#if CYTHON_PEP489_MULTI_PHASE_INIT") + code.putln("%s = %s;" % ( + env.module_cname, + Naming.pymodinit_module_arg)) + code.put_incref(env.module_cname, py_object_type, nanny=False) + code.putln("#else") + code.putln("#if PY_MAJOR_VERSION < 3") + code.putln( + '%s = Py_InitModule4(%s, %s, %s, 0, PYTHON_API_VERSION); Py_XINCREF(%s);' % ( + env.module_cname, + env.module_name.as_c_string_literal(), + env.method_table_cname, + doc, + env.module_cname)) + code.putln(code.error_goto_if_null(env.module_cname, self.pos)) + code.putln("#elif CYTHON_USE_MODULE_STATE") + # manage_ref is False (and refnanny calls are omitted) because refnanny isn't yet initialized + module_temp = code.funcstate.allocate_temp(py_object_type, manage_ref=False) + code.putln( + "%s = PyModule_Create(&%s); %s" % ( + module_temp, + Naming.pymoduledef_cname, + code.error_goto_if_null(module_temp, self.pos))) + code.putln("{") + # So that PyState_FindModule works in the init function: + 
code.putln("int add_module_result = PyState_AddModule(%s, &%s);" % ( + module_temp, Naming.pymoduledef_cname)) + code.putln("%s = 0; /* transfer ownership from %s to %s pseudovariable */" % ( + module_temp, module_temp, env.module_name.as_c_string_literal() + )) + # At this stage the module likely has a refcount of 2 - one owned by the list + # inside PyState_AddModule and one owned by "__pyx_m" (and returned from this + # function as a new reference). + code.putln(code.error_goto_if_neg("add_module_result", self.pos)) + code.putln("pystate_addmodule_run = 1;") + code.putln("}") + code.funcstate.release_temp(module_temp) + code.putln('#else') + code.putln( + "%s = PyModule_Create(&%s);" % ( + env.module_cname, + Naming.pymoduledef_cname)) + code.putln(code.error_goto_if_null(env.module_cname, self.pos)) + code.putln("#endif") + code.putln("#endif") # CYTHON_PEP489_MULTI_PHASE_INIT + code.putln("CYTHON_UNUSED_VAR(%s);" % module_temp) # only used in limited API + + code.putln( + "%s = PyModule_GetDict(%s); %s" % ( + env.module_dict_cname, env.module_cname, + code.error_goto_if_null(env.module_dict_cname, self.pos))) + code.put_incref(env.module_dict_cname, py_object_type, nanny=False) + + code.putln( + '%s = __Pyx_PyImport_AddModuleRef(__Pyx_BUILTIN_MODULE_NAME); %s' % ( + Naming.builtins_cname, + code.error_goto_if_null(Naming.builtins_cname, self.pos))) + code.putln( + '%s = __Pyx_PyImport_AddModuleRef((const char *) "cython_runtime"); %s' % ( + Naming.cython_runtime_cname, + code.error_goto_if_null(Naming.cython_runtime_cname, self.pos))) + code.putln( + 'if (PyObject_SetAttrString(%s, "__builtins__", %s) < 0) %s' % ( + env.module_cname, + Naming.builtins_cname, + code.error_goto(self.pos))) + if Options.pre_import is not None: + code.putln( + '%s = __Pyx_PyImport_AddModuleRef("%s"); %s' % ( + Naming.preimport_cname, + Options.pre_import, + code.error_goto_if_null(Naming.preimport_cname, self.pos))) + + def generate_global_init_code(self, env, code): + # Generate 
code to initialise global PyObject * + # variables to None. + for entry in env.var_entries: + if entry.visibility != 'extern': + if entry.used: + entry.type.global_init_code(entry, code) + + def generate_wrapped_entries_code(self, env, code): + for name, entry in sorted(env.entries.items()): + if (entry.create_wrapper + and not entry.is_type + and entry.scope is env): + if not entry.type.create_to_py_utility_code(env): + error(entry.pos, "Cannot convert '%s' to Python object" % entry.type) + code.putln("{") + code.putln("PyObject* wrapped = %s(%s);" % ( + entry.type.to_py_function, + entry.cname)) + code.putln(code.error_goto_if_null("wrapped", entry.pos)) + code.putln( + 'if (PyObject_SetAttrString(%s, "%s", wrapped) < 0) %s;' % ( + env.module_cname, + name, + code.error_goto(entry.pos))) + code.putln("}") + + def generate_c_variable_export_code(self, env, code): + # Generate code to create PyCFunction wrappers for exported C functions. + entries = [] + for entry in env.var_entries: + if (entry.api + or entry.defined_in_pxd + or (Options.cimport_from_pyx and not entry.visibility == 'extern')): + entries.append(entry) + if entries: + env.use_utility_code(UtilityCode.load_cached("VoidPtrExport", "ImportExport.c")) + for entry in entries: + signature = entry.type.empty_declaration_code() + name = code.intern_identifier(entry.name) + code.putln('if (__Pyx_ExportVoidPtr(%s, (void *)&%s, "%s") < 0) %s' % ( + name, entry.cname, signature, + code.error_goto(self.pos))) + + def generate_c_function_export_code(self, env, code): + # Generate code to create PyCFunction wrappers for exported C functions. 
+ entries = [] + for entry in env.cfunc_entries: + if (entry.api + or entry.defined_in_pxd + or (Options.cimport_from_pyx and not entry.visibility == 'extern')): + entries.append(entry) + if entries: + env.use_utility_code( + UtilityCode.load_cached("FunctionExport", "ImportExport.c")) + # Note: while this looks like it could be more cheaply stored and read from a struct array, + # investigation shows that the resulting binary is smaller with repeated functions calls. + for entry in entries: + signature = entry.type.signature_string() + code.putln('if (__Pyx_ExportFunction(%s, (void (*)(void))%s, "%s") < 0) %s' % ( + entry.name.as_c_string_literal(), + entry.cname, + signature, + code.error_goto(self.pos))) + + def generate_type_import_code_for_module(self, module, env, code): + # Generate type import code for all exported extension types in + # an imported module. + #if module.c_class_entries: + with ModuleImportGenerator(code) as import_generator: + for entry in module.c_class_entries: + if entry.defined_in_pxd: + self.generate_type_import_code(env, entry.type, entry.pos, code, import_generator) + + def specialize_fused_types(self, pxd_env): + """ + If fused c(p)def functions are defined in an imported pxd, but not + used in this implementation file, we still have fused entries and + not specialized ones. This method replaces any fused entries with their + specialized ones. + """ + for entry in pxd_env.cfunc_entries[:]: + if entry.type.is_fused: + # This call modifies the cfunc_entries in-place + entry.type.get_all_specialized_function_types() + + def generate_c_variable_import_code_for_module(self, module, env, code): + # Generate import code for all exported C functions in a cimported module. 
+ entries = [] + for entry in module.var_entries: + if entry.defined_in_pxd: + entries.append(entry) + if entries: + env.use_utility_code( + UtilityCode.load_cached("VoidPtrImport", "ImportExport.c")) + temp = code.funcstate.allocate_temp(py_object_type, manage_ref=True) + code.putln( + '%s = PyImport_ImportModule("%s"); if (!%s) %s' % ( + temp, + module.qualified_name, + temp, + code.error_goto(self.pos))) + code.put_gotref(temp, py_object_type) + for entry in entries: + if env is module: + cname = entry.cname + else: + cname = module.mangle(Naming.varptr_prefix, entry.name) + signature = entry.type.empty_declaration_code() + code.putln( + 'if (__Pyx_ImportVoidPtr_%s(%s, "%s", (void **)&%s, "%s") < 0) %s' % ( + Naming.cyversion, + temp, entry.name, cname, signature, + code.error_goto(self.pos))) + code.put_decref_clear(temp, py_object_type) + code.funcstate.release_temp(temp) + + def generate_c_function_import_code_for_module(self, module, env, code): + # Generate import code for all exported C functions in a cimported module. 
+ entries = [] + for entry in module.cfunc_entries: + if entry.defined_in_pxd and entry.used: + entries.append(entry) + if entries: + env.use_utility_code( + UtilityCode.load_cached("FunctionImport", "ImportExport.c")) + temp = code.funcstate.allocate_temp(py_object_type, manage_ref=True) + code.putln( + '%s = PyImport_ImportModule("%s"); if (!%s) %s' % ( + temp, + module.qualified_name, + temp, + code.error_goto(self.pos))) + code.put_gotref(temp, py_object_type) + for entry in entries: + code.putln( + 'if (__Pyx_ImportFunction_%s(%s, %s, (void (**)(void))&%s, "%s") < 0) %s' % ( + Naming.cyversion, + temp, + entry.name.as_c_string_literal(), + entry.cname, + entry.type.signature_string(), + code.error_goto(self.pos))) + code.put_decref_clear(temp, py_object_type) + code.funcstate.release_temp(temp) + + def generate_type_init_code(self, env, code): + # Generate type import code for extern extension types + # and type ready code for non-extern ones. + with ModuleImportGenerator(code) as import_generator: + for entry in env.c_class_entries: + if entry.visibility == 'extern' and not entry.utility_code_definition: + self.generate_type_import_code(env, entry.type, entry.pos, code, import_generator) + else: + self.generate_base_type_import_code(env, entry, code, import_generator) + self.generate_exttype_vtable_init_code(entry, code) + if entry.type.early_init: + self.generate_type_ready_code(entry, code) + + def generate_base_type_import_code(self, env, entry, code, import_generator): + base_type = entry.type.base_type + if (base_type and base_type.module_name != env.qualified_name and not + (base_type.is_builtin_type or base_type.is_cython_builtin_type) + and not entry.utility_code_definition): + self.generate_type_import_code(env, base_type, self.pos, code, import_generator) + + def generate_type_import_code(self, env, type, pos, code, import_generator): + # If not already done, generate code to import the typeobject of an + # extension type defined in another module, 
and extract its C method + # table pointer if any. + if type in env.types_imported: + return + if type.name not in Code.ctypedef_builtins_map: + # see corresponding condition in generate_type_import_call() below! + code.globalstate.use_utility_code( + UtilityCode.load_cached("TypeImport", "ImportExport.c")) + self.generate_type_import_call(type, code, import_generator, error_pos=pos) + if type.vtabptr_cname: + code.globalstate.use_utility_code( + UtilityCode.load_cached('GetVTable', 'ImportExport.c')) + code.putln("%s = (struct %s*)__Pyx_GetVtable(%s); %s" % ( + type.vtabptr_cname, + type.vtabstruct_cname, + type.typeptr_cname, + code.error_goto_if_null(type.vtabptr_cname, pos))) + env.types_imported.add(type) + + def generate_type_import_call(self, type, code, import_generator, error_code=None, error_pos=None): + if type.typedef_flag: + objstruct = type.objstruct_cname + else: + objstruct = "struct %s" % type.objstruct_cname + sizeof_objstruct = objstruct + module_name = type.module_name + condition = replacement = None + if module_name not in ('__builtin__', 'builtins'): + module_name = '"%s"' % module_name + elif type.name in Code.ctypedef_builtins_map: + # Fast path for special builtins, don't actually import + ctypename = Code.ctypedef_builtins_map[type.name] + code.putln('%s = %s;' % (type.typeptr_cname, ctypename)) + return + else: + module_name = '__Pyx_BUILTIN_MODULE_NAME' + if type.name in Code.non_portable_builtins_map: + condition, replacement = Code.non_portable_builtins_map[type.name] + if objstruct in Code.basicsize_builtins_map: + # Some builtin types have a tp_basicsize which differs from sizeof(...): + sizeof_objstruct = Code.basicsize_builtins_map[objstruct] + + if not error_code: + assert error_pos is not None + error_code = code.error_goto(error_pos) + + module = import_generator.imported_module(module_name, error_code) + code.put('%s = __Pyx_ImportType_%s(%s, %s,' % ( + type.typeptr_cname, + Naming.cyversion, + module, + module_name)) + + 
type_name = type.name.as_c_string_literal() + + if condition and replacement: + code.putln("") # start in new line + code.putln("#if %s" % condition) + code.putln('"%s",' % replacement) + code.putln("#else") + code.putln('%s,' % type_name) + code.putln("#endif") + else: + code.put(' %s, ' % type_name) + + if sizeof_objstruct != objstruct: + if not condition: + code.putln("") # start in new line + code.putln("#if defined(PYPY_VERSION_NUM) && PYPY_VERSION_NUM < 0x050B0000") + code.putln('sizeof(%s), __PYX_GET_STRUCT_ALIGNMENT_%s(%s),' % ( + objstruct, Naming.cyversion, objstruct)) + code.putln("#elif CYTHON_COMPILING_IN_LIMITED_API") + code.putln('sizeof(%s), __PYX_GET_STRUCT_ALIGNMENT_%s(%s),' % ( + objstruct, Naming.cyversion, objstruct)) + code.putln("#else") + code.putln('sizeof(%s), __PYX_GET_STRUCT_ALIGNMENT_%s(%s),' % ( + sizeof_objstruct, Naming.cyversion, sizeof_objstruct)) + code.putln("#endif") + else: + code.put('sizeof(%s), __PYX_GET_STRUCT_ALIGNMENT_%s(%s),' % ( + objstruct, Naming.cyversion, objstruct)) + + # check_size + if type.check_size and type.check_size in ('error', 'warn', 'ignore'): + check_size = type.check_size + elif not type.is_external or type.is_subclassed: + check_size = 'error' + else: + raise RuntimeError("invalid value for check_size '%s' when compiling %s.%s" % ( + type.check_size, module_name, type.name)) + code.put('__Pyx_ImportType_CheckSize_%s_%s);' % ( + check_size.title(), Naming.cyversion)) + + code.putln(' if (!%s) %s' % (type.typeptr_cname, error_code)) + + def generate_type_ready_code(self, entry, code): + Nodes.CClassDefNode.generate_type_ready_code(entry, code) + + def is_main_module_flag_cname(self): + full_module_name = self.full_module_name.replace('.', '__') + return self.punycode_module_name(Naming.module_is_main, full_module_name) + + def generate_exttype_vtable_init_code(self, entry, code): + # Generate code to initialise the C method table of an + # extension type. 
+ type = entry.type + if type.vtable_cname: + code.putln( + "%s = &%s;" % ( + type.vtabptr_cname, + type.vtable_cname)) + if type.base_type and type.base_type.vtabptr_cname: + code.putln( + "%s.%s = *%s;" % ( + type.vtable_cname, + Naming.obj_base_cname, + type.base_type.vtabptr_cname)) + + c_method_entries = [ + entry for entry in type.scope.cfunc_entries + if entry.func_cname] + if c_method_entries: + for meth_entry in c_method_entries: + vtable_type = meth_entry.vtable_type or meth_entry.type + cast = vtable_type.signature_cast_string() + code.putln( + "%s.%s = %s%s;" % ( + type.vtable_cname, + meth_entry.cname, + cast, + meth_entry.func_cname)) + + +class ModuleImportGenerator(object): + """ + Helper to generate module import while importing external types. + This is used to avoid excessive re-imports of external modules when multiple types are looked up. + """ + def __init__(self, code, imported_modules=None): + self.code = code + self.imported = {} + if imported_modules: + for name, cname in imported_modules.items(): + self.imported['"%s"' % name] = cname + self.temps = [] # remember original import order for freeing + + def imported_module(self, module_name_string, error_code): + if module_name_string in self.imported: + return self.imported[module_name_string] + + code = self.code + temp = code.funcstate.allocate_temp(py_object_type, manage_ref=True) + self.temps.append(temp) + code.putln('%s = PyImport_ImportModule(%s); if (unlikely(!%s)) %s' % ( + temp, module_name_string, temp, error_code)) + code.put_gotref(temp, py_object_type) + self.imported[module_name_string] = temp + return temp + + def __enter__(self): + return self + + def __exit__(self, *exc): + code = self.code + for temp in self.temps: + code.put_decref_clear(temp, py_object_type) + code.funcstate.release_temp(temp) + + +def generate_cfunction_declaration(entry, env, code, definition): + from_cy_utility = entry.used and entry.utility_code_definition + if entry.used and 
entry.inline_func_in_pxd or (not entry.in_cinclude and ( + definition or entry.defined_in_pxd or entry.visibility == 'extern' or from_cy_utility)): + if entry.visibility == 'extern': + storage_class = Naming.extern_c_macro + dll_linkage = "DL_IMPORT" + elif entry.visibility == 'public': + storage_class = Naming.extern_c_macro + dll_linkage = None + elif entry.visibility == 'private': + storage_class = "static" + dll_linkage = None + else: + storage_class = "static" + dll_linkage = None + type = entry.type + + if entry.defined_in_pxd and not definition: + storage_class = "static" + dll_linkage = None + type = CPtrType(type) + + header = type.declaration_code( + entry.cname, dll_linkage=dll_linkage) + modifiers = code.build_function_modifiers(entry.func_modifiers) + code.putln("%s %s%s; /*proto*/" % ( + storage_class, + modifiers, + header)) + +#------------------------------------------------------------------------------------ +# +# Runtime support code +# +#------------------------------------------------------------------------------------ + +refnanny_utility_code = UtilityCode.load("Refnanny", "ModuleSetupCode.c") + +packed_struct_utility_code = UtilityCode(proto=""" +#if defined(__GNUC__) +#define __Pyx_PACKED __attribute__((__packed__)) +#else +#define __Pyx_PACKED +#endif +""", impl="", proto_block='utility_code_proto_before_types') diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/Nodes.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/Nodes.py new file mode 100644 index 0000000000000000000000000000000000000000..6af916076009147607e8dfe7b25b4987fca5060a --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/Nodes.py @@ -0,0 +1,10420 @@ +# +# Parse tree nodes +# + +from __future__ import absolute_import + +import cython + +cython.declare(sys=object, os=object, copy=object, + Builtin=object, error=object, warning=object, Naming=object, 
PyrexTypes=object, + py_object_type=object, ModuleScope=object, LocalScope=object, ClosureScope=object, + StructOrUnionScope=object, PyClassScope=object, + CppClassScope=object, UtilityCode=object, EncodedString=object, + error_type=object, _py_int_types=object) + +import sys, copy +from itertools import chain + +from . import Builtin +from .Errors import error, warning, InternalError, CompileError, CannotSpecialize +from . import Naming +from . import PyrexTypes +from . import TypeSlots +from .PyrexTypes import py_object_type, error_type +from .Symtab import (ModuleScope, LocalScope, ClosureScope, PropertyScope, + StructOrUnionScope, PyClassScope, CppClassScope, TemplateScope, GeneratorExpressionScope, + CppScopedEnumScope, punycodify_name) +from .Code import UtilityCode +from .StringEncoding import EncodedString +from . import Future +from . import Options +from . import DebugFlags +from .Pythran import has_np_pythran, pythran_type, is_pythran_buffer +from ..Utils import add_metaclass, str_to_number + + +if sys.version_info[0] >= 3: + _py_int_types = int +else: + _py_int_types = (int, long) + + +IMPLICIT_CLASSMETHODS = {"__init_subclass__", "__class_getitem__"} + + +def relative_position(pos): + return (pos[0].get_filenametable_entry(), pos[1]) + + +def embed_position(pos, docstring): + if not Options.embed_pos_in_docstring: + return docstring + pos_line = u'File: %s (starting at line %s)' % relative_position(pos) + if docstring is None: + # unicode string + return EncodedString(pos_line) + + # make sure we can encode the filename in the docstring encoding + # otherwise make the docstring a unicode string + encoding = docstring.encoding + if encoding is not None: + try: + pos_line.encode(encoding) + except UnicodeEncodeError: + encoding = None + + if not docstring: + # reuse the string encoding of the original docstring + doc = EncodedString(pos_line) + else: + doc = EncodedString(pos_line + u'\n' + docstring) + doc.encoding = encoding + return doc + + +def 
write_func_call(func, codewriter_class): + def f(*args, **kwds): + if len(args) > 1 and isinstance(args[1], codewriter_class): + # here we annotate the code with this function call + # but only if new code is generated + node, code = args[:2] + marker = ' /* %s -> %s.%s %s */' % ( + ' ' * code.call_level, + node.__class__.__name__, + func.__name__, + node.pos[1:], + ) + insertion_point = code.insertion_point() + start = code.buffer.stream.tell() + code.call_level += 4 + res = func(*args, **kwds) + code.call_level -= 4 + if start != code.buffer.stream.tell(): + code.putln(marker.replace('->', '<-', 1)) + insertion_point.putln(marker) + return res + else: + return func(*args, **kwds) + return f + + +class VerboseCodeWriter(type): + # Set this as a metaclass to trace function calls in code. + # This slows down code generation and makes much larger files. + def __new__(cls, name, bases, attrs): + from types import FunctionType + from .Code import CCodeWriter + attrs = dict(attrs) + for mname, m in attrs.items(): + if isinstance(m, FunctionType): + attrs[mname] = write_func_call(m, CCodeWriter) + return super(VerboseCodeWriter, cls).__new__(cls, name, bases, attrs) + + +class CheckAnalysers(type): + """Metaclass to check that type analysis functions return a node. 
+ """ + methods = frozenset({ + 'analyse_types', + 'analyse_expressions', + 'analyse_target_types', + }) + + def __new__(cls, name, bases, attrs): + from types import FunctionType + def check(name, func): + def call(*args, **kwargs): + retval = func(*args, **kwargs) + if retval is None: + print('%s %s %s' % (name, args, kwargs)) + return retval + return call + + attrs = dict(attrs) + for mname, m in attrs.items(): + if isinstance(m, FunctionType) and mname in cls.methods: + attrs[mname] = check(mname, m) + return super(CheckAnalysers, cls).__new__(cls, name, bases, attrs) + + +def _with_metaclass(cls): + if DebugFlags.debug_trace_code_generation: + return add_metaclass(VerboseCodeWriter)(cls) + #return add_metaclass(CheckAnalysers)(cls) + return cls + + +@_with_metaclass +class Node(object): + # pos (string, int, int) Source file position + # is_name boolean Is a NameNode + # is_literal boolean Is a ConstNode + + is_name = 0 + is_none = 0 + is_nonecheck = 0 + is_literal = 0 + is_terminator = 0 + is_wrapper = False # is a DefNode wrapper for a C function + is_cproperty = False + is_templated_type_node = False + temps = None + + # All descendants should set child_attrs to a list of the attributes + # containing nodes considered "children" in the tree. Each such attribute + # can either contain a single node or a list of nodes. See Visitor.py. + child_attrs = None + + # Subset of attributes that are evaluated in the outer scope (e.g. function default arguments). + outer_attrs = None + + cf_state = None + + # This may be an additional (or 'actual') type that will be checked when + # this node is coerced to another type. 
This could be useful to set when + # the actual type to which it can coerce is known, but you want to leave + # the type a py_object_type + coercion_type = None + + def __init__(self, pos, **kw): + self.pos = pos + self.__dict__.update(kw) + + gil_message = "Operation" + + nogil_check = None + in_nogil_context = False # For use only during code generation. + + def gil_error(self, env=None): + error(self.pos, "%s not allowed without gil" % self.gil_message) + + cpp_message = "Operation" + + def cpp_check(self, env): + if not env.is_cpp(): + self.cpp_error() + + def cpp_error(self): + error(self.pos, "%s only allowed in c++" % self.cpp_message) + + def clone_node(self): + """Clone the node. This is defined as a shallow copy, except for member lists + amongst the child attributes (from get_child_accessors) which are also + copied. Lists containing child nodes are thus seen as a way for the node + to hold multiple children directly; the list is not treated as a separate + level in the tree.""" + result = copy.copy(self) + for attrname in result.child_attrs: + value = getattr(result, attrname) + if isinstance(value, list): + setattr(result, attrname, [x for x in value]) + return result + + + # + # There are 3 main phases of parse tree processing, applied in order to + # all the statements in a given scope-block: + # + # (0) analyse_declarations + # Make symbol table entries for all declarations at the current + # level, both explicit (def, cdef, etc.) and implicit (assignment + # to an otherwise undeclared name). + # + # (1) analyse_expressions + # Determine the result types of expressions and fill in the + # 'type' attribute of each ExprNode. Insert coercion nodes into the + # tree where needed to convert to and from Python objects. + # Replace tree nodes with more appropriate implementations found by + # the type analysis. + # + # (2) generate_code + # Emit C code for all declarations, statements and expressions. 
+ # + # These phases are triggered by tree transformations. + # See the full pipeline in Pipeline.py. + # + + def analyse_declarations(self, env): + pass + + def analyse_expressions(self, env): + raise InternalError("analyse_expressions not implemented for %s" % + self.__class__.__name__) + + def generate_code(self, code): + raise InternalError("generate_code not implemented for %s" % + self.__class__.__name__) + + def annotate(self, code): + # mro does the wrong thing + if isinstance(self, BlockNode): + self.body.annotate(code) + + def end_pos(self): + try: + return self._end_pos + except AttributeError: + pos = self.pos + if not self.child_attrs: + self._end_pos = pos + return pos + for attr in self.child_attrs: + child = getattr(self, attr) + # Sometimes lists, sometimes nodes + if child is None: + pass + elif isinstance(child, list): + for c in child: + pos = max(pos, c.end_pos()) + else: + pos = max(pos, child.end_pos()) + self._end_pos = pos + return pos + + def dump(self, level=0, filter_out=("pos",), cutoff=100, encountered=None): + """Debug helper method that returns a recursive string representation of this node. 
+ """ + if cutoff == 0: + return "<...nesting level cutoff...>" + if encountered is None: + encountered = set() + if id(self) in encountered: + return "<%s (0x%x) -- already output>" % (self.__class__.__name__, id(self)) + encountered.add(id(self)) + + def dump_child(x, level): + if isinstance(x, Node): + return x.dump(level, filter_out, cutoff-1, encountered) + elif isinstance(x, list): + return "[%s]" % ", ".join([dump_child(item, level) for item in x]) + else: + return repr(x) + + attrs = [(key, value) for key, value in self.__dict__.items() if key not in filter_out] + if len(attrs) == 0: + return "<%s (0x%x)>" % (self.__class__.__name__, id(self)) + else: + indent = " " * level + res = "<%s (0x%x)\n" % (self.__class__.__name__, id(self)) + for key, value in attrs: + res += "%s %s: %s\n" % (indent, key, dump_child(value, level + 1)) + res += "%s>" % indent + return res + + def dump_pos(self, mark_column=False, marker='(#)'): + """Debug helper method that returns the source code context of this node as a string. + """ + if not self.pos: + return u'' + source_desc, line, col = self.pos + contents = source_desc.get_lines(encoding='ASCII', error_handling='ignore') + # line numbers start at 1 + lines = contents[max(0, line-3):line] + current = lines[-1] + if mark_column: + current = current[:col] + marker + current[col:] + lines[-1] = current.rstrip() + u' # <<<<<<<<<<<<<<\n' + lines += contents[line:line+2] + return u'"%s":%d:%d\n%s\n' % ( + source_desc.get_escaped_description(), line, col, u''.join(lines)) + + +class CompilerDirectivesNode(Node): + """ + Sets compiler directives for the children nodes + """ + # directives {string:value} A dictionary holding the right value for + # *all* possible directives. 
+ # body Node + child_attrs = ["body"] + + def analyse_declarations(self, env): + old = env.directives + env.directives = self.directives + self.body.analyse_declarations(env) + env.directives = old + + def analyse_expressions(self, env): + old = env.directives + env.directives = self.directives + self.body = self.body.analyse_expressions(env) + env.directives = old + return self + + def generate_function_definitions(self, env, code): + env_old = env.directives + code_old = code.globalstate.directives + code.globalstate.directives = self.directives + self.body.generate_function_definitions(env, code) + env.directives = env_old + code.globalstate.directives = code_old + + def generate_execution_code(self, code): + old = code.globalstate.directives + code.globalstate.directives = self.directives + self.body.generate_execution_code(code) + code.globalstate.directives = old + + def annotate(self, code): + old = code.globalstate.directives + code.globalstate.directives = self.directives + self.body.annotate(code) + code.globalstate.directives = old + + +class BlockNode(object): + # Mixin class for nodes representing a declaration block. 
+ + def generate_cached_builtins_decls(self, env, code): + entries = env.global_scope().undeclared_cached_builtins + for entry in entries: + code.globalstate.add_cached_builtin_decl(entry) + del entries[:] + + def generate_lambda_definitions(self, env, code): + for node in env.lambda_defs: + node.generate_function_definitions(env, code) + + +class StatListNode(Node): + # stats a list of StatNode + + child_attrs = ["stats"] + + @staticmethod + def create_analysed(pos, env, **kw): + node = StatListNode(pos, **kw) + return node # No node-specific analysis needed + + def analyse_declarations(self, env): + #print "StatListNode.analyse_declarations" ### + for stat in self.stats: + stat.analyse_declarations(env) + + def analyse_expressions(self, env): + #print "StatListNode.analyse_expressions" ### + self.stats = [stat.analyse_expressions(env) + for stat in self.stats] + return self + + def generate_function_definitions(self, env, code): + #print "StatListNode.generate_function_definitions" ### + for stat in self.stats: + stat.generate_function_definitions(env, code) + + def generate_execution_code(self, code): + #print "StatListNode.generate_execution_code" ### + for stat in self.stats: + code.mark_pos(stat.pos) + stat.generate_execution_code(code) + + def annotate(self, code): + for stat in self.stats: + stat.annotate(code) + + +class StatNode(Node): + # + # Code generation for statements is split into the following subphases: + # + # (1) generate_function_definitions + # Emit C code for the definitions of any structs, + # unions, enums and functions defined in the current + # scope-block. + # + # (2) generate_execution_code + # Emit C code for executable statements. 
+ # + + def generate_function_definitions(self, env, code): + pass + + def generate_execution_code(self, code): + raise InternalError("generate_execution_code not implemented for %s" % + self.__class__.__name__) + + +class CDefExternNode(StatNode): + # include_file string or None + # verbatim_include string or None + # body StatListNode + + child_attrs = ["body"] + + def analyse_declarations(self, env): + old_cinclude_flag = env.in_cinclude + env.in_cinclude = 1 + self.body.analyse_declarations(env) + env.in_cinclude = old_cinclude_flag + + if self.include_file or self.verbatim_include: + # Determine whether include should be late + stats = self.body.stats + if not env.directives['preliminary_late_includes_cy28']: + late = False + elif not stats: + # Special case: empty 'cdef extern' blocks are early + late = False + else: + late = all(isinstance(node, CVarDefNode) for node in stats) + env.add_include_file(self.include_file, self.verbatim_include, late) + + def analyse_expressions(self, env): + # Allow C properties, inline methods, etc. also in external types. + self.body = self.body.analyse_expressions(env) + return self + + def generate_function_definitions(self, env, code): + self.body.generate_function_definitions(env, code) + + def generate_execution_code(self, code): + pass + + def annotate(self, code): + self.body.annotate(code) + + +class CDeclaratorNode(Node): + # Part of a C declaration. + # + # Processing during analyse_declarations phase: + # + # analyse + # Returns (name, type) pair where name is the + # CNameDeclaratorNode of the name being declared + # and type is the type it is being declared as. + # + # calling_convention string Calling convention of CFuncDeclaratorNode + # for which this is a base + + child_attrs = [] + + calling_convention = "" + + def declared_name(self): + return None + + def analyse_templates(self): + # Only C++ functions have templates. 
+ return None + + +class CNameDeclaratorNode(CDeclaratorNode): + # name string The Cython name being declared + # cname string or None C name, if specified + # default ExprNode or None the value assigned on declaration + + child_attrs = ['default'] + + default = None + + def declared_name(self): + return self.name + + def analyse(self, base_type, env, nonempty=0, visibility=None, in_pxd=False): + if nonempty and self.name == '': + # May have mistaken the name for the type. + if base_type.is_ptr or base_type.is_array or base_type.is_buffer: + error(self.pos, "Missing argument name") + elif base_type.is_void: + error(self.pos, "Use spam() rather than spam(void) to declare a function with no arguments.") + else: + self.name = base_type.declaration_code("", for_display=1, pyrex=1) + base_type = py_object_type + + if base_type.is_fused and env.fused_to_specific: + try: + base_type = base_type.specialize(env.fused_to_specific) + except CannotSpecialize: + error(self.pos, + "'%s' cannot be specialized since its type is not a fused argument to this function" % + self.name) + + self.type = base_type + return self, base_type + + +class CPtrDeclaratorNode(CDeclaratorNode): + # base CDeclaratorNode + + child_attrs = ["base"] + + def declared_name(self): + return self.base.declared_name() + + def analyse_templates(self): + return self.base.analyse_templates() + + def analyse(self, base_type, env, nonempty=0, visibility=None, in_pxd=False): + if base_type.is_pyobject: + error(self.pos, "Pointer base type cannot be a Python object") + ptr_type = PyrexTypes.c_ptr_type(base_type) + return self.base.analyse(ptr_type, env, nonempty=nonempty, visibility=visibility, in_pxd=in_pxd) + + +class _CReferenceDeclaratorBaseNode(CDeclaratorNode): + child_attrs = ["base"] + + def declared_name(self): + return self.base.declared_name() + + def analyse_templates(self): + return self.base.analyse_templates() + + +class CReferenceDeclaratorNode(_CReferenceDeclaratorBaseNode): + def analyse(self, 
base_type, env, nonempty=0, visibility=None, in_pxd=False): + if base_type.is_pyobject: + error(self.pos, "Reference base type cannot be a Python object") + ref_type = PyrexTypes.c_ref_type(base_type) + return self.base.analyse(ref_type, env, nonempty=nonempty, visibility=visibility, in_pxd=in_pxd) + + +class CppRvalueReferenceDeclaratorNode(_CReferenceDeclaratorBaseNode): + def analyse(self, base_type, env, nonempty=0, visibility=None, in_pxd=False): + if base_type.is_pyobject: + error(self.pos, "Rvalue-reference base type cannot be a Python object") + ref_type = PyrexTypes.cpp_rvalue_ref_type(base_type) + return self.base.analyse(ref_type, env, nonempty=nonempty, visibility=visibility, in_pxd=in_pxd) + + +class CArrayDeclaratorNode(CDeclaratorNode): + # base CDeclaratorNode + # dimension ExprNode + + child_attrs = ["base", "dimension"] + + def analyse(self, base_type, env, nonempty=0, visibility=None, in_pxd=False): + if ((base_type.is_cpp_class and base_type.is_template_type()) or + base_type.is_cfunction or + base_type.python_type_constructor_name): + from .ExprNodes import TupleNode + if isinstance(self.dimension, TupleNode): + args = self.dimension.args + else: + args = self.dimension, + values = [v.analyse_as_type(env) for v in args] + if None in values: + ix = values.index(None) + error(args[ix].pos, "Template parameter not a type") + base_type = error_type + else: + base_type = base_type.specialize_here(self.pos, env, values) + return self.base.analyse(base_type, env, nonempty=nonempty, visibility=visibility, in_pxd=in_pxd) + if self.dimension: + self.dimension = self.dimension.analyse_const_expression(env) + if not self.dimension.type.is_int: + error(self.dimension.pos, "Array dimension not integer") + size = self.dimension.get_constant_c_result_code() + if size is not None: + try: + size = int(size) + except ValueError: + # runtime constant? 
+ pass + else: + size = None + if not base_type.is_complete(): + error(self.pos, "Array element type '%s' is incomplete" % base_type) + if base_type.is_pyobject: + error(self.pos, "Array element cannot be a Python object") + if base_type.is_cfunction: + error(self.pos, "Array element cannot be a function") + array_type = PyrexTypes.c_array_type(base_type, size) + return self.base.analyse(array_type, env, nonempty=nonempty, visibility=visibility, in_pxd=in_pxd) + + +class CFuncDeclaratorNode(CDeclaratorNode): + # base CDeclaratorNode + # args [CArgDeclNode] + # templates [TemplatePlaceholderType] + # has_varargs boolean + # exception_value ConstNode or NameNode NameNode when the name of a c++ exception conversion function + # exception_check boolean or "+" True if PyErr_Occurred check needed, "+" for a c++ check + # has_explicit_exc_clause boolean True if exception clause is explicitly declared + # nogil boolean Can be called without gil + # with_gil boolean Acquire gil around function body + # is_const_method boolean Whether this is a const method + + child_attrs = ["base", "args", "exception_value"] + + overridable = 0 + optional_arg_count = 0 + is_const_method = 0 + templates = None + + def declared_name(self): + return self.base.declared_name() + + def analyse_templates(self): + if isinstance(self.base, CArrayDeclaratorNode): + from .ExprNodes import TupleNode, NameNode + template_node = self.base.dimension + if isinstance(template_node, TupleNode): + template_nodes = template_node.args + elif isinstance(template_node, NameNode): + template_nodes = [template_node] + else: + error(template_node.pos, "Template arguments must be a list of names") + return None + self.templates = [] + for template in template_nodes: + if isinstance(template, NameNode): + self.templates.append(PyrexTypes.TemplatePlaceholderType(template.name)) + else: + error(template.pos, "Template arguments must be a list of names") + self.base = self.base.base + return self.templates + else: + 
return None + + def analyse(self, return_type, env, nonempty=0, directive_locals=None, visibility=None, in_pxd=False): + if directive_locals is None: + directive_locals = {} + if nonempty: + nonempty -= 1 + func_type_args = [] + for i, arg_node in enumerate(self.args): + name_declarator, type = arg_node.analyse( + env, nonempty=nonempty, + is_self_arg=(i == 0 and env.is_c_class_scope and 'staticmethod' not in env.directives)) + name = name_declarator.name + if name in directive_locals: + type_node = directive_locals[name] + other_type = type_node.analyse_as_type(env) + if other_type is None: + error(type_node.pos, "Not a type") + elif (type is not PyrexTypes.py_object_type + and not type.same_as(other_type)): + error(self.base.pos, "Signature does not agree with previous declaration") + error(type_node.pos, "Previous declaration here") + else: + type = other_type + if name_declarator.cname: + error(self.pos, "Function argument cannot have C name specification") + if i == 0 and env.is_c_class_scope and type.is_unspecified: + # fix the type of self + type = env.parent_type + # Turn *[] argument into ** + if type.is_array: + type = PyrexTypes.c_ptr_type(type.base_type) + # Catch attempted C-style func(void) decl + if type.is_void: + error(arg_node.pos, "Use spam() rather than spam(void) to declare a function with no arguments.") + func_type_args.append( + PyrexTypes.CFuncTypeArg(name, type, arg_node.pos)) + if arg_node.default: + self.optional_arg_count += 1 + elif self.optional_arg_count: + error(self.pos, "Non-default argument follows default argument") + + exc_val = None + exc_check = 0 + + if (env.directives["legacy_implicit_noexcept"] + and not return_type.is_pyobject + and not self.has_explicit_exc_clause + and self.exception_check + and visibility != 'extern'): + # If function is already declared from pxd, the exception_check has already correct value. 
+ if not (self.declared_name() in env.entries and not in_pxd): + self.exception_check = False + # implicit noexcept, with a warning + warning(self.pos, + "Implicit noexcept declaration is deprecated." + " Function declaration should contain 'noexcept' keyword.", + level=2) + + if self.exception_check == '+': + env.add_include_file('ios') # for std::ios_base::failure + env.add_include_file('new') # for std::bad_alloc + env.add_include_file('stdexcept') + env.add_include_file('typeinfo') # for std::bad_cast + elif return_type.is_pyobject and self.exception_check: + # Functions in pure Python mode default to always check return values for exceptions + # (equivalent to the "except*" declaration). In this case, the exception clause + # is silently ignored for functions returning a Python object. + self.exception_check = False + + if (return_type.is_pyobject + and (self.exception_value or self.exception_check) + and self.exception_check != '+'): + error(self.pos, "Exception clause not allowed for function returning Python object") + elif return_type.is_pyobject and not self.exception_check and visibility != 'extern' and self.has_explicit_exc_clause: + warning(self.pos, "noexcept clause is ignored for function returning Python object", 1) + else: + if self.exception_value is None and self.exception_check and self.exception_check != '+': + # Use an explicit exception return value to speed up exception checks. + # Even if it is not declared, we can use the default exception value of the return type, + # unless the function is some kind of external function that we do not control. + if (return_type.exception_value is not None and (visibility != 'extern' and not in_pxd)): + # - We skip this optimization for extension types; they are more difficult because + # the signature must match the base type signature. + # - Same for function pointers, as we want them to be able to match functions + # with any exception value. 
+ # - Ideally the function-pointer test would be better after self.base is analysed + # however that is hard to do with the current implementation so it lives here + # for now. + if not env.is_c_class_scope and not isinstance(self.base, CPtrDeclaratorNode): + from .ExprNodes import ConstNode + self.exception_value = ConstNode( + self.pos, value=return_type.exception_value, type=return_type) + if self.exception_value: + if self.exception_check == '+': + self.exception_value = self.exception_value.analyse_const_expression(env) + exc_val_type = self.exception_value.type + if (not exc_val_type.is_error + and not exc_val_type.is_pyobject + and not (exc_val_type.is_cfunction + and not exc_val_type.return_type.is_pyobject + and not exc_val_type.args) + and not (exc_val_type == PyrexTypes.c_char_type + and self.exception_value.value == '*')): + error(self.exception_value.pos, + "Exception value must be a Python exception, or C++ function with no arguments, or *.") + exc_val = self.exception_value + else: + self.exception_value = self.exception_value.analyse_types(env).coerce_to( + return_type, env).analyse_const_expression(env) + exc_val = self.exception_value.get_constant_c_result_code() + if exc_val is None: + error(self.exception_value.pos, "Exception value must be constant") + if not return_type.assignable_from(self.exception_value.type): + error(self.exception_value.pos, + "Exception value incompatible with function return type") + if (visibility != 'extern' + and (return_type.is_int or return_type.is_float) + and self.exception_value.has_constant_result()): + try: + type_default_value = float(return_type.default_value) + except ValueError: + pass + else: + if self.exception_value.constant_result == type_default_value: + warning(self.pos, "Ambiguous exception value, same as default return value: %r" % + self.exception_value.constant_result) + exc_check = self.exception_check + if return_type.is_cfunction: + error(self.pos, "Function cannot return a function") + 
func_type = PyrexTypes.CFuncType( + return_type, func_type_args, self.has_varargs, + optional_arg_count=self.optional_arg_count, + exception_value=exc_val, exception_check=exc_check, + calling_convention=self.base.calling_convention, + nogil=self.nogil, with_gil=self.with_gil, is_overridable=self.overridable, + is_const_method=self.is_const_method, + templates=self.templates) + + if self.optional_arg_count: + if func_type.is_fused: + # This is a bit of a hack... When we need to create specialized CFuncTypes + # on the fly because the cdef is defined in a pxd, we need to declare the specialized optional arg + # struct + def declare_opt_arg_struct(func_type, fused_cname): + self.declare_optional_arg_struct(func_type, env, fused_cname) + + func_type.declare_opt_arg_struct = declare_opt_arg_struct + else: + self.declare_optional_arg_struct(func_type, env) + + callspec = env.directives['callspec'] + if callspec: + current = func_type.calling_convention + if current and current != callspec: + error(self.pos, "cannot have both '%s' and '%s' " + "calling conventions" % (current, callspec)) + func_type.calling_convention = callspec + + if func_type.return_type.is_rvalue_reference: + warning(self.pos, "Rvalue-reference as function return type not supported", 1) + for arg in func_type.args: + if arg.type.is_rvalue_reference and not arg.is_forwarding_reference(): + warning(self.pos, "Rvalue-reference as function argument not supported", 1) + + return self.base.analyse(func_type, env, visibility=visibility, in_pxd=in_pxd) + + def declare_optional_arg_struct(self, func_type, env, fused_cname=None): + """ + Declares the optional argument struct (the struct used to hold the + values for optional arguments). For fused cdef functions, this is + deferred as analyse_declarations is called only once (on the fused + cdef function). 
+ """ + scope = StructOrUnionScope() + arg_count_member = '%sn' % Naming.pyrex_prefix + scope.declare_var(arg_count_member, PyrexTypes.c_int_type, self.pos) + + for arg in func_type.args[len(func_type.args) - self.optional_arg_count:]: + scope.declare_var(arg.name, arg.type, arg.pos, allow_pyobject=True, allow_memoryview=True) + + struct_cname = env.mangle(Naming.opt_arg_prefix, self.base.name) + + if fused_cname is not None: + struct_cname = PyrexTypes.get_fused_cname(fused_cname, struct_cname) + + op_args_struct = env.global_scope().declare_struct_or_union( + name=struct_cname, + kind='struct', + scope=scope, + typedef_flag=0, + pos=self.pos, + cname=struct_cname) + + op_args_struct.defined_in_pxd = 1 + op_args_struct.used = 1 + + func_type.op_arg_struct = PyrexTypes.c_ptr_type(op_args_struct.type) + + +class CConstDeclaratorNode(CDeclaratorNode): + # base CDeclaratorNode + + child_attrs = ["base"] + + def analyse(self, base_type, env, nonempty=0, visibility=None, in_pxd=False): + if base_type.is_pyobject: + error(self.pos, + "Const base type cannot be a Python object") + const = PyrexTypes.c_const_type(base_type) + return self.base.analyse(const, env, nonempty=nonempty, visibility=visibility, in_pxd=in_pxd) + + +class CArgDeclNode(Node): + # Item in a function declaration argument list. 
    #
    # base_type      CBaseTypeNode
    # declarator     CDeclaratorNode
    # not_none       boolean            Tagged with 'not None'
    # or_none        boolean            Tagged with 'or None'
    # accept_none    boolean            Resolved boolean for not_none/or_none
    # default        ExprNode or None
    # default_value  PyObjectConst      constant for default value
    # annotation     ExprNode or None   Py3 function arg annotation
    # is_self_arg    boolean            Is the "self" arg of an extension type method
    # is_type_arg    boolean            Is the "class" arg of an extension type classmethod
    # kw_only        boolean            Is a keyword-only argument
    # is_dynamic     boolean            Non-literal arg stored inside CyFunction
    # pos_only       boolean            Is a positional-only argument
    #
    # name_cstring   property that converts the name to a cstring taking care of unicode
    #                and quoting it

    child_attrs = ["base_type", "declarator", "default", "annotation"]
    outer_attrs = ["default", "annotation"]

    is_self_arg = 0
    is_type_arg = 0
    is_generic = 1
    is_special_method_optional = False
    kw_only = 0
    pos_only = 0
    not_none = 0
    or_none = 0
    type = None
    name_declarator = None
    default_value = None
    annotation = None
    is_dynamic = 0

    def declared_name(self):
        # Name as written in the source; delegates to the declarator.
        return self.declarator.declared_name()

    @property
    def name_cstring(self):
        return self.name.as_c_string_literal()

    @property
    def hdr_cname(self):
        # done lazily - needs self.entry to be set to get the class-mangled
        # name, which means it has to be generated relatively late
        if self.needs_conversion:
            return punycodify_name(Naming.arg_prefix + self.entry.name)
        else:
            return punycodify_name(Naming.var_prefix + self.entry.name)

    def analyse(self, env, nonempty=0, is_self_arg=False):
        """Analyse this argument declaration.

        Returns a (name_declarator, type) pair, delegating the final step to
        self.declarator.analyse().  Results are cached via self.type for
        repeated calls.
        """
        if is_self_arg:
            self.base_type.is_self_arg = self.is_self_arg = is_self_arg
        if self.type is not None:
            # Already analysed earlier - reuse the cached result.
            return self.name_declarator, self.type

        # The parser may misinterpret names as types. We fix that here.
        if isinstance(self.declarator, CNameDeclaratorNode) and self.declarator.name == '':
            if nonempty:
                if self.base_type.is_basic_c_type:
                    # char, short, long called "int"
                    type = self.base_type.analyse(env, could_be_name=True)
                    arg_name = type.empty_declaration_code()
                else:
                    arg_name = self.base_type.name
                self.declarator.name = EncodedString(arg_name)
                self.base_type.name = None
                self.base_type.is_basic_c_type = False
            could_be_name = True
        else:
            could_be_name = False
        self.base_type.is_arg = True
        base_type = self.base_type.analyse(env, could_be_name=could_be_name)
        base_arg_name = getattr(self.base_type, 'arg_name', None)
        if base_arg_name:
            self.declarator.name = base_arg_name

        # The parser is unable to resolve the ambiguity of [] as part of the
        # type (e.g. in buffers) or empty declarator (as with arrays).
        # This is only arises for empty multi-dimensional arrays.
        if (base_type.is_array
                and isinstance(self.base_type, TemplatedTypeNode)
                and isinstance(self.declarator, CArrayDeclaratorNode)):
            declarator = self.declarator
            while isinstance(declarator.base, CArrayDeclaratorNode):
                declarator = declarator.base
            declarator.base = self.base_type.array_declarator
            base_type = base_type.base_type

        # inject type declaration from annotations
        # this is called without 'env' by AdjustDefByDirectives transform before declaration analysis
        if (self.annotation and env and env.directives['annotation_typing']
                # CSimpleBaseTypeNode has a name attribute; CAnalysedBaseTypeNode
                # (and maybe other options) doesn't
                and getattr(self.base_type, "name", None) is None):
            arg_type = self.inject_type_from_annotations(env)
            if arg_type is not None:
                base_type = arg_type
        return self.declarator.analyse(base_type, env, nonempty=nonempty)

    def inject_type_from_annotations(self, env):
        """Derive the argument type from a PEP-484 style annotation, if any.

        Returns the resolved type (or None) and, as a side effect, sets
        self.or_none / self.not_none based on typing.Optional, ": object"
        and "= None" defaults.
        """
        annotation = self.annotation
        if not annotation:
            return None

        modifiers, arg_type = annotation.analyse_type_annotation(env, assigned_value=self.default)
        if arg_type is not None:
            self.base_type = CAnalysedBaseTypeNode(
                annotation.pos, type=arg_type, is_arg=True)

        if arg_type:
            if "typing.Optional" in modifiers:
                # "x: Optional[...]" => explicitly allow 'None'
                arg_type = arg_type.resolve()
                if arg_type and not arg_type.can_be_optional():
                    # We probably already reported this as "cannot be applied to non-Python type".
                    # error(annotation.pos, "Only Python type arguments can use typing.Optional[...]")
                    pass
                else:
                    self.or_none = True
            elif arg_type is py_object_type:
                # exclude ": object" from the None check - None is a generic object.
                self.or_none = True
            elif self.default and self.default.is_none and (arg_type.can_be_optional() or arg_type.equivalent_type):
                # "x: ... = None" => implicitly allow 'None'
                if not arg_type.can_be_optional():
                    arg_type = arg_type.equivalent_type
                if not self.or_none:
                    warning(self.pos, "PEP-484 recommends 'typing.Optional[...]' for arguments that can be None.")
                    self.or_none = True
            elif not self.or_none and arg_type.can_be_optional():
                self.not_none = True

        return arg_type

    def calculate_default_value_code(self, code):
        # Returns the C expression for this argument's default value,
        # creating a module-level constant for non-literal defaults.
        if self.default_value is None:
            if self.default:
                if self.default.is_literal:
                    # will not output any code, just assign the result_code
                    self.default.generate_evaluation_code(code)
                    return self.type.cast_code(self.default.result())
                self.default_value = code.get_argument_default_const(self.type)
        return self.default_value

    def annotate(self, code):
        if self.default:
            self.default.annotate(code)

    def generate_assignment_code(self, code, target=None, overloaded_assignment=False):
        # Evaluate a non-literal default value and assign it to 'target'
        # (falling back to the cached default-value constant).
        default = self.default
        if default is None or default.is_literal:
            return
        if target is None:
            target = self.calculate_default_value_code(code)
        default.generate_evaluation_code(code)
        default.make_owned_reference(code)
        result = default.result() if overloaded_assignment else default.result_as(self.type)
        code.putln("%s = %s;" % (target, result))
        code.put_giveref(default.result(), self.type)
        default.generate_post_assignment_code(code)
        default.free_temps(code)


class CBaseTypeNode(Node):
    # Abstract base class for C base type nodes.
    #
    # Processing during analyse_declarations phase:
    #
    #   analyse
    #     Returns the type.

    def analyse_as_type(self, env):
        return self.analyse(env)


class CAnalysedBaseTypeNode(Node):
    # type            type
    #
    # Wraps an already-analysed type; analyse() simply returns it.

    child_attrs = []

    def analyse(self, env, could_be_name=False):
        return self.type


class CSimpleBaseTypeNode(CBaseTypeNode):
    # name             string
    # module_path      [string]     Qualifying name components
    # is_basic_c_type  boolean
    # signed           boolean
    # longness         integer
    # complex          boolean
    # is_self_arg      boolean      Is self argument of C method
    # ##is_type_arg    boolean      Is type argument of class method

    child_attrs = []
    arg_name = None   # in case the argument name was interpreted as a type
    module_path = []
    is_basic_c_type = False
    complex = False
    is_self_arg = False

    def analyse(self, env, could_be_name=False):
        # Return type descriptor.
        #print "CSimpleBaseTypeNode.analyse: is_self_arg =", self.is_self_arg ###
        type = None
        if self.is_basic_c_type:
            type = PyrexTypes.simple_c_type(self.signed, self.longness, self.name)
            if not type:
                error(self.pos, "Unrecognised type modifier combination")
        elif self.name == "object" and not self.module_path:
            type = py_object_type
        elif self.name is None:
            if self.is_self_arg and env.is_c_class_scope:
                #print "CSimpleBaseTypeNode.analyse: defaulting to parent type" ###
                type = env.parent_type
            ## elif self.is_type_arg and env.is_c_class_scope:
            ##     type = Builtin.type_type
            else:
                type = py_object_type
        else:
            scope = env
            if self.module_path:
                # Maybe it's a nested C++ class.
                for item in self.module_path:
                    entry = scope.lookup(item)
                    if entry is not None and (
                            entry.is_cpp_class or
                            entry.is_type and entry.type.is_cpp_class):
                        scope = entry.type.scope
                    elif entry and entry.as_module:
                        scope = entry.as_module
                    else:
                        scope = None
                        break
                if scope is None and len(self.module_path) == 1:
                    # (may be possible to handle longer module paths?)
                    # TODO: probably not the best place to declare it?
                    from .Builtin import get_known_standard_library_module_scope
                    found_entry = env.lookup(self.module_path[0])
                    if found_entry and found_entry.known_standard_library_import:
                        scope = get_known_standard_library_module_scope(found_entry.known_standard_library_import)
                if scope is None:
                    # Maybe it's a cimport.
                    scope = env.find_imported_module(self.module_path, self.pos)

            if scope:
                if scope.is_c_class_scope:
                    scope = scope.global_scope()

                type = scope.lookup_type(self.name)
                if type is not None:
                    pass
                elif could_be_name:
                    # Name was not a known type - treat it as an argument name.
                    if self.is_self_arg and env.is_c_class_scope:
                        type = env.parent_type
                    ## elif self.is_type_arg and env.is_c_class_scope:
                    ##     type = Builtin.type_type
                    else:
                        type = py_object_type
                    self.arg_name = EncodedString(self.name)
                else:
                    if self.templates:
                        if self.name not in self.templates:
                            error(self.pos, "'%s' is not a type identifier" % self.name)
                        type = PyrexTypes.TemplatePlaceholderType(self.name)
                    else:
                        error(self.pos, "'%s' is not a type identifier" % self.name)
        if type and type.is_fused and env.fused_to_specific:
            type = type.specialize(env.fused_to_specific)
        if self.complex:
            if not type.is_numeric or type.is_complex:
                error(self.pos, "can only complexify c numeric types")
            type = PyrexTypes.CComplexType(type)
            type.create_declaration_utility_code(env)
        elif type is Builtin.complex_type:
            # Special case: optimise builtin complex type into C's
            # double complex.  The parser cannot do this (as for the
            # normal scalar types) as the user may have redeclared the
            # 'complex' type.  Testing for the exact type here works.
            type = PyrexTypes.c_double_complex_type
            type.create_declaration_utility_code(env)
            self.complex = True
        if not type:
            type = PyrexTypes.error_type
        return type

class MemoryViewSliceTypeNode(CBaseTypeNode):

    name = 'memoryview'
    child_attrs = ['base_type_node', 'axes']

    def analyse(self, env, could_be_name=False):
        # Build a MemoryViewSliceType from the element type and axis specs,
        # reporting (not raising) compile errors via error().
        base_type = self.base_type_node.analyse(env)
        if base_type.is_error: return base_type

        from . import MemoryView

        try:
            axes_specs = MemoryView.get_axes_specs(env, self.axes)
        except CompileError as e:
            error(e.position, e.message_only)
            self.type = PyrexTypes.ErrorType()
            return self.type

        if not MemoryView.validate_axes(self.pos, axes_specs):
            self.type = error_type
        else:
            self.type = PyrexTypes.MemoryViewSliceType(base_type, axes_specs)
            self.type.validate_memslice_dtype(self.pos)
            self.use_memview_utilities(env)

        return self.type

    def use_memview_utilities(self, env):
        from . import MemoryView
        env.use_utility_code(MemoryView.view_utility_code)


class CNestedBaseTypeNode(CBaseTypeNode):
    # For C++ classes that live inside other C++ classes.
    # name             string
    # base_type        CBaseTypeNode

    child_attrs = ['base_type']

    def analyse(self, env, could_be_name=None):
        # Look up self.name inside the scope of the (C++ class) base type.
        base_type = self.base_type.analyse(env)
        if base_type is PyrexTypes.error_type:
            return PyrexTypes.error_type
        if not base_type.is_cpp_class:
            error(self.pos, "'%s' is not a valid type scope" % base_type)
            return PyrexTypes.error_type
        type_entry = base_type.scope.lookup_here(self.name)
        if not type_entry or not type_entry.is_type:
            error(self.pos, "'%s.%s' is not a type identifier" % (base_type, self.name))
            return PyrexTypes.error_type
        return type_entry.type


class TemplatedTypeNode(CBaseTypeNode):
    #  After parsing:
    #  positional_args  [ExprNode]        List of positional arguments
    #  keyword_args     DictNode          Keyword arguments
    #  base_type_node   CBaseTypeNode

    #  After analysis:
    #  type             PyrexTypes.BufferType or PyrexTypes.CppClassType  ...containing the right options

    child_attrs = ["base_type_node", "positional_args",
                   "keyword_args", "dtype_node"]

    is_templated_type_node = True
    dtype_node = None
    name = None

    def _analyse_template_types(self, env, base_type):
        # Analyse the [...] arguments of a template/generic subscript and
        # return the list of resolved types (error_type for failures).
        require_optional_types = base_type.python_type_constructor_name == 'typing.Optional'
        require_python_types = base_type.python_type_constructor_name == 'dataclasses.ClassVar'

        in_c_type_context = env.in_c_type_context and not require_python_types

        template_types = []
        for template_node in self.positional_args:
            # CBaseTypeNode -> allow C type declarations in a 'cdef' context again
            with env.new_c_type_context(in_c_type_context or isinstance(template_node, CBaseTypeNode)):
                ttype = template_node.analyse_as_type(env)
            if ttype is None:
                if base_type.is_cpp_class:
                    error(template_node.pos, "unknown type in template argument")
                ttype = error_type
            # For Python generics we can be a bit more flexible and allow None.
            elif require_python_types and not ttype.is_pyobject or require_optional_types and not ttype.can_be_optional():
                if ttype.equivalent_type and not template_node.as_cython_attribute():
                    ttype = ttype.equivalent_type
                else:
                    error(template_node.pos, "%s[...] cannot be applied to type %s" % (
                        base_type.python_type_constructor_name,
                        ttype,
                    ))
                    ttype = error_type
            template_types.append(ttype)

        return template_types

    def analyse(self, env, could_be_name=False, base_type=None):
        """Resolve this subscripted type: C++ template / Python generic,
        Python buffer type, or C array, depending on the base type."""
        if base_type is None:
            base_type = self.base_type_node.analyse(env)
        if base_type.is_error: return base_type

        if ((base_type.is_cpp_class and base_type.is_template_type()) or
                base_type.python_type_constructor_name):
            # Templated class, Python generics, etc.
            if self.keyword_args and self.keyword_args.key_value_pairs:
                tp = "c++ templates" if base_type.is_cpp_class else "indexed types"
                error(self.pos, "%s cannot take keyword arguments" % tp)
                self.type = PyrexTypes.error_type
                return self.type

            template_types = self._analyse_template_types(env, base_type)
            self.type = base_type.specialize_here(self.pos, env, template_types)

        elif base_type.is_pyobject:
            # Buffer
            from . import Buffer

            options = Buffer.analyse_buffer_options(
                self.pos,
                env,
                self.positional_args,
                self.keyword_args,
                base_type.buffer_defaults)

            if sys.version_info[0] < 3:
                # Py 2.x enforces byte strings as keyword arguments ...
+ options = dict([(name.encode('ASCII'), value) + for name, value in options.items()]) + + self.type = PyrexTypes.BufferType(base_type, **options) + if has_np_pythran(env) and is_pythran_buffer(self.type): + self.type = PyrexTypes.PythranExpr(pythran_type(self.type), self.type) + + else: + # Array + empty_declarator = CNameDeclaratorNode(self.pos, name="", cname=None) + if len(self.positional_args) > 1 or self.keyword_args.key_value_pairs: + error(self.pos, "invalid array declaration") + self.type = PyrexTypes.error_type + else: + # It would be nice to merge this class with CArrayDeclaratorNode, + # but arrays are part of the declaration, not the type... + if not self.positional_args: + dimension = None + else: + dimension = self.positional_args[0] + self.array_declarator = CArrayDeclaratorNode( + self.pos, + base=empty_declarator, + dimension=dimension) + self.type = self.array_declarator.analyse(base_type, env)[1] + + if self.type and self.type.is_fused and env.fused_to_specific: + try: + self.type = self.type.specialize(env.fused_to_specific) + except CannotSpecialize: + error(self.pos, + "'%s' cannot be specialized since its type is not a fused argument to this function" % + self.name) + + return self.type + + def analyse_pytyping_modifiers(self, env): + # Check for declaration modifiers, e.g. 
"typing.Optional[...]" or "dataclasses.InitVar[...]" + # TODO: somehow bring this together with IndexNode.analyse_pytyping_modifiers() + modifiers = [] + modifier_node = self + while modifier_node.is_templated_type_node and modifier_node.base_type_node and len(modifier_node.positional_args) == 1: + modifier_type = self.base_type_node.analyse_as_type(env) + if modifier_type.python_type_constructor_name and modifier_type.modifier_name: + modifiers.append(modifier_type.modifier_name) + modifier_node = modifier_node.positional_args[0] + + return modifiers + + +class CComplexBaseTypeNode(CBaseTypeNode): + # base_type CBaseTypeNode + # declarator CDeclaratorNode + + child_attrs = ["base_type", "declarator"] + + def analyse(self, env, could_be_name=False): + base = self.base_type.analyse(env, could_be_name) + _, type = self.declarator.analyse(base, env) + return type + + +class CTupleBaseTypeNode(CBaseTypeNode): + # components [CBaseTypeNode] + + child_attrs = ["components"] + + def analyse(self, env, could_be_name=False): + component_types = [] + for c in self.components: + type = c.analyse(env) + if type.is_pyobject: + error(c.pos, "Tuple types can't (yet) contain Python objects.") + return error_type + component_types.append(type) + entry = env.declare_tuple_type(self.pos, component_types) + entry.used = True + return entry.type + + +class FusedTypeNode(CBaseTypeNode): + """ + Represents a fused type in a ctypedef statement: + + ctypedef cython.fused_type(int, long, long long) integral + + name str name of this fused type + types [CSimpleBaseTypeNode] is the list of types to be fused + """ + + child_attrs = [] + + def analyse_declarations(self, env): + type = self.analyse(env) + entry = env.declare_typedef(self.name, type, self.pos) + + # Omit the typedef declaration that self.declarator would produce + entry.in_cinclude = True + + def analyse(self, env, could_be_name=False): + types = [] + for type_node in self.types: + type = type_node.analyse_as_type(env) + + if not 
type: + error(type_node.pos, "Not a type") + continue + + if type in types: + error(type_node.pos, "Type specified multiple times") + else: + types.append(type) + + # if len(self.types) == 1: + # return types[0] + + return PyrexTypes.FusedType(types, name=self.name) + + +class CConstOrVolatileTypeNode(CBaseTypeNode): + # base_type CBaseTypeNode + # is_const boolean + # is_volatile boolean + + child_attrs = ["base_type"] + + def analyse(self, env, could_be_name=False): + base = self.base_type.analyse(env, could_be_name) + if base.is_pyobject: + error(self.pos, + "Const/volatile base type cannot be a Python object") + return PyrexTypes.c_const_or_volatile_type(base, self.is_const, self.is_volatile) + + +class CVarDefNode(StatNode): + # C variable definition or forward/extern function declaration. + # + # visibility 'private' or 'public' or 'extern' + # base_type CBaseTypeNode + # declarators [CDeclaratorNode] + # in_pxd boolean + # api boolean + # overridable boolean whether it is a cpdef + # modifiers ['inline'] + + # decorators [cython.locals(...)] or None + # directive_locals { string : NameNode } locals defined by cython.locals(...) 
    child_attrs = ["base_type", "declarators"]

    decorators = None
    directive_locals = None

    def analyse_declarations(self, env, dest_scope=None):
        """Declare each declarator of this cdef statement in dest_scope
        (defaulting to env), as either a C function or a C variable."""
        if self.directive_locals is None:
            self.directive_locals = {}
        if not dest_scope:
            dest_scope = env
        self.dest_scope = dest_scope

        if self.declarators:
            templates = self.declarators[0].analyse_templates()
        else:
            templates = None
        if templates is not None:
            # Templated declaration: only a single extern function is allowed.
            if self.visibility != 'extern':
                error(self.pos, "Only extern functions allowed")
            if len(self.declarators) > 1:
                error(self.declarators[1].pos, "Can't multiply declare template types")
            env = TemplateScope('func_template', env)
            env.directives = env.outer_scope.directives
            for template_param in templates:
                env.declare_type(template_param.name, template_param, self.pos)

        base_type = self.base_type.analyse(env)

        # Check for declaration modifiers, e.g. "typing.Optional[...]" or "dataclasses.InitVar[...]"
        modifiers = None
        if self.base_type.is_templated_type_node:
            modifiers = self.base_type.analyse_pytyping_modifiers(env)

        if base_type.is_fused and not self.in_pxd and (env.is_c_class_scope or
                                                       env.is_module_scope):
            error(self.pos, "Fused types not allowed here")
            return error_type

        self.entry = None
        visibility = self.visibility

        for declarator in self.declarators:

            if (len(self.declarators) > 1
                    and not isinstance(declarator, CNameDeclaratorNode)
                    and env.directives['warn.multiple_declarators']):
                warning(
                    declarator.pos,
                    "Non-trivial type declarators in shared declaration (e.g. mix of pointers and values). "
                    "Each pointer declaration should be on its own line.", 1)

            create_extern_wrapper = (self.overridable
                                     and self.visibility == 'extern'
                                     and env.is_module_scope)
            if create_extern_wrapper:
                declarator.overridable = False
            if isinstance(declarator, CFuncDeclaratorNode):
                name_declarator, type = declarator.analyse(
                    base_type, env, directive_locals=self.directive_locals, visibility=visibility, in_pxd=self.in_pxd)
            else:
                name_declarator, type = declarator.analyse(
                    base_type, env, visibility=visibility, in_pxd=self.in_pxd)
            if not type.is_complete():
                if not (self.visibility == 'extern' and type.is_array or type.is_memoryviewslice):
                    error(declarator.pos, "Variable type '%s' is incomplete" % type)
            if self.visibility == 'extern' and type.is_pyobject:
                error(declarator.pos, "Python object cannot be declared extern")
            name = name_declarator.name
            cname = name_declarator.cname
            if name == '':
                error(declarator.pos, "Missing name in declaration.")
                return
            if type.is_reference and self.visibility != 'extern':
                error(declarator.pos, "C++ references cannot be declared; use a pointer instead")
            if type.is_rvalue_reference and self.visibility != 'extern':
                error(declarator.pos, "C++ rvalue-references cannot be declared")
            if type.is_cfunction:
                if 'staticmethod' in env.directives:
                    type.is_static_method = True
                self.entry = dest_scope.declare_cfunction(
                    name, type, declarator.pos,
                    cname=cname, visibility=self.visibility, in_pxd=self.in_pxd,
                    api=self.api, modifiers=self.modifiers, overridable=self.overridable)
                if self.entry is not None:
                    self.entry.directive_locals = copy.copy(self.directive_locals)
                if create_extern_wrapper:
                    self.entry.type.create_to_py_utility_code(env)
                    self.entry.create_wrapper = True
            else:
                if self.overridable:
                    error(self.pos, "Variables cannot be declared with 'cpdef'. Use 'cdef' instead.")
                if self.directive_locals:
                    error(self.pos, "Decorators can only be followed by functions")
                self.entry = dest_scope.declare_var(
                    name, type, declarator.pos,
                    cname=cname, visibility=visibility, in_pxd=self.in_pxd,
                    api=self.api, is_cdef=True, pytyping_modifiers=modifiers)
                if Options.docstrings:
                    self.entry.doc = embed_position(self.pos, self.doc)


class CStructOrUnionDefNode(StatNode):
    #  name          string
    #  cname         string or None
    #  kind          "struct" or "union"
    #  typedef_flag  boolean
    #  visibility    "public" or "private"
    #  api           boolean
    #  in_pxd        boolean
    #  attributes    [CVarDefNode] or None
    #  entry         Entry
    #  packed        boolean

    child_attrs = ["attributes"]

    def declare(self, env, scope=None):
        self.entry = env.declare_struct_or_union(
            self.name, self.kind, scope, self.typedef_flag, self.pos,
            self.cname, visibility=self.visibility, api=self.api,
            packed=self.packed)

    def analyse_declarations(self, env):
        scope = None
        if self.attributes is not None:
            scope = StructOrUnionScope(self.name)
        self.declare(env, scope)
        if self.attributes is not None:
            if self.in_pxd and not env.in_cinclude:
                self.entry.defined_in_pxd = 1
            for attr in self.attributes:
                attr.analyse_declarations(env, scope)
            if self.visibility != 'extern':
                # Reject directly self-referential members (arrays included).
                for attr in scope.var_entries:
                    type = attr.type
                    while type.is_array:
                        type = type.base_type
                    if type == self.entry.type:
                        error(attr.pos, "Struct cannot contain itself as a member.")

    def analyse_expressions(self, env):
        return self

    def generate_execution_code(self, code):
        pass


class CppClassNode(CStructOrUnionDefNode, BlockNode):

    #  name          string
    #  cname         string or None
    #  visibility    "extern"
    #  in_pxd        boolean
    #  attributes    [CVarDefNode] or None
    #  entry         Entry
    #  base_classes  [CBaseTypeNode]
    #  templates     [(string, bool)] or None
    #  decorators    [DecoratorNode] or None

    decorators = None

    def declare(self, env):
        if self.templates is None:
            template_types = None
+ else: + template_types = [PyrexTypes.TemplatePlaceholderType(template_name, not required) + for template_name, required in self.templates] + num_optional_templates = sum(not required for _, required in self.templates) + if num_optional_templates and not all(required for _, required in self.templates[:-num_optional_templates]): + error(self.pos, "Required template parameters must precede optional template parameters.") + self.entry = env.declare_cpp_class( + self.name, None, self.pos, self.cname, + base_classes=[], visibility=self.visibility, templates=template_types) + + def analyse_declarations(self, env): + if self.templates is None: + template_types = template_names = None + else: + template_names = [template_name for template_name, _ in self.templates] + template_types = [PyrexTypes.TemplatePlaceholderType(template_name, not required) + for template_name, required in self.templates] + scope = None + if self.attributes is not None: + scope = CppClassScope(self.name, env, templates=template_names) + def base_ok(base_class): + if base_class.is_cpp_class or base_class.is_struct: + return True + else: + error(self.pos, "Base class '%s' not a struct or class." 
% base_class) + base_class_types = filter(base_ok, [b.analyse(scope or env) for b in self.base_classes]) + self.entry = env.declare_cpp_class( + self.name, scope, self.pos, + self.cname, base_class_types, visibility=self.visibility, templates=template_types) + if self.entry is None: + return + self.entry.is_cpp_class = 1 + if scope is not None: + scope.type = self.entry.type + defined_funcs = [] + def func_attributes(attributes): + for attr in attributes: + if isinstance(attr, CFuncDefNode): + yield attr + elif isinstance(attr, CompilerDirectivesNode): + for sub_attr in func_attributes(attr.body.stats): + yield sub_attr + elif isinstance(attr, CppClassNode) and attr.attributes is not None: + for sub_attr in func_attributes(attr.attributes): + yield sub_attr + if self.attributes is not None: + if self.in_pxd and not env.in_cinclude: + self.entry.defined_in_pxd = 1 + for attr in self.attributes: + declare = getattr(attr, 'declare', None) + if declare: + attr.declare(scope) + attr.analyse_declarations(scope) + for func in func_attributes(self.attributes): + defined_funcs.append(func) + if self.templates is not None: + func.template_declaration = "template " % ", typename ".join(template_names) + self.body = StatListNode(self.pos, stats=defined_funcs) + self.scope = scope + + def analyse_expressions(self, env): + self.body = self.body.analyse_expressions(self.entry.type.scope) + return self + + def generate_function_definitions(self, env, code): + self.body.generate_function_definitions(self.entry.type.scope, code) + + def generate_execution_code(self, code): + self.body.generate_execution_code(code) + + def annotate(self, code): + self.body.annotate(code) + + +class CEnumDefNode(StatNode): + # name string or None + # cname string or None + # scoped boolean Is a C++ scoped enum + # underlying_type CSimpleBaseTypeNode The underlying value type (int or C++ type) + # items [CEnumDefItemNode] + # typedef_flag boolean + # visibility "public" or "private" or "extern" + # api 
    #  api                boolean
    #  in_pxd             boolean
    #  create_wrapper     boolean
    #  entry              Entry
    #  doc                EncodedString or None   Doc string

    child_attrs = ["items", "underlying_type"]
    doc = None

    def declare(self, env):
        doc = None
        if Options.docstrings:
            doc = embed_position(self.pos, self.doc)

        self.entry = env.declare_enum(
            self.name, self.pos,
            cname=self.cname,
            scoped=self.scoped,
            typedef_flag=self.typedef_flag,
            visibility=self.visibility, api=self.api,
            create_wrapper=self.create_wrapper, doc=doc)

    def analyse_declarations(self, env):
        # Resolve the underlying value type, then declare each enum item
        # either in a C++ scoped-enum scope or directly in env.
        scope = None
        underlying_type = self.underlying_type.analyse(env)

        if not underlying_type.is_int:
            error(self.underlying_type.pos, "underlying type is not an integral type")

        self.entry.type.underlying_type = underlying_type

        if self.scoped and self.items is not None:
            scope = CppScopedEnumScope(self.name, env)
            scope.type = self.entry.type
            scope.directives = env.directives
        else:
            scope = env

        if self.items is not None:
            if self.in_pxd and not env.in_cinclude:
                self.entry.defined_in_pxd = 1

            # For extern enums, we can't reason about their equivalent int values because
            # we don't know if their definition is complete.
            is_declared_enum = self.visibility != 'extern'

            next_int_enum_value = 0 if is_declared_enum else None
            for item in self.items:
                item.analyse_enum_declarations(scope, self.entry, next_int_enum_value)
                if is_declared_enum:
                    # Continue counting from the last known value (C semantics).
                    next_int_enum_value = 1 + (
                        item.entry.enum_int_value if item.entry.enum_int_value is not None else next_int_enum_value)

    def analyse_expressions(self, env):
        return self

    def generate_execution_code(self, code):
        # Expose public/api enum values in the module dict as Python ints.
        if self.scoped:
            return  # nothing to do here for C++ enums
        if self.visibility == 'public' or self.api:
            code.mark_pos(self.pos)
            temp = code.funcstate.allocate_temp(PyrexTypes.py_object_type, manage_ref=True)
            for item in self.entry.enum_values:
                code.putln("%s = PyInt_FromLong(%s); %s" % (
                    temp,
                    item.cname,
                    code.error_goto_if_null(temp, item.pos)))
                code.put_gotref(temp, PyrexTypes.py_object_type)
                code.putln('if (PyDict_SetItemString(%s, "%s", %s) < 0) %s' % (
                    Naming.moddict_cname,
                    item.name,
                    temp,
                    code.error_goto(item.pos)))
                code.put_decref_clear(temp, PyrexTypes.py_object_type)
            code.funcstate.release_temp(temp)


class CEnumDefItemNode(StatNode):
    #  name     string
    #  cname    string or None
    #  value    ExprNode or None

    child_attrs = ["value"]

    def analyse_enum_declarations(self, env, enum_entry, incremental_int_value):
        """Declare one enum member as a constant of the enum type."""
        if self.value:
            self.value = self.value.analyse_const_expression(env)
            if not self.value.type.is_int:
                self.value = self.value.coerce_to(PyrexTypes.c_int_type, env)
                self.value = self.value.analyse_const_expression(env)

        if enum_entry.type.is_cpp_enum:
            cname = "%s::%s" % (enum_entry.cname, self.name)
        else:
            cname = self.cname

        self.entry = entry = env.declare_const(
            self.name, enum_entry.type,
            self.value, self.pos, cname=cname,
            visibility=enum_entry.visibility, api=enum_entry.api,
            create_wrapper=enum_entry.create_wrapper and enum_entry.name is None)
        # Use the incremental integer value unless we see an explicitly declared value.
        enum_value = incremental_int_value
        if self.value:
            if self.value.is_literal:
                enum_value = str_to_number(self.value.value)
            elif (self.value.is_name or self.value.is_attribute) and self.value.entry:
                enum_value = self.value.entry.enum_int_value
            else:
                # There is a value but we don't understand its integer value.
                enum_value = None
        if enum_value is not None:
            entry.enum_int_value = enum_value

        enum_entry.enum_values.append(entry)
        if enum_entry.name:
            enum_entry.type.values.append(entry.name)


class CTypeDefNode(StatNode):
    #  base_type    CBaseTypeNode
    #  declarator   CDeclaratorNode
    #  visibility   "public" or "private"
    #  api          boolean
    #  in_pxd       boolean

    child_attrs = ["base_type", "declarator"]

    def analyse_declarations(self, env):
        # Analyse "ctypedef base declarator" and declare the typedef name.
        base = self.base_type.analyse(env)
        name_declarator, type = self.declarator.analyse(
            base, env, visibility=self.visibility, in_pxd=self.in_pxd)
        name = name_declarator.name
        cname = name_declarator.cname

        entry = env.declare_typedef(
            name, type, self.pos,
            cname=cname, visibility=self.visibility, api=self.api)

        if type.is_fused:
            # Fused typedefs produce no C-level typedef declaration.
            entry.in_cinclude = True

        if self.in_pxd and not env.in_cinclude:
            entry.defined_in_pxd = 1

    def analyse_expressions(self, env):
        return self

    def generate_execution_code(self, code):
        pass


class FuncDefNode(StatNode, BlockNode):
    #  Base class for function definition nodes.
    #
    #  return_type     PyrexType
    #  #filename       string        C name of filename string const
    #  entry           Symtab.Entry
    #  needs_closure   boolean       Whether or not this function has inner functions/classes/yield
    #  needs_outer_scope boolean     Whether or not this function requires outer scope
    #  pymethdef_required boolean    Force Python method struct generation
    #  directive_locals { string : ExprNode } locals defined by cython.locals(...)
    #  directive_returns [ExprNode] type defined by cython.returns(...)
    #  star_arg          PyArgDeclNode or None  * argument
    #  starstar_arg      PyArgDeclNode or None  ** argument
    #
    #  is_async_def      boolean          is a Coroutine function
    #
    #  has_fused_arguments  boolean
    #       Whether this cdef function has fused parameters. This is needed
    #       by AnalyseDeclarationsTransform, so it can replace CFuncDefNodes
    #       with fused argument types with a FusedCFuncDefNode

    py_func = None
    needs_closure = False
    needs_outer_scope = False
    pymethdef_required = False
    is_generator = False
    is_generator_expression = False  # this can be True alongside is_generator
    is_coroutine = False
    is_asyncgen = False
    is_generator_body = False
    is_async_def = False
    modifiers = []
    has_fused_arguments = False
    star_arg = None
    starstar_arg = None
    is_cyfunction = False
    code_object = None
    return_type_annotation = None

    outer_attrs = None  # overridden by some derived classes - to be visited outside the node's scope

    def analyse_default_values(self, env):
        # Analyse/coerce argument default values and enforce the
        # "non-default argument after default argument" rule.
        default_seen = 0
        for arg in self.args:
            if arg.default:
                default_seen = 1
                if arg.is_generic:
                    arg.default = arg.default.analyse_types(env)
                    arg.default = arg.default.coerce_to(arg.type, env)
                elif arg.is_special_method_optional:
                    if not arg.default.is_none:
                        error(arg.pos, "This argument cannot have a non-None default value")
                        arg.default = None
                else:
                    error(arg.pos, "This argument cannot have a default value")
                    arg.default = None
            elif arg.kw_only:
                default_seen = 1
            elif default_seen:
                error(arg.pos, "Non-default argument following default argument")

    def analyse_annotations(self, env):
        for arg in self.args:
            if arg.annotation:
                arg.annotation = arg.annotation.analyse_types(env)
        if self.return_type_annotation:
            self.return_type_annotation = self.return_type_annotation.analyse_types(env)

    def align_argument_type(self, env, arg):
        # @cython.locals()
        # Reconcile an argument's declared type with a cython.locals()
        # directive or a PEP-484 annotation; errors on disagreement.
        directive_locals = self.directive_locals
        orig_type = arg.type
        if arg.name in directive_locals:
            type_node = directive_locals[arg.name]
            other_type = type_node.analyse_as_type(env)
        elif isinstance(arg, CArgDeclNode) and arg.annotation and env.directives['annotation_typing']:
            type_node = arg.annotation
            other_type = arg.inject_type_from_annotations(env)
            if other_type is None:
                return arg
        else:
            return arg
        if other_type is None:
            error(type_node.pos, "Not a type")
        elif orig_type is not py_object_type and not orig_type.same_as(other_type):
            error(arg.base_type.pos, "Signature does not agree with previous declaration")
            error(type_node.pos, "Previous declaration here")
        else:
            arg.type = other_type
            if arg.type.is_complex:
                # utility code for complex types is special-cased and also important to ensure that it's run
                arg.type.create_declaration_utility_code(env)
        return arg

    def need_gil_acquisition(self, lenv):
        return 0

    def create_local_scope(self, env):
        # Create the function's local scope: a closure/genexpr scope when
        # inner functions or yields require it, else a plain LocalScope.
        genv = env
        while genv.is_py_class_scope or genv.is_c_class_scope:
            genv = genv.outer_scope
        if self.needs_closure:
            cls = GeneratorExpressionScope if self.is_generator_expression else ClosureScope
            lenv = cls(name=self.entry.name,
                       outer_scope=genv,
                       parent_scope=env,
                       scope_name=self.entry.cname)
        else:
            lenv = LocalScope(name=self.entry.name,
                              outer_scope=genv,
                              parent_scope=env)
        lenv.return_type = self.return_type
        type = self.entry.type
        if type.is_cfunction:
            lenv.nogil = type.nogil and not type.with_gil
        self.local_scope = lenv
        lenv.directives = env.directives
        return lenv

    def generate_function_body(self, env, code):
        self.body.generate_execution_code(code)

    def generate_function_definitions(self, env, code):
        # Emit the complete C function: header, declarations, GIL handling,
        # profiling hooks, body and cleanup.  (Continues beyond this chunk.)
        from . import Buffer

        lenv = self.local_scope
        if lenv.is_closure_scope and not lenv.is_passthrough:
            outer_scope_cname = "%s->%s" % (Naming.cur_scope_cname,
                                            Naming.outer_scope_cname)
        else:
            outer_scope_cname = Naming.outer_scope_cname
        lenv.mangle_closure_cnames(outer_scope_cname)
        # Generate closure function definitions
        self.body.generate_function_definitions(lenv, code)
        # generate lambda function definitions
        self.generate_lambda_definitions(lenv, code)

        is_getbuffer_slot = (self.entry.name == "__getbuffer__" and
                             self.entry.scope.is_c_class_scope)
        is_releasebuffer_slot = (self.entry.name == "__releasebuffer__" and
                                 self.entry.scope.is_c_class_scope)
        is_buffer_slot = is_getbuffer_slot or is_releasebuffer_slot
        if is_buffer_slot:
            if 'cython_unused' not in self.modifiers:
                self.modifiers = self.modifiers + ['cython_unused']

        preprocessor_guard = self.get_preprocessor_guard()

        profile = code.globalstate.directives['profile']
        linetrace = code.globalstate.directives['linetrace']
        if profile or linetrace:
            if linetrace:
                code.use_fast_gil_utility_code()
            code.globalstate.use_utility_code(
                UtilityCode.load_cached("Profile", "Profile.c"))

        # Generate C code for header and body of function
        code.enter_cfunc_scope(lenv)
        code.return_from_error_cleanup_label = code.new_label()
        code.funcstate.gil_owned = not lenv.nogil

        # ----- Top-level constants used by this function
        code.mark_pos(self.pos)
        self.generate_cached_builtins_decls(lenv, code)
        # ----- Function header
        code.putln("")

        if preprocessor_guard:
            code.putln(preprocessor_guard)

        with_pymethdef = (self.needs_assignment_synthesis(env, code) or
                          self.pymethdef_required)
        if self.py_func:
            self.py_func.generate_function_header(
                code, with_pymethdef=with_pymethdef, proto_only=True)
        self.generate_function_header(code, with_pymethdef=with_pymethdef)
        # ----- Local variable declarations
        # Find function scope
        cenv = env
        while cenv.is_py_class_scope or cenv.is_c_class_scope:
            cenv = cenv.outer_scope
        if self.needs_closure:
            code.put(lenv.scope_class.type.declaration_code(Naming.cur_scope_cname))
            code.putln(";")
        elif self.needs_outer_scope:
            if lenv.is_passthrough:
                code.put(lenv.scope_class.type.declaration_code(Naming.cur_scope_cname))
                code.putln(";")
            code.put(cenv.scope_class.type.declaration_code(Naming.outer_scope_cname))
            code.putln(";")
        self.generate_argument_declarations(lenv, code)

        for entry in lenv.var_entries:
            if not (entry.in_closure or entry.is_arg):
                code.put_var_declaration(entry)

        # Initialize the return variable __pyx_r
        init = ""
        return_type = self.return_type
        if return_type.is_cv_qualified and return_type.is_const:
            # Within this function body, we want to be able to set this
            # variable, even though the function itself needs to return
            # a const version
            return_type = return_type.cv_base_type
        if not return_type.is_void:
            if return_type.is_pyobject:
                init = " = NULL"
            elif return_type.is_memoryviewslice:
                init = ' = ' + return_type.literal_code(return_type.default_value)

            code.putln("%s%s;" % (
                return_type.declaration_code(Naming.retval_cname),
                init))

        tempvardecl_code = code.insertion_point()
        self.generate_keyword_list(code)

        # ----- GIL acquisition
        acquire_gil = self.acquire_gil

        used_buffer_entries = [entry for entry in lenv.buffer_entries if entry.used]

        # See if we need to acquire the GIL for variable declarations, or for
        # refnanny only

        # Closures are not currently possible for cdef nogil functions,
        # but check them anyway
        var_decls_definitely_need_gil = lenv.nogil and (self.needs_closure or self.needs_outer_scope)

        gilstate_decl = None
        var_decls_need_gil = False
        if acquire_gil or var_decls_definitely_need_gil:
            code.put_ensure_gil()
            code.funcstate.gil_owned = True
            var_decls_need_gil = True
        else:
            gilstate_decl = code.insertion_point()

        if profile or linetrace:
            if not self.is_generator:
                #
generators are traced when iterated, not at creation + tempvardecl_code.put_trace_declarations() + code_object = self.code_object.calculate_result_code(code) if self.code_object else None + code.put_trace_frame_init(code_object) + + # ----- Special check for getbuffer + if is_getbuffer_slot: + self.getbuffer_check(code) + + # ----- set up refnanny + refnanny_decl_code = tempvardecl_code.insertion_point() + refnanny_setup_code = code.insertion_point() + + # ----- Automatic lead-ins for certain special functions + if is_getbuffer_slot: + self.getbuffer_init(code) + # ----- Create closure scope object + if self.needs_closure: + tp_slot = TypeSlots.ConstructorSlot("tp_new", '__new__') + slot_func_cname = TypeSlots.get_slot_function(lenv.scope_class.type.scope, tp_slot) + if not slot_func_cname: + slot_func_cname = '%s->tp_new' % lenv.scope_class.type.typeptr_cname + code.putln("%s = (%s)%s(%s, %s, NULL);" % ( + Naming.cur_scope_cname, + lenv.scope_class.type.empty_declaration_code(), + slot_func_cname, + lenv.scope_class.type.typeptr_cname, + Naming.empty_tuple)) + code.putln("if (unlikely(!%s)) {" % Naming.cur_scope_cname) + # Scope unconditionally DECREFed on return. + code.putln("%s = %s;" % ( + Naming.cur_scope_cname, + lenv.scope_class.type.cast_code("Py_None"))) + code.put_incref("Py_None", py_object_type) + code.putln(code.error_goto(self.pos)) + code.putln("} else {") + code.put_gotref(Naming.cur_scope_cname, lenv.scope_class.type) + code.putln("}") + # Note that it is unsafe to decref the scope at this point. 
        # (generate_function_definitions, continued)
        # outer-scope wiring, call tracing and argument reference counting
        if self.needs_outer_scope:
            if self.is_cyfunction:
                code.putln("%s = (%s) __Pyx_CyFunction_GetClosure(%s);" % (
                    outer_scope_cname,
                    cenv.scope_class.type.empty_declaration_code(),
                    Naming.self_cname))
            else:
                code.putln("%s = (%s) %s;" % (
                    outer_scope_cname,
                    cenv.scope_class.type.empty_declaration_code(),
                    Naming.self_cname))
            if lenv.is_passthrough:
                code.putln("%s = %s;" % (Naming.cur_scope_cname, outer_scope_cname))
            elif self.needs_closure:
                # inner closures own a reference to their outer parent
                code.put_incref(outer_scope_cname, cenv.scope_class.type)
                code.put_giveref(outer_scope_cname, cenv.scope_class.type)
        # ----- Trace function call
        if profile or linetrace:
            # this looks a bit late, but if we don't get here due to a
            # fatal error before hand, it's not really worth tracing
            if not self.is_generator:
                # generators are traced when iterated, not at creation
                if self.is_wrapper:
                    trace_name = self.entry.name + " (wrapper)"
                else:
                    trace_name = self.entry.name
                code.put_trace_call(
                    trace_name, self.pos, nogil=not code.funcstate.gil_owned)
            code.funcstate.can_trace = True
        # ----- Fetch arguments
        self.generate_argument_parsing_code(env, code)
        # If an argument is assigned to in the body, we must
        # incref it to properly keep track of refcounts.
        for entry in lenv.arg_entries:
            if not entry.type.is_memoryviewslice:
                if (acquire_gil or entry.cf_is_reassigned) and not entry.in_closure:
                    code.put_var_incref(entry)
            # Note: defaults are always incref-ed. For def functions,
            #       we acquire arguments from object conversion, so we have
            #       new references. If we are a cdef function, we need to
            #       incref our arguments
            elif entry.cf_is_reassigned and not entry.in_closure:
                code.put_var_incref_memoryviewslice(entry,
                                                    have_gil=code.funcstate.gil_owned)
        for entry in lenv.var_entries:
            if entry.is_arg and entry.cf_is_reassigned and not entry.in_closure:
                if entry.type.is_memoryviewslice:
                    code.put_var_incref_memoryviewslice(entry,
                                                        have_gil=code.funcstate.gil_owned)
                if entry.xdecref_cleanup:
                    code.put_var_xincref(entry)
                else:
                    code.put_var_incref(entry)

        # ----- Initialise local buffer auxiliary variables
        for entry in lenv.var_entries + lenv.arg_entries:
            if entry.type.is_buffer and entry.buffer_aux.buflocal_nd_var.used:
                Buffer.put_init_vars(entry, code)

        # ----- Check and convert arguments
        self.generate_argument_type_tests(code)
        # ----- Acquire buffer arguments
        for entry in lenv.arg_entries:
            if entry.type.is_buffer:
                Buffer.put_acquire_arg_buffer(entry, code, self.pos)

        if code.funcstate.needs_refnanny:
            # if this is true there's definitely some reference counting in
            # the variable declarations
            var_decls_need_gil = True

        if var_decls_need_gil and lenv.nogil:
            # the declarations needed the GIL, but the body runs nogil:
            # acquire it (at the deferred insertion point if any), then release
            if gilstate_decl is not None:
                gilstate_decl.put_ensure_gil()
                gilstate_decl = None
                code.funcstate.gil_owned = True
            code.put_release_ensured_gil()
            code.funcstate.gil_owned = False

        # -------------------------
        # ----- Function body -----
        # -------------------------
        self.generate_function_body(env, code)

        code.mark_pos(self.pos, trace=False)
        code.putln("")
        code.putln("/* function exit code */")

        # track GIL ownership separately for the success and error exit paths
        gil_owned = {
            'success': code.funcstate.gil_owned,
            'error': code.funcstate.gil_owned,
            'gil_state_declared': gilstate_decl is None,
        }

        def assure_gil(code_path, code=code):
            # ensure the GIL is held on the given exit path, declaring the
            # gilstate variable lazily at the remembered insertion point
            if not gil_owned[code_path]:
                if not gil_owned['gil_state_declared']:
                    gilstate_decl.declare_gilstate()
                    gil_owned['gil_state_declared'] = True
                code.put_ensure_gil(declare_gilstate=False)
                gil_owned[code_path] = True
        # (generate_function_definitions, continued)
        # default return value and error-path cleanup
        # ----- Default return value
        return_type = self.return_type
        if not self.body.is_terminator:
            if return_type.is_pyobject:
                #if return_type.is_extension_type:
                #    lhs = "(PyObject *)%s" % Naming.retval_cname
                #else:
                lhs = Naming.retval_cname
                assure_gil('success')
                code.put_init_to_py_none(lhs, return_type)
            elif not return_type.is_memoryviewslice:
                # memory view structs receive their default value on initialisation
                val = return_type.default_value
                if val:
                    code.putln("%s = %s;" % (Naming.retval_cname, val))
                elif not return_type.is_void:
                    code.putln("__Pyx_pretend_to_initialize(&%s);" % Naming.retval_cname)

        # ----- Error cleanup
        if code.label_used(code.error_label):
            if not self.body.is_terminator:
                code.put_goto(code.return_label)
            code.put_label(code.error_label)
            for cname, type in code.funcstate.all_managed_temps():
                assure_gil('error')
                code.put_xdecref(cname, type, have_gil=gil_owned['error'])

            # Clean up buffers -- this calls a Python function
            # so need to save and restore error state
            buffers_present = len(used_buffer_entries) > 0
            #memslice_entries = [e for e in lenv.entries.values() if e.type.is_memoryviewslice]
            if buffers_present:
                code.globalstate.use_utility_code(restore_exception_utility_code)
                code.putln("{ PyObject *__pyx_type, *__pyx_value, *__pyx_tb;")
                code.putln("__Pyx_PyThreadState_declare")
                assure_gil('error')
                code.putln("__Pyx_PyThreadState_assign")
                code.putln("__Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb);")
                for entry in used_buffer_entries:
                    Buffer.put_release_buffer_code(code, entry)
                    #code.putln("%s = 0;" % entry.cname)
                code.putln("__Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);}")

            if return_type.is_memoryviewslice:
                from . import MemoryView
                MemoryView.put_init_entry(Naming.retval_cname, code)
                err_val = Naming.retval_cname
            else:
                err_val = self.error_value()

            exc_check = self.caller_will_check_exceptions()
            if err_val is not None or exc_check:
                # TODO: Fix exception tracing (though currently unused by cProfile).
                # code.globalstate.use_utility_code(get_exception_tuple_utility_code)
                # code.put_trace_exception()

                assure_gil('error')
                if code.funcstate.error_without_exception:
                    tempvardecl_code.putln(
                        "int %s = 0; /* StopIteration */" % Naming.error_without_exception_cname
                    )
                    code.putln("if (!%s) {" % Naming.error_without_exception_cname)
                code.put_add_traceback(self.entry.qualified_name)
                if code.funcstate.error_without_exception:
                    code.putln("}")
            else:
                warning(self.entry.pos,
                        "Unraisable exception in function '%s'." %
                        self.entry.qualified_name, 0)
                assure_gil('error')
                code.put_unraisable(self.entry.qualified_name)
            default_retval = return_type.default_value
            if err_val is None and default_retval:
                err_val = default_retval
            if err_val is not None:
                if err_val != Naming.retval_cname:
                    code.putln("%s = %s;" % (Naming.retval_cname, err_val))
            elif not return_type.is_void:
                code.putln("__Pyx_pretend_to_initialize(&%s);" % Naming.retval_cname)

            if is_getbuffer_slot:
                assure_gil('error')
                self.getbuffer_error_cleanup(code)

            def align_error_path_gil_to_success_path(code=code.insertion_point()):
                # align error and success GIL state when both join
                if gil_owned['success']:
                    assure_gil('error', code=code)
                elif gil_owned['error']:
                    code.put_release_ensured_gil()
                    gil_owned['error'] = False
                assert gil_owned['error'] == gil_owned['success'], "%s: error path %s != success path %s" % (
                    self.pos, gil_owned['error'], gil_owned['success'])

            # If we are using the non-error cleanup section we should
            # jump past it if we have an error. The if-test below determines
            # whether this section is used.
            # (generate_function_definitions, continued)
            # route the error path either past or into the non-error cleanup
            if buffers_present or is_getbuffer_slot or return_type.is_memoryviewslice:
                # In the buffer cases, we already called assure_gil('error') and own the GIL.
                assert gil_owned['error'] or return_type.is_memoryviewslice
                code.put_goto(code.return_from_error_cleanup_label)
            else:
                # Adapt the GIL state to the success path right now.
                align_error_path_gil_to_success_path()
        else:
            # No error path, no need to adapt the GIL state.
            def align_error_path_gil_to_success_path(): pass

        # ----- Non-error return cleanup
        if code.label_used(code.return_label) or not code.label_used(code.error_label):
            code.put_label(code.return_label)

            for entry in used_buffer_entries:
                assure_gil('success')
                Buffer.put_release_buffer_code(code, entry)
            if is_getbuffer_slot:
                assure_gil('success')
                self.getbuffer_normal_cleanup(code)

            if return_type.is_memoryviewslice:
                # See if our return value is uninitialized on non-error return
                # from . import MemoryView
                # MemoryView.err_if_nogil_initialized_check(self.pos, env)
                cond = code.unlikely(return_type.error_condition(Naming.retval_cname))
                code.putln(
                    'if (%s) {' % cond)
                if not gil_owned['success']:
                    code.put_ensure_gil()
                code.putln(
                    'PyErr_SetString(PyExc_TypeError, "Memoryview return value is not initialized");')
                if not gil_owned['success']:
                    code.put_release_ensured_gil()
                code.putln(
                    '}')

        # ----- Return cleanup for both error and no-error return
        if code.label_used(code.return_from_error_cleanup_label):
            align_error_path_gil_to_success_path()
            code.put_label(code.return_from_error_cleanup_label)

        for entry in lenv.var_entries:
            if not entry.used or entry.in_closure:
                continue

            if entry.type.needs_refcounting:
                if entry.is_arg and not entry.cf_is_reassigned:
                    continue
                if entry.type.refcounting_needs_gil:
                    assure_gil('success')
                # FIXME ideally use entry.xdecref_cleanup but this currently isn't reliable
                code.put_var_xdecref(entry, have_gil=gil_owned['success'])

        # Decref any increfed args
        for entry in lenv.arg_entries:
            if entry.in_closure:
                continue
            if entry.type.is_memoryviewslice:
                # decref slices of def functions and acquired slices from cdef
                # functions, but not borrowed slices from cdef functions.
                if not entry.cf_is_reassigned:
                    continue
            else:
                if not acquire_gil and not entry.cf_is_reassigned:
                    continue
            if entry.type.needs_refcounting:
                assure_gil('success')

            # FIXME use entry.xdecref_cleanup - del arg seems to be the problem
            code.put_var_xdecref(entry, have_gil=gil_owned['success'])
        if self.needs_closure:
            assure_gil('success')
            code.put_decref(Naming.cur_scope_cname, lenv.scope_class.type)

        # ----- Return
        # This code is duplicated in ModuleNode.generate_module_init_func
        if not lenv.nogil:
            default_retval = return_type.default_value
            err_val = self.error_value()
            if err_val is None and default_retval:
                err_val = default_retval  # FIXME: why is err_val not used?
            code.put_xgiveref(Naming.retval_cname, return_type)

        if self.entry.is_special and self.entry.name == "__hash__":
            # Returning -1 for __hash__ is supposed to signal an error
            # We do as Python instances and coerce -1 into -2.
+ assure_gil('success') # in special methods, the GIL is owned anyway + code.putln("if (unlikely(%s == -1) && !PyErr_Occurred()) %s = -2;" % ( + Naming.retval_cname, Naming.retval_cname)) + + if profile or linetrace: + code.funcstate.can_trace = False + if not self.is_generator: + # generators are traced when iterated, not at creation + if return_type.is_pyobject: + code.put_trace_return( + Naming.retval_cname, nogil=not gil_owned['success']) + else: + code.put_trace_return( + "Py_None", nogil=not gil_owned['success']) + + if code.funcstate.needs_refnanny: + refnanny_decl_code.put_declare_refcount_context() + refnanny_setup_code.put_setup_refcount_context( + self.entry.name, acquire_gil=not var_decls_need_gil) + code.put_finish_refcount_context(nogil=not gil_owned['success']) + + if acquire_gil or (lenv.nogil and gil_owned['success']): + # release the GIL (note that with-gil blocks acquire it on exit in their EnsureGILNode) + code.put_release_ensured_gil() + code.funcstate.gil_owned = False + + if not return_type.is_void: + code.putln("return %s;" % Naming.retval_cname) + + code.putln("}") + + if preprocessor_guard: + code.putln("#endif /*!(%s)*/" % preprocessor_guard) + + # ----- Go back and insert temp variable declarations + tempvardecl_code.put_temp_declarations(code.funcstate) + + # ----- Python version + code.exit_cfunc_scope() + if self.py_func: + self.py_func.generate_function_definitions(env, code) + self.generate_wrapper_functions(code) + + def declare_argument(self, env, arg): + if arg.type.is_void: + error(arg.pos, "Invalid use of 'void'") + elif not arg.type.is_complete() and not (arg.type.is_array or arg.type.is_memoryviewslice): + error(arg.pos, "Argument type '%s' is incomplete" % arg.type) + entry = env.declare_arg(arg.name, arg.type, arg.pos) + if arg.annotation: + entry.annotation = arg.annotation + return entry + + def generate_arg_type_test(self, arg, code): + # Generate type test for one argument. 
        if arg.type.typeobj_is_available():
            code.globalstate.use_utility_code(
                UtilityCode.load_cached("ArgTypeTest", "FunctionArguments.c"))
            typeptr_cname = arg.type.typeptr_cname
            arg_code = "((PyObject *)%s)" % arg.entry.cname
            code.putln(
                'if (unlikely(!__Pyx_ArgTypeTest(%s, %s, %d, %s, %s))) %s' % (
                    arg_code,
                    typeptr_cname,
                    arg.accept_none,
                    arg.name_cstring,
                    arg.type.is_builtin_type and arg.type.require_exact,
                    code.error_goto(arg.pos)))
        else:
            error(arg.pos, "Cannot test type of extern C class without type object name specification")

    def generate_arg_none_check(self, arg, code):
        # Generate None check for one argument.
        if arg.type.is_memoryviewslice:
            cname = "%s.memview" % arg.entry.cname
        else:
            cname = arg.entry.cname

        code.putln('if (unlikely(((PyObject *)%s) == Py_None)) {' % cname)
        code.putln('''PyErr_Format(PyExc_TypeError, "Argument '%%.%ds' must not be None", %s); %s''' % (
            max(200, len(arg.name_cstring)), arg.name_cstring,
            code.error_goto(arg.pos)))
        code.putln('}')

    def generate_wrapper_functions(self, code):
        # Hook: no wrapper functions by default; subclasses may override.
        pass

    def generate_execution_code(self, code):
        code.mark_pos(self.pos)
        # Evaluate and store argument default values
        # skip this for wrappers since it's done by wrapped function
        if not self.is_wrapper:
            for arg in self.args:
                if not arg.is_dynamic:
                    arg.generate_assignment_code(code)

    #
    # Special code for the __getbuffer__ function
    #

    def _get_py_buffer_info(self):
        """Return (Py_buffer arg entry, type of its 'obj' field or None)."""
        py_buffer = self.local_scope.arg_entries[1]
        try:
            # Check builtin definition of struct Py_buffer
            obj_type = py_buffer.type.base_type.scope.entries['obj'].type
        except (AttributeError, KeyError):
            # User code redeclared struct Py_buffer
            obj_type = None
        return py_buffer, obj_type

    # Old Python 3 used to support write-locks on buffer-like objects by
    # calling PyObject_GetBuffer() with a view==NULL parameter. This obscure
    # feature is obsolete, it was almost never used (only one instance in
    # `Modules/posixmodule.c` in Python 3.1) and it is now officially removed
    # (see bpo-14203). We add an extra check here to prevent legacy code
    # from trying to use the feature and prevent segmentation faults.
    def getbuffer_check(self, code):
        py_buffer, _ = self._get_py_buffer_info()
        view = py_buffer.cname
        code.putln("if (unlikely(%s == NULL)) {" % view)
        code.putln("PyErr_SetString(PyExc_BufferError, "
                   "\"PyObject_GetBuffer: view==NULL argument is obsolete\");")
        code.putln("return -1;")
        code.putln("}")

    def getbuffer_init(self, code):
        py_buffer, obj_type = self._get_py_buffer_info()
        view = py_buffer.cname
        if obj_type and obj_type.is_pyobject:
            code.put_init_to_py_none("%s->obj" % view, obj_type)
            code.put_giveref("%s->obj" % view, obj_type)  # Do not refnanny object within structs
        else:
            code.putln("%s->obj = NULL;" % view)

    def getbuffer_error_cleanup(self, code):
        py_buffer, obj_type = self._get_py_buffer_info()
        view = py_buffer.cname
        if obj_type and obj_type.is_pyobject:
            code.putln("if (%s->obj != NULL) {" % view)
            code.put_gotref("%s->obj" % view, obj_type)
            code.put_decref_clear("%s->obj" % view, obj_type)
            code.putln("}")
        else:
            code.putln("Py_CLEAR(%s->obj);" % view)

    def getbuffer_normal_cleanup(self, code):
        py_buffer, obj_type = self._get_py_buffer_info()
        view = py_buffer.cname
        if obj_type and obj_type.is_pyobject:
            code.putln("if (%s->obj == Py_None) {" % view)
            code.put_gotref("%s->obj" % view, obj_type)
            code.put_decref_clear("%s->obj" % view, obj_type)
            code.putln("}")

    def get_preprocessor_guard(self):
        """Return the C preprocessor guard for this special method's slot,
        or None if the method is always compiled."""
        if not self.entry.is_special:
            return None
        name = self.entry.name
        slot = TypeSlots.get_slot_table(self.local_scope.directives).get_slot_by_method_name(name)
        if not slot:
            return None
        if name == '__long__' and not self.entry.scope.lookup_here('__int__'):
            return None
        if name in ("__getbuffer__", "__releasebuffer__") and self.entry.scope.is_c_class_scope:
            return None
        return slot.preprocessor_guard_code()


class CFuncDefNode(FuncDefNode):
    #  C function definition.
    #
    #  modifiers     ['inline']
    #  visibility    'private' or 'public' or 'extern'
    #  base_type     CBaseTypeNode
    #  declarator    CDeclaratorNode
    #  cfunc_declarator  the CFuncDeclarator of this function
    #                    (this is also available through declarator or a
    #                     base thereof)
    #  body          StatListNode
    #  api           boolean
    #  decorators    [DecoratorNode]   list of decorators
    #
    #  with_gil      boolean           Acquire GIL around body
    #  type          CFuncType
    #  py_func       wrapper for calling from Python
    #  overridable   whether or not this is a cpdef function
    #  inline_in_pxd whether this is an inline function in a pxd file
    #  template_declaration  String or None   Used for c++ class methods
    #  is_const_method   whether this is a const method
    #  is_static_method  whether this is a static method
    #  is_c_class_method whether this is a cclass method

    child_attrs = ["base_type", "declarator", "body", "decorators", "py_func_stat"]
    outer_attrs = ["decorators", "py_func_stat"]

    inline_in_pxd = False
    decorators = None
    directive_locals = None
    directive_returns = None
    override = None
    template_declaration = None
    is_const_method = False
    py_func_stat = None

    def unqualified_name(self):
        return self.entry.name

    def declared_name(self):
        return self.declarator.declared_name()

    @property
    def code_object(self):
        # share the CodeObject with the cpdef wrapper (if available)
        return self.py_func.code_object if self.py_func else None

    def analyse_declarations(self, env):
        """Analyse the declaration of this cdef/cpdef function: resolve its
        return and argument types, validate visibility, declare its entry in
        *env*, and synthesise the cpdef Python wrapper when overridable."""
        self.is_c_class_method = env.is_c_class_scope
        if self.directive_locals is None:
            self.directive_locals = {}
        self.directive_locals.update(env.directives.get('locals', {}))
        if self.directive_returns is not None:
            # @cython.returns() overrides the declared return base type
            base_type = self.directive_returns.analyse_as_type(env)
            if base_type is None:
error(self.directive_returns.pos, "Not a type") + base_type = PyrexTypes.error_type + else: + base_type = self.base_type.analyse(env) + self.is_static_method = 'staticmethod' in env.directives and not env.lookup_here('staticmethod') + # The 2 here is because we need both function and argument names. + if isinstance(self.declarator, CFuncDeclaratorNode): + name_declarator, typ = self.declarator.analyse( + base_type, env, nonempty=2 * (self.body is not None), + directive_locals=self.directive_locals, visibility=self.visibility) + else: + name_declarator, typ = self.declarator.analyse( + base_type, env, nonempty=2 * (self.body is not None), visibility=self.visibility) + if not typ.is_cfunction: + error(self.pos, "Suite attached to non-function declaration") + # Remember the actual type according to the function header + # written here, because the type in the symbol table entry + # may be different if we're overriding a C method inherited + # from the base type of an extension type. + self.type = typ + typ.is_overridable = self.overridable + declarator = self.declarator + while not hasattr(declarator, 'args'): + declarator = declarator.base + + self.cfunc_declarator = declarator + self.args = declarator.args + + opt_arg_count = self.cfunc_declarator.optional_arg_count + if (self.visibility == 'public' or self.api) and opt_arg_count: + error(self.cfunc_declarator.pos, + "Function with optional arguments may not be declared public or api") + + if typ.exception_check == '+' and self.visibility != 'extern': + if typ.exception_value and typ.exception_value.is_name: + # it really is impossible to reason about what the user wants to happens + # if they've specified a C++ exception translation function. Therefore, + # raise an error. 
+ error(self.pos, + "Only extern functions can throw C++ exceptions.") + else: + warning(self.pos, + "Only extern functions can throw C++ exceptions.", 2) + + for formal_arg, type_arg in zip(self.args, typ.args): + self.align_argument_type(env, type_arg) + formal_arg.type = type_arg.type + formal_arg.name = type_arg.name + formal_arg.cname = type_arg.cname + + self._validate_type_visibility(type_arg.type, type_arg.pos, env) + + if type_arg.type.is_fused: + self.has_fused_arguments = True + + if type_arg.type.is_buffer and 'inline' in self.modifiers: + warning(formal_arg.pos, "Buffer unpacking not optimized away.", 1) + + if type_arg.type.is_buffer or type_arg.type.is_pythran_expr: + if self.type.nogil: + error(formal_arg.pos, + "Buffer may not be acquired without the GIL. Consider using memoryview slices instead.") + elif 'inline' in self.modifiers: + warning(formal_arg.pos, "Buffer unpacking not optimized away.", 1) + + self._validate_type_visibility(typ.return_type, self.pos, env) + + name = name_declarator.name + cname = name_declarator.cname + + typ.is_const_method = self.is_const_method + typ.is_static_method = self.is_static_method + + self.entry = env.declare_cfunction( + name, typ, self.pos, + cname=cname, visibility=self.visibility, api=self.api, + defining=self.body is not None, modifiers=self.modifiers, + overridable=self.overridable, in_pxd=self.inline_in_pxd) + self.return_type = typ.return_type + + if self.return_type.is_array and self.visibility != 'extern': + error(self.pos, "Function cannot return an array") + if self.return_type.is_cpp_class: + self.return_type.check_nullary_constructor(self.pos, "used as a return value") + + if self.overridable and not env.is_module_scope and not self.is_static_method: + if len(self.args) < 1 or not self.args[0].type.is_pyobject: + # An error will be produced in the cdef function + self.overridable = False + + self.declare_cpdef_wrapper(env) + self.create_local_scope(env) + + def declare_cpdef_wrapper(self, env): 
        # Synthesise a Python-visible 'def' wrapper (self.py_func) for a cpdef
        # function, plus an OverrideCheckNode so Python-level overrides win.
        if not self.overridable:
            return
        if self.is_static_method:
            # TODO(robertwb): Finish this up, perhaps via more function refactoring.
            error(self.pos, "static cpdef methods not yet supported")

        name = self.entry.name
        py_func_body = self.call_self_node(is_module_scope=env.is_module_scope)
        if self.is_static_method:
            from .ExprNodes import NameNode
            decorators = [DecoratorNode(self.pos, decorator=NameNode(self.pos, name=EncodedString('staticmethod')))]
            decorators[0].decorator.analyse_types(env)
        else:
            decorators = []
        self.py_func = DefNode(pos=self.pos,
                               name=self.entry.name,
                               args=self.args,
                               star_arg=None,
                               starstar_arg=None,
                               doc=self.doc,
                               body=py_func_body,
                               decorators=decorators,
                               is_wrapper=1)
        self.py_func.is_module_scope = env.is_module_scope
        self.py_func.analyse_declarations(env)
        self.py_func.entry.is_overridable = True
        self.py_func_stat = StatListNode(self.pos, stats=[self.py_func])
        self.py_func.type = PyrexTypes.py_object_type
        self.entry.as_variable = self.py_func.entry
        self.entry.used = self.entry.as_variable.used = True
        # Reset the scope entry to the above cfunction
        env.entries[name] = self.entry
        if (not self.entry.is_final_cmethod and
                (not env.is_module_scope or Options.lookup_module_cpdef)):
            if self.override:
                # This is a hack: we shouldn't create the wrapper twice, but we do for fused functions.
                assert self.entry.is_fused_specialized  # should not happen for non-fused cpdef functions
                self.override.py_func = self.py_func
            else:
                self.override = OverrideCheckNode(self.pos, py_func=self.py_func)
                self.body = StatListNode(self.pos, stats=[self.override, self.body])

    def _validate_type_visibility(self, type, pos, env):
        """
        Ensure that types used in cdef functions are public or api, or
        defined in a C header.
        """
        public_or_api = (self.visibility == 'public' or self.api)
        entry = getattr(type, 'entry', None)
        if public_or_api and entry and env.is_module_scope:
            if not (entry.visibility in ('public', 'extern') or
                    entry.api or entry.in_cinclude):
                error(pos, "Function declared public or api may not have private types")

    def call_self_node(self, omit_optional_args=0, is_module_scope=0):
        """Build the ReturnStatNode used as the body of the cpdef wrapper:
        a call back into this C function with the wrapper's arguments."""
        from . import ExprNodes
        args = self.type.args
        if omit_optional_args:
            args = args[:len(args) - self.type.optional_arg_count]
        arg_names = [arg.name for arg in args]
        if is_module_scope:
            cfunc = ExprNodes.NameNode(self.pos, name=self.entry.name)
            call_arg_names = arg_names
            skip_dispatch = Options.lookup_module_cpdef
        elif self.type.is_static_method:
            class_entry = self.entry.scope.parent_type.entry
            class_node = ExprNodes.NameNode(self.pos, name=class_entry.name)
            class_node.entry = class_entry
            cfunc = ExprNodes.AttributeNode(self.pos, obj=class_node, attribute=self.entry.name)
            # Calling static c(p)def methods on an instance disallowed.
            # TODO(robertwb): Support by passing self to check for override?
+ skip_dispatch = True + else: + type_entry = self.type.args[0].type.entry + type_arg = ExprNodes.NameNode(self.pos, name=type_entry.name) + type_arg.entry = type_entry + cfunc = ExprNodes.AttributeNode(self.pos, obj=type_arg, attribute=self.entry.name) + skip_dispatch = not is_module_scope or Options.lookup_module_cpdef + c_call = ExprNodes.SimpleCallNode( + self.pos, + function=cfunc, + args=[ExprNodes.NameNode(self.pos, name=n) for n in arg_names], + wrapper_call=skip_dispatch) + return ReturnStatNode(pos=self.pos, return_type=PyrexTypes.py_object_type, value=c_call) + + def declare_arguments(self, env): + for arg in self.type.args: + if not arg.name: + error(arg.pos, "Missing argument name") + self.declare_argument(env, arg) + + def need_gil_acquisition(self, lenv): + return self.type.with_gil + + def nogil_check(self, env): + type = self.type + with_gil = type.with_gil + if type.nogil and not with_gil: + if type.return_type.is_pyobject: + error(self.pos, + "Function with Python return type cannot be declared nogil") + for entry in self.local_scope.var_entries: + if entry.type.is_pyobject and not entry.in_with_gil_block: + error(self.pos, "Function declared nogil has Python locals or temporaries") + + def analyse_expressions(self, env): + self.local_scope.directives = env.directives + if self.py_func_stat is not None: + # this will also analyse the default values and the function name assignment + self.py_func_stat = self.py_func_stat.analyse_expressions(env) + elif self.py_func is not None: + # this will also analyse the default values + self.py_func = self.py_func.analyse_expressions(env) + else: + self.analyse_default_values(env) + self.analyse_annotations(env) + self.acquire_gil = self.need_gil_acquisition(self.local_scope) + return self + + def needs_assignment_synthesis(self, env, code=None): + return False + + def generate_function_header(self, code, with_pymethdef, with_opt_args=1, with_dispatch=1, cname=None): + scope = self.local_scope + arg_decls = 
[] + type = self.type + for arg in type.args[:len(type.args)-type.optional_arg_count]: + arg_decl = arg.declaration_code() + entry = scope.lookup(arg.name) + if not entry.cf_used: + arg_decl = 'CYTHON_UNUSED %s' % arg_decl + arg_decls.append(arg_decl) + if with_dispatch and self.overridable: + dispatch_arg = PyrexTypes.c_int_type.declaration_code( + Naming.skip_dispatch_cname) + if self.override: + arg_decls.append(dispatch_arg) + else: + arg_decls.append('CYTHON_UNUSED %s' % dispatch_arg) + if type.optional_arg_count and with_opt_args: + arg_decls.append(type.op_arg_struct.declaration_code(Naming.optional_args_cname)) + if type.has_varargs: + arg_decls.append("...") + if not arg_decls: + arg_decls = ["void"] + if cname is None: + cname = self.entry.func_cname + entity = type.function_header_code(cname, ', '.join(arg_decls)) + if self.entry.visibility == 'private' and '::' not in cname: + storage_class = "static " + else: + storage_class = "" + dll_linkage = None + modifiers = code.build_function_modifiers(self.entry.func_modifiers) + + header = self.return_type.declaration_code(entity, dll_linkage=dll_linkage) + #print (storage_class, modifiers, header) + needs_proto = self.is_c_class_method or self.entry.is_cproperty + if self.template_declaration: + if needs_proto: + code.globalstate.parts['module_declarations'].putln(self.template_declaration) + code.putln(self.template_declaration) + if needs_proto: + code.globalstate.parts['module_declarations'].putln( + "%s%s%s; /* proto*/" % (storage_class, modifiers, header)) + code.putln("%s%s%s {" % (storage_class, modifiers, header)) + + def generate_argument_declarations(self, env, code): + scope = self.local_scope + for arg in self.args: + if arg.default: + entry = scope.lookup(arg.name) + if self.override or entry.cf_used: + result = arg.calculate_default_value_code(code) + code.putln('%s = %s;' % ( + arg.type.declaration_code(arg.cname), result)) + + def generate_keyword_list(self, code): + pass + + def 
    generate_argument_parsing_code(self, env, code):
        # (NOTE(review): the leading `def ` of this method sits in the
        # previous chunk of the file.)
        # Emit C code that copies supplied optional C arguments out of the
        # packed optional-args struct into their individual local variables.
        i = 0
        used = 0
        scope = self.local_scope
        if self.type.optional_arg_count:
            # The struct pointer is NULL when no optional args were passed.
            code.putln('if (%s) {' % Naming.optional_args_cname)
            for arg in self.args:
                if arg.default:
                    entry = scope.lookup(arg.name)
                    if self.override or entry.cf_used:
                        # Only assign when enough optionals were supplied:
                        # the struct's `n` field counts the ones passed.
                        code.putln('if (%s->%sn > %s) {' %
                                   (Naming.optional_args_cname,
                                    Naming.pyrex_prefix, i))
                        declarator = arg.declarator
                        # Unwrap nested declarators (pointers etc.) until we
                        # reach the one that carries the argument name.
                        while not hasattr(declarator, 'name'):
                            declarator = declarator.base
                        code.putln('%s = %s->%s;' %
                                   (arg.cname, Naming.optional_args_cname,
                                    self.type.opt_arg_cname(declarator.name)))
                        used += 1
                    i += 1
            # Close the nested `if (n > i)` blocks: a supplied optional
            # implies all earlier optionals were supplied too, so the
            # tests cascade instead of being emitted side by side.
            for _ in range(used):
                code.putln('}')
            code.putln('}')

        # Move arguments into closure if required
        def put_into_closure(entry):
            # NOTE(review): reads `arg` from the enclosing loop below, so
            # `not arg.default` refers to the loop's current argument;
            # defaulted args were already handled via the struct above.
            if entry.in_closure and not arg.default:
                code.putln('%s = %s;' % (entry.cname, entry.original_cname))
                if entry.type.is_memoryviewslice:
                    entry.type.generate_incref_memoryviewslice(code, entry.cname, True)
                else:
                    code.put_var_incref(entry)
                    code.put_var_giveref(entry)
        for arg in self.args:
            put_into_closure(scope.lookup_here(arg.name))


    def generate_argument_conversion_code(self, code):
        # C functions receive arguments with their declared C types already;
        # no Python->C conversion is needed (unlike the Python wrapper).
        pass

    def generate_argument_type_tests(self, code):
        # Generate type tests for args whose type in a parent
        # class is a supertype of the declared type.
+ for arg in self.type.args: + if arg.needs_type_test: + self.generate_arg_type_test(arg, code) + elif arg.type.is_pyobject and not arg.accept_none: + self.generate_arg_none_check(arg, code) + + def generate_execution_code(self, code): + if code.globalstate.directives['linetrace']: + code.mark_pos(self.pos) + code.putln("") # generate line tracing code + super(CFuncDefNode, self).generate_execution_code(code) + if self.py_func_stat: + self.py_func_stat.generate_execution_code(code) + + def error_value(self): + if self.return_type.is_pyobject: + return "0" + else: + return self.entry.type.exception_value + + def caller_will_check_exceptions(self): + return self.entry.type.exception_check + + def generate_wrapper_functions(self, code): + # If the C signature of a function has changed, we need to generate + # wrappers to put in the slots here. + k = 0 + entry = self.entry + func_type = entry.type + while entry.prev_entry is not None: + k += 1 + entry = entry.prev_entry + entry.func_cname = "%s%swrap_%s" % (self.entry.func_cname, Naming.pyrex_prefix, k) + code.putln() + self.generate_function_header( + code, 0, + with_dispatch=entry.type.is_overridable, + with_opt_args=entry.type.optional_arg_count, + cname=entry.func_cname) + if not self.return_type.is_void: + code.put('return ') + args = self.type.args + arglist = [arg.cname for arg in args[:len(args)-self.type.optional_arg_count]] + if entry.type.is_overridable: + arglist.append(Naming.skip_dispatch_cname) + elif func_type.is_overridable: + arglist.append('0') + if entry.type.optional_arg_count: + arglist.append(Naming.optional_args_cname) + elif func_type.optional_arg_count: + arglist.append('NULL') + code.putln('%s(%s);' % (self.entry.func_cname, ', '.join(arglist))) + code.putln('}') + + +class PyArgDeclNode(Node): + # Argument which must be a Python object (used + # for * and ** arguments). 
+ # + # name string + # entry Symtab.Entry + # annotation ExprNode or None Py3 argument annotation + child_attrs = [] + is_self_arg = False + is_type_arg = False + + def generate_function_definitions(self, env, code): + self.entry.generate_function_definitions(env, code) + + +class DecoratorNode(Node): + # A decorator + # + # decorator ExprNode + child_attrs = ['decorator'] + + +class DefNode(FuncDefNode): + # A Python function definition. + # + # name string the Python name of the function + # lambda_name string the internal name of a lambda 'function' + # decorators [DecoratorNode] list of decorators + # args [CArgDeclNode] formal arguments + # doc EncodedString or None + # body StatListNode + # return_type_annotation + # ExprNode or None the Py3 return type annotation + # + # The following subnode is constructed internally + # when the def statement is inside a Python class definition. + # + # fused_py_func DefNode The original fused cpdef DefNode + # (in case this is a specialization) + # specialized_cpdefs [DefNode] list of specialized cpdef DefNodes + # py_cfunc_node PyCFunctionNode/InnerFunctionNode The PyCFunction to create and assign + # + # decorator_indirection IndirectionNode Used to remove __Pyx_Method_ClassMethod for fused functions + + child_attrs = ["args", "star_arg", "starstar_arg", "body", "decorators", "return_type_annotation"] + outer_attrs = ["decorators", "return_type_annotation"] + + is_staticmethod = False + is_classmethod = False + + lambda_name = None + reqd_kw_flags_cname = "0" + is_wrapper = 0 + no_assignment_synthesis = 0 + decorators = None + return_type_annotation = None + entry = None + acquire_gil = 0 + self_in_stararg = 0 + py_cfunc_node = None + requires_classobj = False + defaults_struct = None # Dynamic kwrds structure name + doc = None + + fused_py_func = False + specialized_cpdefs = None + py_wrapper = None + py_wrapper_required = True + func_cname = None + + defaults_getter = None + + def __init__(self, pos, **kwds): + 
FuncDefNode.__init__(self, pos, **kwds) + p = k = rk = r = 0 + for arg in self.args: + if arg.pos_only: + p += 1 + if arg.kw_only: + k += 1 + if not arg.default: + rk += 1 + if not arg.default: + r += 1 + self.num_posonly_args = p + self.num_kwonly_args = k + self.num_required_kw_args = rk + self.num_required_args = r + + def as_cfunction(self, cfunc=None, scope=None, overridable=True, returns=None, except_val=None, has_explicit_exc_clause=False, + modifiers=None, nogil=False, with_gil=False): + if self.star_arg: + error(self.star_arg.pos, "cdef function cannot have star argument") + if self.starstar_arg: + error(self.starstar_arg.pos, "cdef function cannot have starstar argument") + exception_value, exception_check = except_val or (None, False) + nogil = nogil or with_gil + + if cfunc is None: + cfunc_args = [] + for formal_arg in self.args: + name_declarator, type = formal_arg.analyse(scope, nonempty=1) + cfunc_args.append(PyrexTypes.CFuncTypeArg(name=name_declarator.name, + cname=None, + annotation=formal_arg.annotation, + type=py_object_type, + pos=formal_arg.pos)) + cfunc_type = PyrexTypes.CFuncType(return_type=py_object_type, + args=cfunc_args, + has_varargs=False, + exception_value=None, + exception_check=exception_check, + nogil=nogil, + with_gil=with_gil, + is_overridable=overridable) + cfunc = CVarDefNode(self.pos, type=cfunc_type) + else: + if scope is None: + scope = cfunc.scope + cfunc_type = cfunc.type + if len(self.args) != len(cfunc_type.args) or cfunc_type.has_varargs: + error(self.pos, "wrong number of arguments") + error(cfunc.pos, "previous declaration here") + for i, (formal_arg, type_arg) in enumerate(zip(self.args, cfunc_type.args)): + name_declarator, type = formal_arg.analyse(scope, nonempty=1, + is_self_arg=(i == 0 and scope.is_c_class_scope)) + if type is None or type is PyrexTypes.py_object_type: + formal_arg.type = type_arg.type + formal_arg.name_declarator = name_declarator + + if exception_value is None and cfunc_type.exception_value 
is not None: + from .ExprNodes import ConstNode + exception_value = ConstNode( + self.pos, value=cfunc_type.exception_value, type=cfunc_type.return_type) + declarator = CFuncDeclaratorNode(self.pos, + base=CNameDeclaratorNode(self.pos, name=self.name, cname=None), + args=self.args, + has_varargs=False, + exception_check=cfunc_type.exception_check, + exception_value=exception_value, + has_explicit_exc_clause = has_explicit_exc_clause, + with_gil=cfunc_type.with_gil, + nogil=cfunc_type.nogil) + return CFuncDefNode(self.pos, + modifiers=modifiers or [], + base_type=CAnalysedBaseTypeNode(self.pos, type=cfunc_type.return_type), + declarator=declarator, + body=self.body, + doc=self.doc, + overridable=cfunc_type.is_overridable, + type=cfunc_type, + with_gil=cfunc_type.with_gil, + nogil=cfunc_type.nogil, + visibility='private', + api=False, + directive_locals=getattr(cfunc, 'directive_locals', {}), + directive_returns=returns) + + def is_cdef_func_compatible(self): + """Determines if the function's signature is compatible with a + cdef function. This can be used before calling + .as_cfunction() to see if that will be successful. + """ + if self.needs_closure: + return False + if self.star_arg or self.starstar_arg: + return False + return True + + def analyse_declarations(self, env): + if self.decorators: + for decorator in self.decorators: + func = decorator.decorator + if func.is_name: + self.is_classmethod |= func.name == 'classmethod' + self.is_staticmethod |= func.name == 'staticmethod' + + if self.is_classmethod and env.lookup_here('classmethod'): + # classmethod() was overridden - not much we can do here ... + self.is_classmethod = False + if self.is_staticmethod and env.lookup_here('staticmethod'): + # staticmethod() was overridden - not much we can do here ... 
+ self.is_staticmethod = False + + if env.is_py_class_scope or env.is_c_class_scope: + if self.name == '__new__' and env.is_py_class_scope: + self.is_staticmethod = True + elif self.name == '__init_subclass__' and env.is_c_class_scope: + error(self.pos, "'__init_subclass__' is not supported by extension class") + elif self.name in IMPLICIT_CLASSMETHODS and not self.is_classmethod: + self.is_classmethod = True + # TODO: remove the need to generate a real decorator here, is_classmethod=True should suffice. + from .ExprNodes import NameNode + self.decorators = self.decorators or [] + self.decorators.insert(0, DecoratorNode( + self.pos, decorator=NameNode(self.pos, name=EncodedString('classmethod')))) + + self.analyse_argument_types(env) + if self.name == '': + self.declare_lambda_function(env) + else: + self.declare_pyfunction(env) + + self.analyse_signature(env) + self.return_type = self.entry.signature.return_type() + # if a signature annotation provides a more specific return object type, use it + if self.return_type is py_object_type and self.return_type_annotation: + if env.directives['annotation_typing'] and not self.entry.is_special: + _, return_type = self.return_type_annotation.analyse_type_annotation(env) + if return_type and return_type.is_pyobject: + self.return_type = return_type + + self.create_local_scope(env) + + self.py_wrapper = DefNodeWrapper( + self.pos, + target=self, + name=self.entry.name, + args=self.args, + star_arg=self.star_arg, + starstar_arg=self.starstar_arg, + return_type=self.return_type) + self.py_wrapper.analyse_declarations(env) + + def analyse_argument_types(self, env): + self.directive_locals = env.directives.get('locals', {}) + allow_none_for_extension_args = env.directives['allow_none_for_extension_args'] + + f2s = env.fused_to_specific + env.fused_to_specific = None + + for arg in self.args: + if hasattr(arg, 'name'): + name_declarator = None + else: + base_type = arg.base_type.analyse(env) + # If we hare in pythran mode and we 
got a buffer supported by + # Pythran, we change this node to a fused type + if has_np_pythran(env) and base_type.is_pythran_expr: + base_type = PyrexTypes.FusedType([ + base_type, + #PyrexTypes.PythranExpr(pythran_type(self.type, "numpy_texpr")), + base_type.org_buffer]) + name_declarator, type = \ + arg.declarator.analyse(base_type, env) + arg.name = name_declarator.name + arg.type = type + + self.align_argument_type(env, arg) + if name_declarator and name_declarator.cname: + error(self.pos, "Python function argument cannot have C name specification") + arg.type = arg.type.as_argument_type() + arg.hdr_type = None + arg.needs_conversion = 0 + arg.needs_type_test = 0 + arg.is_generic = 1 + if arg.type.is_pyobject or arg.type.is_buffer or arg.type.is_memoryviewslice: + if arg.or_none: + arg.accept_none = True + elif arg.not_none: + arg.accept_none = False + elif (arg.type.is_extension_type or arg.type.is_builtin_type + or arg.type.is_buffer or arg.type.is_memoryviewslice): + if arg.default and arg.default.constant_result is None: + # special case: def func(MyType obj = None) + arg.accept_none = True + else: + # default depends on compiler directive + arg.accept_none = allow_none_for_extension_args + else: + # probably just a plain 'object' + arg.accept_none = True + elif not arg.type.is_error: + arg.accept_none = True # won't be used, but must be there + if arg.not_none: + error(arg.pos, "Only Python type arguments can have 'not None'") + if arg.or_none: + error(arg.pos, "Only Python type arguments can have 'or None'") + + if arg.type.is_fused: + self.has_fused_arguments = True + env.fused_to_specific = f2s + + if has_np_pythran(env): + self.np_args_idx = [i for i,a in enumerate(self.args) if a.type.is_numpy_buffer] + else: + self.np_args_idx = [] + + def analyse_signature(self, env): + if self.entry.is_special: + if self.decorators: + error(self.pos, "special functions of cdef classes cannot have decorators") + self.entry.trivial_signature = len(self.args) == 1 and 
not (self.star_arg or self.starstar_arg) + elif not (self.star_arg or self.starstar_arg) and ( + not env.directives['always_allow_keywords'] + or all([arg.pos_only for arg in self.args])): + # Use the simpler calling signature for zero- and one-argument pos-only functions. + if self.entry.signature is TypeSlots.pyfunction_signature: + if len(self.args) == 0: + self.entry.signature = TypeSlots.pyfunction_noargs + elif len(self.args) == 1: + if self.args[0].default is None and not self.args[0].kw_only: + self.entry.signature = TypeSlots.pyfunction_onearg + elif self.entry.signature is TypeSlots.pymethod_signature: + if len(self.args) == 1: + self.entry.signature = TypeSlots.unaryfunc + elif len(self.args) == 2: + if self.args[1].default is None and not self.args[1].kw_only: + self.entry.signature = TypeSlots.ibinaryfunc + + sig = self.entry.signature + nfixed = sig.max_num_fixed_args() + min_nfixed = sig.min_num_fixed_args() + if (sig is TypeSlots.pymethod_signature and nfixed == 1 + and len(self.args) == 0 and self.star_arg): + # this is the only case where a diverging number of + # arguments is not an error - when we have no explicit + # 'self' parameter as in method(*args) + sig = self.entry.signature = TypeSlots.pyfunction_signature # self is not 'really' used + self.self_in_stararg = 1 + nfixed = min_nfixed = 0 + + if self.is_staticmethod and env.is_c_class_scope: + nfixed = min_nfixed = 0 + self.self_in_stararg = True # FIXME: why for staticmethods? 
+ + self.entry.signature = sig = copy.copy(sig) + sig.fixed_arg_format = "*" + sig.is_staticmethod = True + sig.has_generic_args = True + + if ((self.is_classmethod or self.is_staticmethod) and + self.has_fused_arguments and env.is_c_class_scope): + del self.decorator_indirection.stats[:] + + for i in range(min(nfixed, len(self.args))): + arg = self.args[i] + arg.is_generic = 0 + if i >= min_nfixed: + arg.is_special_method_optional = True + if sig.is_self_arg(i) and not self.is_staticmethod: + if self.is_classmethod: + arg.is_type_arg = 1 + arg.hdr_type = arg.type = Builtin.type_type + else: + arg.is_self_arg = 1 + arg.hdr_type = arg.type = env.parent_type + arg.needs_conversion = 0 + else: + arg.hdr_type = sig.fixed_arg_type(i) + if not arg.type.same_as(arg.hdr_type): + if arg.hdr_type.is_pyobject and arg.type.is_pyobject: + arg.needs_type_test = 1 + else: + arg.needs_conversion = 1 + + if min_nfixed > len(self.args): + self.bad_signature() + return + elif nfixed < len(self.args): + if not sig.has_generic_args: + self.bad_signature() + for arg in self.args: + if arg.is_generic and (arg.type.is_extension_type or arg.type.is_builtin_type): + arg.needs_type_test = 1 + + # Decide whether to use METH_FASTCALL + # 1. If we use METH_NOARGS or METH_O, keep that. We can only change + # METH_VARARGS to METH_FASTCALL + # 2. Special methods like __call__ always use the METH_VARGARGS + # calling convention + mf = sig.method_flags() + if mf and TypeSlots.method_varargs in mf and not self.entry.is_special: + # 3. If the function uses the full args tuple, it's more + # efficient to use METH_VARARGS. This happens when the function + # takes *args but no other positional arguments (apart from + # possibly self). We don't do the analogous check for keyword + # arguments since the kwargs dict is copied anyway. 
+ if self.star_arg: + uses_args_tuple = True + for arg in self.args: + if (arg.is_generic and not arg.kw_only and + not arg.is_self_arg and not arg.is_type_arg): + # Other positional argument + uses_args_tuple = False + else: + uses_args_tuple = False + + if not uses_args_tuple: + sig = self.entry.signature = sig.with_fastcall() + + def bad_signature(self): + sig = self.entry.signature + expected_str = "%d" % sig.min_num_fixed_args() + if sig.has_generic_args: + expected_str += " or more" + elif sig.optional_object_arg_count: + expected_str += " to %d" % sig.max_num_fixed_args() + name = self.name + if name.startswith("__") and name.endswith("__"): + desc = "Special method" + else: + desc = "Method" + error(self.pos, "%s %s has wrong number of arguments (%d declared, %s expected)" % ( + desc, self.name, len(self.args), expected_str)) + + def declare_pyfunction(self, env): + #print "DefNode.declare_pyfunction:", self.name, "in", env ### + name = self.name + entry = env.lookup_here(name) + if entry: + if entry.is_final_cmethod and not env.parent_type.is_final_type: + error(self.pos, "Only final types can have final Python (def/cpdef) methods") + if entry.type.is_cfunction and not entry.is_builtin_cmethod and not self.is_wrapper: + warning(self.pos, "Overriding cdef method with def method.", 5) + entry = env.declare_pyfunction(name, self.pos, allow_redefine=not self.is_wrapper) + self.entry = entry + prefix = env.next_id(env.scope_prefix) + self.entry.pyfunc_cname = punycodify_name(Naming.pyfunc_prefix + prefix + name) + if Options.docstrings: + entry.doc = embed_position(self.pos, self.doc) + entry.doc_cname = punycodify_name(Naming.funcdoc_prefix + prefix + name) + if entry.is_special: + if entry.name in TypeSlots.invisible or not entry.doc or ( + entry.name in '__getattr__' and env.directives['fast_getattr']): + entry.wrapperbase_cname = None + else: + entry.wrapperbase_cname = punycodify_name(Naming.wrapperbase_prefix + prefix + name) + else: + entry.doc = None + 
    def declare_lambda_function(self, env):
        # Lambdas get an internal name and never carry a docstring.
        entry = env.declare_lambda_function(self.lambda_name, self.pos)
        entry.doc = None
        self.entry = entry
        self.entry.pyfunc_cname = entry.cname

    def declare_arguments(self, env):
        # Enter each formal argument into the local scope; args that need
        # Python->C conversion get a fresh local var for the converted value.
        for arg in self.args:
            if not arg.name:
                error(arg.pos, "Missing argument name")
            if arg.needs_conversion:
                arg.entry = env.declare_var(arg.name, arg.type, arg.pos)
                if arg.type.is_pyobject:
                    # Initialise to NULL so cleanup is safe on early error.
                    arg.entry.init = "0"
            else:
                arg.entry = self.declare_argument(env, arg)
            arg.entry.is_arg = 1
            arg.entry.used = 1
            arg.entry.is_self_arg = arg.is_self_arg
        self.declare_python_arg(env, self.star_arg)
        self.declare_python_arg(env, self.starstar_arg)

    def declare_python_arg(self, env, arg):
        # *args / **kwargs entries: always Python objects (or inferred).
        if arg:
            if env.directives['infer_types'] != False:
                type = PyrexTypes.unspecified_type
            else:
                type = py_object_type
            entry = env.declare_var(arg.name, type, arg.pos)
            entry.is_arg = 1
            entry.used = 1
            entry.init = "0"
            # May legitimately be NULL, so use xdecref on cleanup.
            entry.xdecref_cleanup = 1
            arg.entry = entry

    def analyse_expressions(self, env):
        self.local_scope.directives = env.directives
        self.analyse_default_values(env)
        self.analyse_annotations(env)

        if not self.needs_assignment_synthesis(env) and self.decorators:
            # Decorators are only analysed here when no assignment node was
            # synthesised for them (otherwise that node analyses them).
            for decorator in self.decorators[::-1]:
                decorator.decorator = decorator.decorator.analyse_expressions(env)

        self.py_wrapper.prepare_argument_coercion(env)
        return self

    def needs_assignment_synthesis(self, env, code=None):
        # Whether a `name = <function>` assignment must be generated for
        # this def (bindings, closures, class bodies), as opposed to a
        # plain method-table entry.
        if self.is_staticmethod:
            return True
        if self.specialized_cpdefs or self.entry.is_fused_specialized:
            return False
        if self.no_assignment_synthesis:
            return False
        if self.entry.is_special:
            return False
        if self.entry.is_anonymous:
            return True
        if env.is_module_scope or env.is_c_class_scope:
            if code is None:
                return self.local_scope.directives['binding']
            else:
                return code.globalstate.directives['binding']
        return env.is_py_class_scope or env.is_closure_scope

    def error_value(self):
        # (continues in the next chunk of the file)
        return
self.entry.signature.error_value + + def caller_will_check_exceptions(self): + return self.entry.signature.exception_check + + def generate_function_definitions(self, env, code): + if self.defaults_getter: + # defaults getter must never live in class scopes, it's always a module function + self.defaults_getter.generate_function_definitions(env.global_scope(), code) + + # Before closure cnames are mangled + if self.py_wrapper_required: + # func_cname might be modified by @cname + self.py_wrapper.func_cname = self.entry.func_cname + self.py_wrapper.generate_function_definitions(env, code) + FuncDefNode.generate_function_definitions(self, env, code) + + def generate_function_header(self, code, with_pymethdef, proto_only=0): + if proto_only: + if self.py_wrapper_required: + self.py_wrapper.generate_function_header( + code, with_pymethdef, True) + return + arg_code_list = [] + if self.entry.signature.has_dummy_arg: + self_arg = 'PyObject *%s' % Naming.self_cname + if not self.needs_outer_scope: + self_arg = 'CYTHON_UNUSED ' + self_arg + arg_code_list.append(self_arg) + + def arg_decl_code(arg): + entry = arg.entry + if entry.in_closure: + cname = entry.original_cname + else: + cname = entry.cname + decl = entry.type.declaration_code(cname) + if not entry.cf_used: + decl = 'CYTHON_UNUSED ' + decl + return decl + + for arg in self.args: + arg_code_list.append(arg_decl_code(arg)) + if self.star_arg: + arg_code_list.append(arg_decl_code(self.star_arg)) + if self.starstar_arg: + arg_code_list.append(arg_decl_code(self.starstar_arg)) + if arg_code_list: + arg_code = ', '.join(arg_code_list) + else: + arg_code = 'void' # No arguments + dc = self.return_type.declaration_code(self.entry.pyfunc_cname) + + decls_code = code.globalstate['decls'] + preprocessor_guard = self.get_preprocessor_guard() + if preprocessor_guard: + decls_code.putln(preprocessor_guard) + decls_code.putln( + "static %s(%s); /* proto */" % (dc, arg_code)) + if preprocessor_guard: + decls_code.putln("#endif") 
+ code.putln("static %s(%s) {" % (dc, arg_code)) + + def generate_argument_declarations(self, env, code): + pass + + def generate_keyword_list(self, code): + pass + + def generate_argument_parsing_code(self, env, code): + # Move arguments into closure if required + def put_into_closure(entry): + if entry.in_closure: + if entry.type.is_array: + # This applies to generator expressions that iterate over C arrays (and need to + # capture them by value), under most other circumstances C array arguments are dropped to + # pointers so this copy isn't used + assert entry.type.size is not None + code.globalstate.use_utility_code(UtilityCode.load_cached("IncludeStringH", "StringTools.c")) + code.putln("memcpy({0}, {1}, sizeof({0}));".format(entry.cname, entry.original_cname)) + else: + code.putln('%s = %s;' % (entry.cname, entry.original_cname)) + if entry.type.is_memoryviewslice: + # TODO - at some point reference count of memoryviews should + # genuinely be unified with PyObjects + entry.type.generate_incref_memoryviewslice(code, entry.cname, True) + elif entry.xdecref_cleanup: + # mostly applies to the starstar arg - this can sometimes be NULL + # so must be xincrefed instead + code.put_var_xincref(entry) + code.put_var_xgiveref(entry) + else: + code.put_var_incref(entry) + code.put_var_giveref(entry) + for arg in self.args: + put_into_closure(arg.entry) + for arg in self.star_arg, self.starstar_arg: + if arg: + put_into_closure(arg.entry) + + def generate_argument_type_tests(self, code): + pass + + +class DefNodeWrapper(FuncDefNode): + # DefNode python wrapper code generator + + defnode = None + target = None # Target DefNode + needs_values_cleanup = False + + def __init__(self, *args, **kwargs): + FuncDefNode.__init__(self, *args, **kwargs) + self.num_posonly_args = self.target.num_posonly_args + self.num_kwonly_args = self.target.num_kwonly_args + self.num_required_kw_args = self.target.num_required_kw_args + self.num_required_args = self.target.num_required_args + 
self.self_in_stararg = self.target.self_in_stararg + self.signature = None + + def analyse_declarations(self, env): + target_entry = self.target.entry + name = self.name + prefix = env.next_id(env.scope_prefix) + target_entry.func_cname = punycodify_name(Naming.pywrap_prefix + prefix + name) + target_entry.pymethdef_cname = punycodify_name(Naming.pymethdef_prefix + prefix + name) + + self.signature = target_entry.signature + + self.np_args_idx = self.target.np_args_idx + + def prepare_argument_coercion(self, env): + # This is only really required for Cython utility code at this time, + # everything else can be done during code generation. But we expand + # all utility code here, simply because we cannot easily distinguish + # different code types. + for arg in self.args: + if not arg.type.is_pyobject: + if not arg.type.create_from_py_utility_code(env): + pass # will fail later + elif arg.hdr_type and not arg.hdr_type.is_pyobject: + if not arg.hdr_type.create_to_py_utility_code(env): + pass # will fail later + + if self.starstar_arg and not self.starstar_arg.entry.cf_used: + # we will set the kwargs argument to NULL instead of a new dict + # and must therefore correct the control flow state + entry = self.starstar_arg.entry + entry.xdecref_cleanup = 1 + for ass in entry.cf_assignments: + if not ass.is_arg and ass.lhs.is_name: + ass.lhs.cf_maybe_null = True + + def signature_has_nongeneric_args(self): + argcount = len(self.args) + if argcount == 0 or ( + argcount == 1 and (self.args[0].is_self_arg or + self.args[0].is_type_arg)): + return 0 + return 1 + + def signature_has_generic_args(self): + return self.signature.has_generic_args + + def generate_function_body(self, code): + args = [] + if self.signature.has_dummy_arg: + args.append(Naming.self_cname) + for arg in self.args: + if arg.type.is_cpp_class: + # it's safe to move converted C++ types because they aren't + # used again afterwards + code.globalstate.use_utility_code( + 
UtilityCode.load_cached("MoveIfSupported", "CppSupport.cpp")) + args.append("__PYX_STD_MOVE_IF_SUPPORTED(%s)" % arg.entry.cname) + elif arg.hdr_type and not (arg.type.is_memoryviewslice or + arg.type.is_struct or + arg.type.is_complex): + args.append(arg.type.cast_code(arg.entry.cname)) + else: + args.append(arg.entry.cname) + if self.star_arg: + args.append(self.star_arg.entry.cname) + if self.starstar_arg: + args.append(self.starstar_arg.entry.cname) + args = ', '.join(args) + if not self.return_type.is_void: + code.put('%s = ' % Naming.retval_cname) + code.putln('%s(%s);' % ( + self.target.entry.pyfunc_cname, args)) + + def generate_function_definitions(self, env, code): + lenv = self.target.local_scope + # Generate C code for header and body of function + code.mark_pos(self.pos) + code.putln("") + code.putln("/* Python wrapper */") + preprocessor_guard = self.target.get_preprocessor_guard() + if preprocessor_guard: + code.putln(preprocessor_guard) + + code.enter_cfunc_scope(lenv) + code.return_from_error_cleanup_label = code.new_label() + + with_pymethdef = (self.target.needs_assignment_synthesis(env, code) or + self.target.pymethdef_required) + self.generate_function_header(code, with_pymethdef) + self.generate_argument_declarations(lenv, code) + tempvardecl_code = code.insertion_point() + + if self.return_type.is_pyobject: + retval_init = ' = 0' + else: + retval_init = '' + if not self.return_type.is_void: + code.putln('%s%s;' % ( + self.return_type.declaration_code(Naming.retval_cname), + retval_init)) + code.put_declare_refcount_context() + code.put_setup_refcount_context(EncodedString('%s (wrapper)' % self.name)) + + self.generate_argument_parsing_code(lenv, code, tempvardecl_code) + self.generate_argument_type_tests(code) + self.generate_function_body(code) + + # ----- Go back and insert temp variable declarations + tempvardecl_code.put_temp_declarations(code.funcstate) + + code.mark_pos(self.pos) + code.putln("") + code.putln("/* function exit code */") 
        # ----- Error cleanup
        if code.error_label in code.labels_used:
            code.put_goto(code.return_label)
            code.put_label(code.error_label)
            # release any temps still holding references at the error point
            for cname, type in code.funcstate.all_managed_temps():
                code.put_xdecref(cname, type)
            err_val = self.error_value()
            if err_val is not None:
                code.putln("%s = %s;" % (Naming.retval_cname, err_val))

        # ----- Non-error return cleanup
        code.put_label(code.return_label)
        for entry in lenv.var_entries:
            if entry.is_arg:
                if entry.xdecref_cleanup:
                    code.put_var_xdecref(entry)
                else:
                    code.put_var_decref(entry)
        var_entries_set = set(lenv.var_entries)
        for arg in self.args:
            if not arg.type.is_pyobject and arg.entry not in var_entries_set:
                # This captures anything that's been converted from a PyObject.
                # Primarily memoryviews at the moment
                if arg.entry.xdecref_cleanup:
                    code.put_var_xdecref(arg.entry)
                else:
                    code.put_var_decref(arg.entry)

        self.generate_argument_values_cleanup_code(code)
        code.put_finish_refcount_context()
        if not self.return_type.is_void:
            code.putln("return %s;" % Naming.retval_cname)
        code.putln('}')
        code.exit_cfunc_scope()
        if preprocessor_guard:
            code.putln("#endif /*!(%s)*/" % preprocessor_guard)

    def generate_function_header(self, code, with_pymethdef, proto_only=0):
        # Emit the C prototype (and, unless proto_only, the function opening,
        # PyDoc_STRVAR docstring and PyMethodDef) for the Python wrapper.
        arg_code_list = []
        sig = self.signature

        if sig.has_dummy_arg or self.self_in_stararg:
            arg_code = "PyObject *%s" % Naming.self_cname
            if not sig.has_dummy_arg:
                arg_code = 'CYTHON_UNUSED ' + arg_code
            arg_code_list.append(arg_code)

        for arg in self.args:
            if not arg.is_generic:
                if arg.is_self_arg or arg.is_type_arg:
                    arg_code_list.append("PyObject *%s" % arg.hdr_cname)
                else:
                    arg_code_list.append(
                        arg.hdr_type.declaration_code(arg.hdr_cname))
        entry = self.target.entry
        if not entry.is_special and sig.method_flags() == [TypeSlots.method_noargs]:
            # METH_NOARGS functions still receive an (ignored) second argument
            arg_code_list.append("CYTHON_UNUSED PyObject *unused")
        if sig.has_generic_args:
            varargs_args = "PyObject *%s, PyObject *%s" % (
                Naming.args_cname, Naming.kwds_cname)
            if sig.use_fastcall:
                # emit both signatures, selected by CYTHON_METH_FASTCALL at C compile time
                fastcall_args = "PyObject *const *%s, Py_ssize_t %s, PyObject *%s" % (
                    Naming.args_cname, Naming.nargs_cname, Naming.kwds_cname)
                arg_code_list.append(
                    "\n#if CYTHON_METH_FASTCALL\n%s\n#else\n%s\n#endif\n" % (
                        fastcall_args, varargs_args))
            else:
                arg_code_list.append(varargs_args)
        if entry.is_special:
            # pad special-method signatures up to the slot's fixed arity
            for n in range(len(self.args), sig.max_num_fixed_args()):
                arg_code_list.append("CYTHON_UNUSED PyObject *unused_arg_%s" % n)
        arg_code = ", ".join(arg_code_list)

        # Prevent warning: unused function '__pyx_pw_5numpy_7ndarray_1__getbuffer__'
        mf = ""
        if (entry.name in ("__getbuffer__", "__releasebuffer__")
                and entry.scope.is_c_class_scope):
            mf = "CYTHON_UNUSED "
            with_pymethdef = False

        dc = self.return_type.declaration_code(entry.func_cname)
        header = "%sstatic %s(%s)" % (mf, dc, arg_code)
        code.putln("%s; /*proto*/" % header)

        if proto_only:
            if self.target.fused_py_func:
                # If we are the specialized version of the cpdef, we still
                # want the prototype for the "fused cpdef", in case we're
                # checking to see if our method was overridden in Python
                self.target.fused_py_func.generate_function_header(
                    code, with_pymethdef, proto_only=True)
            return

        if (Options.docstrings and entry.doc and
                not self.target.fused_py_func and
                not entry.scope.is_property_scope and
                (not entry.is_special or entry.wrapperbase_cname)):
            # h_code = code.globalstate['h_code']
            docstr = entry.doc

            if docstr.is_unicode:
                docstr = docstr.as_utf8_string()

            if not (entry.is_special and entry.name in ('__getbuffer__', '__releasebuffer__')):
                code.putln('PyDoc_STRVAR(%s, %s);' % (
                    entry.doc_cname,
                    docstr.as_c_string_literal()))

            if entry.is_special:
                code.putln('#if CYTHON_UPDATE_DESCRIPTOR_DOC')
                code.putln(
                    "struct wrapperbase %s;" % entry.wrapperbase_cname)
                code.putln('#endif')

        if with_pymethdef or self.target.fused_py_func:
            code.put(
                "static PyMethodDef %s = " % entry.pymethdef_cname)
            code.put_pymethoddef(self.target.entry, ";", allow_skip=False)
        code.putln("%s {" % header)

    def generate_argument_declarations(self, env, code):
        # Declare the C variables that receive the raw (header) argument
        # values and the converted argument values.
        for arg in self.args:
            if arg.is_generic:
                if arg.needs_conversion:
                    code.putln("PyObject *%s = 0;" % arg.hdr_cname)
                else:
                    code.put_var_declaration(arg.entry)
        for entry in env.var_entries:
            if entry.is_arg:
                code.put_var_declaration(entry)

        # Create nargs, but avoid an "unused" warning in the few cases where we don't need it.
        if self.signature_has_generic_args():
            # error handling for this is checked after the declarations
            nargs_code = "CYTHON_UNUSED Py_ssize_t %s;" % Naming.nargs_cname
            if self.signature.use_fastcall:
                # under METH_FASTCALL nargs is a real parameter, not a local
                code.putln("#if !CYTHON_METH_FASTCALL")
                code.putln(nargs_code)
                code.putln("#endif")
            else:
                code.putln(nargs_code)

        # Array containing the values of keyword arguments when using METH_FASTCALL.
        code.putln('CYTHON_UNUSED PyObject *const *%s;' % Naming.kwvalues_cname)

    def generate_argument_parsing_code(self, env, code, decl_code):
        # Generate fast equivalent of PyArg_ParseTuple call for
        # generic arguments, if any, including args/kwargs
        old_error_label = code.new_error_label()
        our_error_label = code.error_label
        end_label = code.new_label("argument_unpacking_done")

        has_kwonly_args = self.num_kwonly_args > 0
        has_star_or_kw_args = self.star_arg is not None \
            or self.starstar_arg is not None or has_kwonly_args

        for arg in self.args:
            if not arg.type.is_pyobject:
                if not arg.type.create_from_py_utility_code(env):
                    pass  # will fail later

        # Assign nargs variable as len(args).
        if self.signature_has_generic_args():
            if self.signature.use_fastcall:
                # fastcall passes nargs directly; only compute it otherwise
                code.putln("#if !CYTHON_METH_FASTCALL")
            code.putln("#if CYTHON_ASSUME_SAFE_MACROS")
            code.putln("%s = PyTuple_GET_SIZE(%s);" % (
                Naming.nargs_cname, Naming.args_cname))
            code.putln("#else")
            # An error here is very unlikely, but we risk a (conditionally) unused error label,
            # so we just skip the traceback and return immediately.
            code.putln("%s = PyTuple_Size(%s); if (%s) return %s;" % (
                Naming.nargs_cname,
                Naming.args_cname,
                code.unlikely("%s < 0" % Naming.nargs_cname),
                self.error_value(),
            ))
            code.putln("#endif")
            if self.signature.use_fastcall:
                code.putln("#endif")
        code.globalstate.use_utility_code(
            UtilityCode.load_cached("fastcall", "FunctionArguments.c"))
        code.putln('%s = __Pyx_KwValues_%s(%s, %s);' % (
            Naming.kwvalues_cname, self.signature.fastvar, Naming.args_cname, Naming.nargs_cname))

        if not self.signature_has_generic_args():
            if has_star_or_kw_args:
                error(self.pos, "This method cannot have * or keyword arguments")
            self.generate_argument_conversion_code(code)

        elif not self.signature_has_nongeneric_args():
            # func(*args) or func(**kw) or func(*args, **kw)
            # possibly with a "self" argument but no other non-star
            # arguments
            self.generate_stararg_copy_code(code)

        else:
            self.generate_tuple_and_keyword_parsing_code(self.args, code, decl_code)
            self.needs_values_cleanup = True

        code.error_label = old_error_label
        if code.label_used(our_error_label):
            # shared error-exit path for argument unpacking failures
            if not code.label_used(end_label):
                code.put_goto(end_label)
            code.put_label(our_error_label)
            self.generate_argument_values_cleanup_code(code)

            if has_star_or_kw_args:
                self.generate_arg_decref(self.star_arg, code)
                if self.starstar_arg:
                    if self.starstar_arg.entry.xdecref_cleanup:
                        code.put_var_xdecref_clear(self.starstar_arg.entry)
                    else:
                        code.put_var_decref_clear(self.starstar_arg.entry)
            for arg in self.args:
                if not arg.type.is_pyobject and arg.type.needs_refcounting:
                    # at the moment this just catches memoryviewslices, but in future
                    # other non-PyObject reference counted types might need cleanup
                    code.put_var_xdecref(arg.entry)
            code.put_add_traceback(self.target.entry.qualified_name)
            code.put_finish_refcount_context()
            code.putln("return %s;" % self.error_value())
        if code.label_used(end_label):
            code.put_label(end_label)

    def generate_arg_xdecref(self, arg, code):
        # XDECREF-and-clear helper; 'arg' may be None for absent */** args.
        if arg:
            code.put_var_xdecref_clear(arg.entry)

    def generate_arg_decref(self, arg, code):
        # DECREF-and-clear helper; 'arg' may be None for absent */** args.
        if arg:
            code.put_var_decref_clear(arg.entry)

    def generate_stararg_copy_code(self, code):
        # Fast path for signatures with only *args/**kwargs (plus possibly
        # 'self'): copy the incoming tuple/dict instead of unpacking values.
        if not self.star_arg:
            code.globalstate.use_utility_code(
                UtilityCode.load_cached("RaiseArgTupleInvalid", "FunctionArguments.c"))
            code.putln("if (unlikely(%s > 0)) {" % Naming.nargs_cname)
            # Direct return simplifies **kwargs cleanup, but we give no traceback.
            code.put('__Pyx_RaiseArgtupleInvalid(%s, 1, 0, 0, %s); return %s;' % (
                self.name.as_c_string_literal(), Naming.nargs_cname, self.error_value()))
            code.putln("}")

        if self.starstar_arg:
            if self.star_arg or not self.starstar_arg.entry.cf_used:
                kwarg_check = "unlikely(%s)" % Naming.kwds_cname
            else:
                kwarg_check = "%s" % Naming.kwds_cname
        else:
            kwarg_check = "unlikely(%s) && __Pyx_NumKwargs_%s(%s)" % (
                Naming.kwds_cname, self.signature.fastvar, Naming.kwds_cname)
        code.globalstate.use_utility_code(
            UtilityCode.load_cached("KeywordStringCheck", "FunctionArguments.c"))
        # reject non-string keyword names early
        code.putln(
            "if (%s && unlikely(!__Pyx_CheckKeywordStrings(%s, %s, %d))) return %s;" % (
                kwarg_check, Naming.kwds_cname, self.name.as_c_string_literal(),
                bool(self.starstar_arg), self.error_value()))

        if self.starstar_arg and self.starstar_arg.entry.cf_used:
            # materialise **kwargs as a (possibly empty) new dict
            code.putln("if (%s) {" % kwarg_check)
            code.putln("%s = __Pyx_KwargsAsDict_%s(%s, %s);" % (
                self.starstar_arg.entry.cname,
                self.signature.fastvar,
                Naming.kwds_cname,
                Naming.kwvalues_cname))
            code.putln("if (unlikely(!%s)) return %s;" % (
                self.starstar_arg.entry.cname, self.error_value()))
            code.put_gotref(self.starstar_arg.entry.cname, py_object_type)
            code.putln("} else {")
            code.putln("%s = PyDict_New();" % (self.starstar_arg.entry.cname,))
            code.putln("if (unlikely(!%s)) return %s;" % (
                self.starstar_arg.entry.cname, self.error_value()))
            code.put_var_gotref(self.starstar_arg.entry)
            self.starstar_arg.entry.xdecref_cleanup = False
            code.putln("}")

            # Normal (traceback) error handling from this point on to clean up the kwargs dict.

        if self.self_in_stararg and not self.target.is_staticmethod:
            assert not self.signature.use_fastcall
            # need to create a new tuple with 'self' inserted as first item
            code.putln("%s = PyTuple_New(%s + 1); %s" % (
                self.star_arg.entry.cname,
                Naming.nargs_cname,
                code.error_goto_if_null(self.star_arg.entry.cname, self.pos)
            ))
            code.put_var_gotref(self.star_arg.entry)
            code.put_incref(Naming.self_cname, py_object_type)
            code.put_giveref(Naming.self_cname, py_object_type)
            code.putln("PyTuple_SET_ITEM(%s, 0, %s);" % (
                self.star_arg.entry.cname, Naming.self_cname))
            temp = code.funcstate.allocate_temp(PyrexTypes.c_py_ssize_t_type, manage_ref=False)
            code.putln("for (%s=0; %s < %s; %s++) {" % (
                temp, temp, Naming.nargs_cname, temp))
            code.putln("PyObject* item = PyTuple_GET_ITEM(%s, %s);" % (
                Naming.args_cname, temp))
            code.put_incref("item", py_object_type)
            code.put_giveref("item", py_object_type)
            code.putln("PyTuple_SET_ITEM(%s, %s+1, item);" % (
                self.star_arg.entry.cname, temp))
            code.putln("}")
            code.funcstate.release_temp(temp)
            self.star_arg.entry.xdecref_cleanup = 0
        elif self.star_arg:
            # *args can reuse the incoming args tuple directly
            assert not self.signature.use_fastcall
            code.put_incref(Naming.args_cname, py_object_type)
            code.putln("%s = %s;" % (
                self.star_arg.entry.cname,
                Naming.args_cname))
            self.star_arg.entry.xdecref_cleanup = 0

    def generate_tuple_and_keyword_parsing_code(self, args, code, decl_code):
        # Full positional + keyword argument unpacking for generic signatures:
        # classifies the args, emits the keyword and the no-keyword unpacking
        # branches, then converts/assigns the collected values.
        code.globalstate.use_utility_code(
            UtilityCode.load_cached("fastcall", "FunctionArguments.c"))

        self_name_csafe = self.name.as_c_string_literal()

        argtuple_error_label = code.new_label("argtuple_error")

        positional_args = []
        required_kw_only_args = []
        optional_kw_only_args = []
        num_pos_only_args = 0
        for arg in args:
            if arg.is_generic:
                if arg.default:
                    if not arg.is_self_arg and not arg.is_type_arg:
                        if arg.kw_only:
                            optional_kw_only_args.append(arg)
                        else:
                            positional_args.append(arg)
                elif arg.kw_only:
                    required_kw_only_args.append(arg)
                elif not arg.is_self_arg and not arg.is_type_arg:
                    positional_args.append(arg)
                if arg.pos_only:
                    num_pos_only_args += 1

        # sort required kw-only args before optional ones to avoid special
        # cases in the unpacking code
        kw_only_args = required_kw_only_args + optional_kw_only_args

        min_positional_args = self.num_required_args - self.num_required_kw_args
        if len(args) > 0 and (args[0].is_self_arg or args[0].is_type_arg):
            min_positional_args -= 1
        max_positional_args = len(positional_args)
        has_fixed_positional_count = not self.star_arg and \
            min_positional_args == max_positional_args
        has_kw_only_args = bool(kw_only_args)

        if self.starstar_arg or self.star_arg:
            self.generate_stararg_init_code(max_positional_args, code)

        code.putln('{')
        all_args = tuple(positional_args) + tuple(kw_only_args)
        non_posonly_args = [arg for arg in all_args if not arg.pos_only]
        # NULL-terminated array of interned keyword-name pointers
        non_pos_args_id = ','.join(
            ['&%s' % code.intern_identifier(arg.entry.name) for arg in non_posonly_args] + ['0'])
        code.putln("PyObject **%s[] = {%s};" % (
            Naming.pykwdlist_cname,
            non_pos_args_id))

        # Before being converted and assigned to the target variables,
        # borrowed references to all unpacked argument values are
        # collected into a local PyObject* array called "values",
        # regardless if they were taken from default arguments,
        # positional arguments or keyword arguments.  Note that
        # C-typed default arguments are handled at conversion time,
        # so their array value is NULL in the end if no argument
        # was passed for them.
        self.generate_argument_values_setup_code(all_args, code, decl_code)

        # If all args are positional-only, we can raise an error
        # straight away if we receive a non-empty kw-dict.
        # This requires a PyDict_Size call.  This call is wasteful
        # for functions which do accept kw-args, so we do not generate
        # the PyDict_Size call unless all args are positional-only.
        accept_kwd_args = non_posonly_args or self.starstar_arg
        if accept_kwd_args:
            kw_unpacking_condition = Naming.kwds_cname
        else:
            kw_unpacking_condition = "%s && __Pyx_NumKwargs_%s(%s) > 0" % (
                Naming.kwds_cname, self.signature.fastvar, Naming.kwds_cname)

        if self.num_required_kw_args > 0:
            kw_unpacking_condition = "likely(%s)" % kw_unpacking_condition

        # --- optimised code when we receive keyword arguments
        code.putln("if (%s) {" % kw_unpacking_condition)

        if accept_kwd_args:
            self.generate_keyword_unpacking_code(
                min_positional_args, max_positional_args,
                has_fixed_positional_count, has_kw_only_args, all_args, argtuple_error_label, code)
        else:
            # Here we do not accept kw-args but we are passed a non-empty kw-dict.
            # We call ParseOptionalKeywords which will raise an appropriate error if
            # the kw-args dict passed is non-empty (which it will be, since kw_unpacking_condition is true)
            code.globalstate.use_utility_code(
                UtilityCode.load_cached("ParseKeywords", "FunctionArguments.c"))
            code.putln('if (likely(__Pyx_ParseOptionalKeywords(%s, %s, %s, %s, %s, %s, %s) < 0)) %s' % (
                Naming.kwds_cname,
                Naming.kwvalues_cname,
                Naming.pykwdlist_cname,
                self.starstar_arg.entry.cname if self.starstar_arg else 0,
                'values',
                0,
                self_name_csafe,
                code.error_goto(self.pos)))

        # --- optimised code when we do not receive any keyword arguments
        if (self.num_required_kw_args and min_positional_args > 0) or min_positional_args == max_positional_args:
            # Python raises arg tuple related errors first, so we must
            # check the length here
            if min_positional_args == max_positional_args and not self.star_arg:
                compare = '!='
            else:
                compare = '<'
            code.putln('} else if (unlikely(%s %s %d)) {' % (
                Naming.nargs_cname, compare, min_positional_args))
            code.put_goto(argtuple_error_label)

        if self.num_required_kw_args:
            # pure error case: keywords required but not passed
            if max_positional_args > min_positional_args and not self.star_arg:
                code.putln('} else if (unlikely(%s > %d)) {' % (
                    Naming.nargs_cname, max_positional_args))
                code.put_goto(argtuple_error_label)
            code.putln('} else {')
            for i, arg in enumerate(kw_only_args):
                if not arg.default:
                    pystring_cname = code.intern_identifier(arg.entry.name)
                    # required keyword-only argument missing
                    code.globalstate.use_utility_code(
                        UtilityCode.load_cached("RaiseKeywordRequired", "FunctionArguments.c"))
                    code.put('__Pyx_RaiseKeywordRequired("%s", %s); ' % (
                        self.name,
                        pystring_cname))
                    code.putln(code.error_goto(self.pos))
                    break

        else:
            # optimised tuple unpacking code
            code.putln('} else {')
            if min_positional_args == max_positional_args:
                # parse the exact number of positional arguments from
                # the args tuple
                for i, arg in enumerate(positional_args):
                    code.putln("values[%d] = __Pyx_Arg_%s(%s, %d);" % (
                        i, self.signature.fastvar, Naming.args_cname, i))
            else:
                # parse the positional arguments from the variable length
                # args tuple and reject illegal argument tuple sizes
                code.putln('switch (%s) {' % Naming.nargs_cname)
                if self.star_arg:
                    code.putln('default:')
                reversed_args = list(enumerate(positional_args))[::-1]
                for i, arg in reversed_args:
                    if i >= min_positional_args-1:
                        if i != reversed_args[0][0]:
                            code.putln('CYTHON_FALLTHROUGH;')
                        code.put('case %2d: ' % (i+1))
                        code.putln("values[%d] = __Pyx_Arg_%s(%s, %d);" % (
                            i, self.signature.fastvar, Naming.args_cname, i))
                if min_positional_args == 0:
                    code.putln('CYTHON_FALLTHROUGH;')
                    code.put('case 0: ')
                code.putln('break;')
                if self.star_arg:
                    if min_positional_args:
                        for i in range(min_positional_args-1, -1, -1):
                            code.putln('case %2d:' % i)
                        code.put_goto(argtuple_error_label)
                else:
                    code.put('default: ')
                    code.put_goto(argtuple_error_label)
                code.putln('}')

            code.putln('}')  # end of the conditional unpacking blocks

        # Convert arg values to their final type and assign them.
        # Also inject non-Python default arguments, which cannot
        # live in the values[] array.
        for i, arg in enumerate(all_args):
            self.generate_arg_assignment(arg, "values[%d]" % i, code)

        code.putln('}')  # end of the whole argument unpacking block

        if code.label_used(argtuple_error_label):
            skip_error_handling = code.new_label("skip")
            code.put_goto(skip_error_handling)
            code.put_label(argtuple_error_label)
            code.globalstate.use_utility_code(
                UtilityCode.load_cached("RaiseArgTupleInvalid", "FunctionArguments.c"))
            code.putln('__Pyx_RaiseArgtupleInvalid(%s, %d, %d, %d, %s); %s' % (
                self_name_csafe, has_fixed_positional_count,
                min_positional_args, max_positional_args,
                Naming.nargs_cname,
                code.error_goto(self.pos)
            ))
            code.put_label(skip_error_handling)

    def generate_arg_assignment(self, arg, item, code):
        # Assign one unpacked value ('item', a values[] slot) to its target
        # variable, converting from PyObject where a C type is declared.
        if arg.type.is_pyobject:
            # Python default arguments were already stored in 'item' at the very beginning
            if arg.is_generic:
                item = PyrexTypes.typecast(arg.type, PyrexTypes.py_object_type, item)
            entry = arg.entry
            code.putln("%s = %s;" % (entry.cname, item))
        else:
            if arg.type.from_py_function:
                if arg.default:
                    # C-typed default arguments must be handled here
                    code.putln('if (%s) {' % item)
                code.putln(arg.type.from_py_call_code(
                    item, arg.entry.cname, arg.pos, code))
                if arg.default:
                    code.putln('} else {')
                    code.putln("%s = %s;" % (
                        arg.entry.cname,
                        arg.calculate_default_value_code(code)))
                    if arg.type.is_memoryviewslice:
                        code.put_var_incref_memoryviewslice(arg.entry, have_gil=True)
                    code.putln('}')
            else:
                error(arg.pos, "Cannot convert Python object argument to type '%s'" % arg.type)

    def generate_stararg_init_code(self, max_positional_args, code):
        # Initialise the **kwargs dict and the *args tuple/slice before
        # any other unpacking happens.
        if self.starstar_arg:
            self.starstar_arg.entry.xdecref_cleanup = 0
            code.putln('%s = PyDict_New(); if (unlikely(!%s)) return %s;' % (
                self.starstar_arg.entry.cname,
                self.starstar_arg.entry.cname,
                self.error_value()))
            code.put_var_gotref(self.starstar_arg.entry)
        if self.star_arg:
            self.star_arg.entry.xdecref_cleanup = 0
            if max_positional_args == 0:
                # If there are no positional arguments, use the args tuple
                # directly
                assert not self.signature.use_fastcall
                code.put_incref(Naming.args_cname, py_object_type)
                code.putln("%s = %s;" % (self.star_arg.entry.cname, Naming.args_cname))
            else:
                # It is possible that this is a slice of "negative" length,
                # as in args[5:3].  That's not a problem, the function below
                # handles that efficiently and returns the empty tuple.
                code.putln('%s = __Pyx_ArgsSlice_%s(%s, %d, %s);' % (
                    self.star_arg.entry.cname, self.signature.fastvar,
                    Naming.args_cname, max_positional_args, Naming.nargs_cname))
                code.putln("if (unlikely(!%s)) {" %
                           self.star_arg.entry.type.nullcheck_string(self.star_arg.entry.cname))
                if self.starstar_arg:
                    code.put_var_decref_clear(self.starstar_arg.entry)
                code.put_finish_refcount_context()
                code.putln('return %s;' % self.error_value())
                code.putln('}')
                code.put_var_gotref(self.star_arg.entry)

    def generate_argument_values_setup_code(self, args, code, decl_code):
        max_args = len(args)
        # the 'values' array collects references to arguments
        # before doing any type coercion etc..  Whether they are borrowed or not
        # depends on the compilation options.
        decl_code.putln("PyObject* values[%d] = {%s};" % (
            max_args, ','.join('0'*max_args)))

        if self.target.defaults_struct:
            # fetch the struct holding dynamically computed default values
            code.putln('%s *%s = __Pyx_CyFunction_Defaults(%s, %s);' % (
                self.target.defaults_struct, Naming.dynamic_args_cname,
                self.target.defaults_struct, Naming.self_cname))

        # assign (usually borrowed) Python default values to the values array,
        # so that they can be overwritten by received arguments below
        for i, arg in enumerate(args):
            if arg.default and arg.type.is_pyobject:
                default_value = arg.calculate_default_value_code(code)
                code.putln('values[%d] = __Pyx_Arg_NewRef_%s(%s);' % (
                    i, self.signature.fastvar, arg.type.as_pyobject(default_value)))

    def generate_argument_values_cleanup_code(self, code):
        # Release the references held by the values[] array, if any.
        if not self.needs_values_cleanup:
            return
        # The 'values' array may not be borrowed depending on the compilation options.
        # This cleans it up in the case it isn't borrowed
        loop_var = Naming.quick_temp_cname
        code.putln("{")
        code.putln("Py_ssize_t %s;" % loop_var)
        code.putln("for (%s=0; %s < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++%s) {" % (
            loop_var, loop_var, loop_var))
        code.putln("__Pyx_Arg_XDECREF_%s(values[%s]);" % (self.signature.fastvar, loop_var))
        code.putln("}")
        code.putln("}")

    def generate_keyword_unpacking_code(self, min_positional_args, max_positional_args,
                                        has_fixed_positional_count,
                                        has_kw_only_args, all_args, argtuple_error_label, code):
        # Unpacking branch taken when a keyword dict was passed: first copy
        # positional values, then look up each named argument in the kwargs.
        # First we count how many arguments must be passed as positional
        num_required_posonly_args = num_pos_only_args = 0
        for i, arg in enumerate(all_args):
            if arg.pos_only:
                num_pos_only_args += 1
                if not arg.default:
                    num_required_posonly_args += 1

        code.putln('Py_ssize_t kw_args;')
        # copy the values from the args tuple and check that it's not too long
        code.putln('switch (%s) {' % Naming.nargs_cname)
        if self.star_arg:
            code.putln('default:')

        for i in range(max_positional_args-1, num_required_posonly_args-1, -1):
            code.put('case %2d: ' % (i+1))
            code.putln("values[%d] = __Pyx_Arg_%s(%s, %d);" % (
                i, self.signature.fastvar, Naming.args_cname, i))
            code.putln('CYTHON_FALLTHROUGH;')
        if num_required_posonly_args > 0:
            # required positional-only args are copied in one batch
            code.put('case %2d: ' % num_required_posonly_args)
            for i in range(num_required_posonly_args-1, -1, -1):
                code.putln("values[%d] = __Pyx_Arg_%s(%s, %d);" % (
                    i, self.signature.fastvar, Naming.args_cname, i))
            code.putln('break;')
        for i in range(num_required_posonly_args-2, -1, -1):
            code.put('case %2d: ' % (i+1))
            code.putln('CYTHON_FALLTHROUGH;')

        code.put('case 0: ')
        if num_required_posonly_args == 0:
            code.putln('break;')
        else:
            # catch-all for not enough pos-only args passed
            code.put_goto(argtuple_error_label)
        if not self.star_arg:
            code.put('default: ')  # more arguments than allowed
            code.put_goto(argtuple_error_label)
        code.putln('}')

        # The code above is very often (but not always) the same as
        # the optimised non-kwargs tuple unpacking code, so we keep
        # the code block above at the very top, before the following
        # 'external' PyDict_Size() call, to make it easy for the C
        # compiler to merge the two separate tuple unpacking
        # implementations into one when they turn out to be identical.
        # If we received kwargs, fill up the positional/required
        # arguments with values from the kw dict
        self_name_csafe = self.name.as_c_string_literal()

        code.putln('kw_args = __Pyx_NumKwargs_%s(%s);' % (
            self.signature.fastvar, Naming.kwds_cname))
        if self.num_required_args or max_positional_args > 0:
            last_required_arg = -1
            for i, arg in enumerate(all_args):
                if not arg.default:
                    last_required_arg = i
            if last_required_arg < max_positional_args:
                last_required_arg = max_positional_args-1
            if max_positional_args > num_pos_only_args:
                code.putln('switch (%s) {' % Naming.nargs_cname)
            for i, arg in enumerate(all_args[num_pos_only_args:last_required_arg+1], num_pos_only_args):
                if max_positional_args > num_pos_only_args and i <= max_positional_args:
                    if i != num_pos_only_args:
                        code.putln('CYTHON_FALLTHROUGH;')
                    if self.star_arg and i == max_positional_args:
                        code.putln('default:')
                    else:
                        code.putln('case %2d:' % i)
                pystring_cname = code.intern_identifier(arg.entry.name)
                if arg.default:
                    if arg.kw_only:
                        # optional kw-only args are handled separately below
                        continue
                    code.putln('if (kw_args > 0) {')
                    # don't overwrite default argument
                    code.putln('PyObject* value = __Pyx_GetKwValue_%s(%s, %s, %s);' % (
                        self.signature.fastvar, Naming.kwds_cname, Naming.kwvalues_cname, pystring_cname))
                    code.putln('if (value) { values[%d] = __Pyx_Arg_NewRef_%s(value); kw_args--; }' % (
                        i, self.signature.fastvar))
                    code.putln('else if (unlikely(PyErr_Occurred())) %s' % code.error_goto(self.pos))
                    code.putln('}')
                else:
                    # required argument: must be found in kwargs if not positional
                    code.putln('if (likely((values[%d] = __Pyx_GetKwValue_%s(%s, %s, %s)) != 0)) {' % (
                        i, self.signature.fastvar, Naming.kwds_cname, Naming.kwvalues_cname, pystring_cname))
                    code.putln('(void)__Pyx_Arg_NewRef_%s(values[%d]);' % (self.signature.fastvar, i))
                    code.putln('kw_args--;')
                    code.putln('}')
                    code.putln('else if (unlikely(PyErr_Occurred())) %s' % code.error_goto(self.pos))
                    if i < min_positional_args:
                        if i == 0:
                            # special case: we know arg 0 is missing
                            code.put('else ')
                            code.put_goto(argtuple_error_label)
                        else:
                            # print the correct number of values (args or
                            # kwargs) that were passed into positional
                            # arguments up to this point
                            code.putln('else {')
                            code.globalstate.use_utility_code(
                                UtilityCode.load_cached("RaiseArgTupleInvalid", "FunctionArguments.c"))
                            code.put('__Pyx_RaiseArgtupleInvalid(%s, %d, %d, %d, %d); ' % (
                                self_name_csafe, has_fixed_positional_count,
                                min_positional_args, max_positional_args, i))
                            code.putln(code.error_goto(self.pos))
                            code.putln('}')
                    elif arg.kw_only:
                        code.putln('else {')
                        code.globalstate.use_utility_code(
                            UtilityCode.load_cached("RaiseKeywordRequired", "FunctionArguments.c"))
                        code.put('__Pyx_RaiseKeywordRequired(%s, %s); ' % (
                            self_name_csafe, pystring_cname))
                        code.putln(code.error_goto(self.pos))
                        code.putln('}')
            if max_positional_args > num_pos_only_args:
                code.putln('}')

        if has_kw_only_args:
            # unpack optional keyword-only arguments separately because
            # checking for interned strings in a dict is faster than iterating
            self.generate_optional_kwonly_args_unpacking_code(all_args, code)

        code.putln('if (unlikely(kw_args > 0)) {')
        # non-positional/-required kw args left in dict: default args,
        # kw-only args, **kwargs or error
        #
        # This is sort of a catch-all: except for checking required
        # arguments, this will always do the right thing for unpacking
        # keyword arguments, so that we can concentrate on optimising
        # common cases above.
        #
        # ParseOptionalKeywords() needs to know how many of the arguments
        # that could be passed as keywords have in fact been passed as
        # positional args.
        if num_pos_only_args > 0:
            # There are positional-only arguments which we don't want to count,
            # since they cannot be keyword arguments.  Subtract the number of
            # pos-only arguments from the number of positional arguments we got.
            # If we get a negative number then none of the keyword arguments were
            # passed as positional args.
            code.putln('const Py_ssize_t kwd_pos_args = (unlikely(%s < %d)) ? 0 : %s - %d;' % (
                Naming.nargs_cname, num_pos_only_args,
                Naming.nargs_cname, num_pos_only_args,
            ))
        elif max_positional_args > 0:
            code.putln('const Py_ssize_t kwd_pos_args = %s;' % Naming.nargs_cname)

        if max_positional_args == 0:
            pos_arg_count = "0"
        elif self.star_arg:
            # If there is a *arg, the number of used positional args could be larger than
            # the number of possible keyword arguments.  But ParseOptionalKeywords() uses the
            # number of positional args as an index into the keyword argument name array,
            # if this is larger than the number of kwd args we get a segfault.  So round
            # this down to max_positional_args - num_pos_only_args (= num possible kwd args).
            code.putln("const Py_ssize_t used_pos_args = (kwd_pos_args < %d) ? kwd_pos_args : %d;" % (
                max_positional_args - num_pos_only_args, max_positional_args - num_pos_only_args))
            pos_arg_count = "used_pos_args"
        else:
            pos_arg_count = "kwd_pos_args"
        if num_pos_only_args < len(all_args):
            values_array = 'values + %d' % num_pos_only_args
        else:
            values_array = 'values'
        code.globalstate.use_utility_code(
            UtilityCode.load_cached("ParseKeywords", "FunctionArguments.c"))
        code.putln('if (unlikely(__Pyx_ParseOptionalKeywords(%s, %s, %s, %s, %s, %s, %s) < 0)) %s' % (
            Naming.kwds_cname,
            Naming.kwvalues_cname,
            Naming.pykwdlist_cname,
            self.starstar_arg and self.starstar_arg.entry.cname or '0',
            values_array,
            pos_arg_count,
            self_name_csafe,
            code.error_goto(self.pos)))
        code.putln('}')

    def generate_optional_kwonly_args_unpacking_code(self, all_args, code):
        # Look up optional keyword-only arguments by their interned names,
        # which is faster than iterating the whole kwargs dict.
        optional_args = []
        first_optional_arg = -1
        num_posonly_args = 0
        for i, arg in enumerate(all_args):
            if arg.pos_only:
                num_posonly_args += 1
            if not arg.kw_only or not arg.default:
                continue
            if not optional_args:
                first_optional_arg = i
            optional_args.append(arg.name)
        if num_posonly_args > 0:
            # pykwdlist excludes pos-only args, so index with an offset
            posonly_correction = '-%d' % num_posonly_args
        else:
            posonly_correction = ''
        if optional_args:
            if len(optional_args) > 1:
                # if we receive more than the named kwargs, we either have **kwargs
                # (in which case we must iterate anyway) or it's an error (which we
                # also handle during iteration) => skip this part if there are more
                code.putln('if (kw_args > 0 && %s(kw_args <= %d)) {' % (
                    not self.starstar_arg and 'likely' or '',
                    len(optional_args)))
                code.putln('Py_ssize_t index;')
                # not unrolling the loop here reduces the C code overhead
                code.putln('for (index = %d; index < %d && kw_args > 0; index++) {' % (
                    first_optional_arg, first_optional_arg + len(optional_args)))
            else:
                code.putln('if (kw_args == 1) {')
                code.putln('const Py_ssize_t index = %d;' % first_optional_arg)
            code.putln('PyObject* value = __Pyx_GetKwValue_%s(%s, %s, *%s[index%s]);' % (
                self.signature.fastvar,
                Naming.kwds_cname,
                Naming.kwvalues_cname,
                Naming.pykwdlist_cname,
                posonly_correction))
            code.putln('if (value) { values[index] = __Pyx_Arg_NewRef_%s(value); kw_args--; }' %
                       self.signature.fastvar)
            code.putln('else if (unlikely(PyErr_Occurred())) %s' % code.error_goto(self.pos))
            if len(optional_args) > 1:
                code.putln('}')
            code.putln('}')

    def generate_argument_conversion_code(self, code):
        # Generate code to convert arguments from signature type to
        # declared type, if needed.  Also copies signature arguments
        # into closure fields.
        for arg in self.args:
            if arg.needs_conversion:
                self.generate_arg_conversion(arg, code)

    def generate_arg_conversion(self, arg, code):
        # Generate conversion code for one argument.
        # Dispatch on the direction of the conversion between the header
        # (signature) type and the declared argument type.
        old_type = arg.hdr_type
        new_type = arg.type
        if old_type.is_pyobject:
            if arg.default:
                # optional arg: convert only when a value was actually passed
                code.putln("if (%s) {" % arg.hdr_cname)
            else:
                code.putln("assert(%s); {" % arg.hdr_cname)
            self.generate_arg_conversion_from_pyobject(arg, code)
            code.putln("}")
        elif new_type.is_pyobject:
            self.generate_arg_conversion_to_pyobject(arg, code)
        else:
            if new_type.assignable_from(old_type):
                code.putln("%s = %s;" % (arg.entry.cname, arg.hdr_cname))
            else:
                error(arg.pos, "Cannot convert 1 argument from '%s' to '%s'" % (old_type, new_type))

    def generate_arg_conversion_from_pyobject(self, arg, code):
        # Convert a PyObject header value to the declared C type.
        new_type = arg.type
        # copied from CoerceFromPyTypeNode
        if new_type.from_py_function:
            code.putln(new_type.from_py_call_code(
                arg.hdr_cname,
                arg.entry.cname,
                arg.pos,
                code,
            ))
        else:
            error(arg.pos, "Cannot convert Python object argument to type '%s'" % new_type)

    def generate_arg_conversion_to_pyobject(self, arg, code):
        # Convert a C-typed header value to a new PyObject reference.
        old_type = arg.hdr_type
        func = old_type.to_py_function
        if func:
            code.putln("%s = %s(%s); %s" % (
                arg.entry.cname,
                func,
                arg.hdr_cname,
                code.error_goto_if_null(arg.entry.cname, arg.pos)))
            code.put_var_gotref(arg.entry)
        else:
            error(arg.pos, "Cannot convert argument of type '%s' to Python object" % old_type)

    def generate_argument_type_tests(self, code):
        # Generate type tests for args whose signature
        # type is PyObject * and whose declared type is
        # a subtype thereof.
        for arg in self.args:
            if arg.needs_type_test:
                self.generate_arg_type_test(arg, code)
            elif not arg.accept_none and (arg.type.is_pyobject or
                                          arg.type.is_buffer or
                                          arg.type.is_memoryviewslice):
                self.generate_arg_none_check(arg, code)
        if self.target.entry.is_special:
            for n in reversed(range(len(self.args), self.signature.max_num_fixed_args())):
                # for special functions with optional args (e.g.
power which can + # take 2 or 3 args), unused args are None since this is what the + # compilers sets + if self.target.entry.name == "__ipow__": + # Bug in Python < 3.8 - __ipow__ is used as a binary function + # and attempts to access the third argument will always fail + code.putln("#if PY_VERSION_HEX >= 0x03080000") + code.putln("if (unlikely(unused_arg_%s != Py_None)) {" % n) + code.putln( + 'PyErr_SetString(PyExc_TypeError, ' + '"%s() takes %s arguments but %s were given");' % ( + self.target.entry.qualified_name, self.signature.max_num_fixed_args(), n)) + code.putln("%s;" % code.error_goto(self.pos)) + code.putln("}") + if self.target.entry.name == "__ipow__": + code.putln("#endif /*PY_VERSION_HEX >= 0x03080000*/") + if self.target.entry.name == "__ipow__" and len(self.args) != 2: + # It's basically impossible to safely support it: + # Class().__ipow__(1) is guaranteed to crash. + # Therefore, raise an error. + # Use "if" instead of "#if" to avoid warnings about unused variables + code.putln("if ((PY_VERSION_HEX < 0x03080000)) {") + code.putln( + 'PyErr_SetString(PyExc_NotImplementedError, ' + '"3-argument %s cannot be used in Python<3.8");' % ( + self.target.entry.qualified_name)) + code.putln("%s;" % code.error_goto(self.pos)) + code.putln('}') + + def error_value(self): + return self.signature.error_value + + +class GeneratorDefNode(DefNode): + # Generator function node that creates a new generator instance when called. 
+ # + # gbody GeneratorBodyDefNode the function implementing the generator + # + + is_generator = True + is_iterable_coroutine = False + gen_type_name = 'Generator' + needs_closure = True + + child_attrs = DefNode.child_attrs + ["gbody"] + + def __init__(self, pos, **kwargs): + # XXX: don't actually needs a body + kwargs['body'] = StatListNode(pos, stats=[], is_terminator=True) + super(GeneratorDefNode, self).__init__(pos, **kwargs) + + def analyse_declarations(self, env): + super(GeneratorDefNode, self).analyse_declarations(env) + self.gbody.local_scope = self.local_scope + self.gbody.analyse_declarations(env) + + def generate_function_body(self, env, code): + body_cname = self.gbody.entry.func_cname + name = code.intern_identifier(self.name) + qualname = code.intern_identifier(self.qualname) + module_name = code.intern_identifier(self.module_name) + + code.putln('{') + code.putln('__pyx_CoroutineObject *gen = __Pyx_%s_New(' + '(__pyx_coroutine_body_t) %s, %s, (PyObject *) %s, %s, %s, %s); %s' % ( + self.gen_type_name, + body_cname, self.code_object.calculate_result_code(code) if self.code_object else 'NULL', + Naming.cur_scope_cname, name, qualname, module_name, + code.error_goto_if_null('gen', self.pos))) + code.put_decref(Naming.cur_scope_cname, py_object_type) + if self.requires_classobj: + classobj_cname = 'gen->classobj' + code.putln('%s = __Pyx_CyFunction_GetClassObj(%s);' % ( + classobj_cname, Naming.self_cname)) + code.put_incref(classobj_cname, py_object_type) + code.put_giveref(classobj_cname, py_object_type) + code.put_finish_refcount_context() + code.putln('return (PyObject *) gen;') + code.putln('}') + + def generate_function_definitions(self, env, code): + env.use_utility_code(UtilityCode.load_cached(self.gen_type_name, "Coroutine.c")) + self.gbody.generate_function_header(code, proto=True) + super(GeneratorDefNode, self).generate_function_definitions(env, code) + self.gbody.generate_function_definitions(env, code) + + +class 
AsyncDefNode(GeneratorDefNode): + gen_type_name = 'Coroutine' + is_coroutine = True + + +class IterableAsyncDefNode(AsyncDefNode): + gen_type_name = 'IterableCoroutine' + is_iterable_coroutine = True + + +class AsyncGenNode(AsyncDefNode): + gen_type_name = 'AsyncGen' + is_asyncgen = True + + +class GeneratorBodyDefNode(DefNode): + # Main code body of a generator implemented as a DefNode. + # + + is_generator_body = True + is_inlined = False + is_async_gen_body = False + inlined_comprehension_type = None # container type for inlined comprehensions + + def __init__(self, pos=None, name=None, body=None, is_async_gen_body=False): + super(GeneratorBodyDefNode, self).__init__( + pos=pos, body=body, name=name, is_async_gen_body=is_async_gen_body, + doc=None, args=[], star_arg=None, starstar_arg=None) + + def declare_generator_body(self, env): + prefix = env.next_id(env.scope_prefix) + name = env.next_id('generator') + cname = Naming.genbody_prefix + prefix + name + entry = env.declare_var(None, py_object_type, self.pos, + cname=cname, visibility='private') + entry.func_cname = cname + entry.qualified_name = EncodedString(self.name) + # Work-around for https://github.com/cython/cython/issues/1699 + # We don't currently determine whether the generator entry is used or not, + # so mark it as used to avoid false warnings. 
+ entry.used = True + self.entry = entry + + def analyse_declarations(self, env): + self.analyse_argument_types(env) + self.declare_generator_body(env) + + def generate_function_header(self, code, proto=False): + header = "static PyObject *%s(__pyx_CoroutineObject *%s, CYTHON_UNUSED PyThreadState *%s, PyObject *%s)" % ( + self.entry.func_cname, + Naming.generator_cname, + Naming.local_tstate_cname, + Naming.sent_value_cname) + if proto: + code.putln('%s; /* proto */' % header) + else: + code.putln('%s /* generator body */\n{' % header) + + def generate_function_definitions(self, env, code): + lenv = self.local_scope + + # Generate closure function definitions + self.body.generate_function_definitions(lenv, code) + + # Generate C code for header and body of function + code.enter_cfunc_scope(lenv) + code.return_from_error_cleanup_label = code.new_label() + + # ----- Top-level constants used by this function + code.mark_pos(self.pos) + self.generate_cached_builtins_decls(lenv, code) + # ----- Function header + code.putln("") + self.generate_function_header(code) + closure_init_code = code.insertion_point() + # ----- Local variables + code.putln("PyObject *%s = NULL;" % Naming.retval_cname) + tempvardecl_code = code.insertion_point() + code.put_declare_refcount_context() + code.put_setup_refcount_context(self.entry.name or self.entry.qualified_name) + profile = code.globalstate.directives['profile'] + linetrace = code.globalstate.directives['linetrace'] + if profile or linetrace: + tempvardecl_code.put_trace_declarations() + code.funcstate.can_trace = True + code_object = self.code_object.calculate_result_code(code) if self.code_object else None + code.put_trace_frame_init(code_object) + + # ----- Resume switch point. 
+ code.funcstate.init_closure_temps(lenv.scope_class.type.scope) + resume_code = code.insertion_point() + first_run_label = code.new_label('first_run') + code.use_label(first_run_label) + code.put_label(first_run_label) + code.putln('%s' % + (code.error_goto_if_null(Naming.sent_value_cname, self.pos))) + + # ----- prepare target container for inlined comprehension + if self.is_inlined and self.inlined_comprehension_type is not None: + target_type = self.inlined_comprehension_type + if target_type is Builtin.list_type: + comp_init = 'PyList_New(0)' + elif target_type is Builtin.set_type: + comp_init = 'PySet_New(NULL)' + elif target_type is Builtin.dict_type: + comp_init = 'PyDict_New()' + else: + raise InternalError( + "invalid type of inlined comprehension: %s" % target_type) + code.putln("%s = %s; %s" % ( + Naming.retval_cname, comp_init, + code.error_goto_if_null(Naming.retval_cname, self.pos))) + code.put_gotref(Naming.retval_cname, py_object_type) + + # ----- Function body + self.generate_function_body(env, code) + # ----- Closure initialization + if lenv.scope_class.type.scope.var_entries: + closure_init_code.putln('%s = %s;' % ( + lenv.scope_class.type.declaration_code(Naming.cur_scope_cname), + lenv.scope_class.type.cast_code('%s->closure' % + Naming.generator_cname))) + # FIXME: this silences a potential "unused" warning => try to avoid unused closures in more cases + code.putln("CYTHON_MAYBE_UNUSED_VAR(%s);" % Naming.cur_scope_cname) + + if profile or linetrace: + code.funcstate.can_trace = False + + code.mark_pos(self.pos) + code.putln("") + code.putln("/* function exit code */") + + # on normal generator termination, we do not take the exception propagation + # path: no traceback info is required and not creating it is much faster + if not self.is_inlined and not self.body.is_terminator: + if self.is_async_gen_body: + code.globalstate.use_utility_code( + UtilityCode.load_cached("StopAsyncIteration", "Coroutine.c")) + code.putln('PyErr_SetNone(%s);' % ( 
+ '__Pyx_PyExc_StopAsyncIteration' if self.is_async_gen_body else 'PyExc_StopIteration')) + # ----- Error cleanup + if code.label_used(code.error_label): + if not self.body.is_terminator: + code.put_goto(code.return_label) + code.put_label(code.error_label) + if self.is_inlined and self.inlined_comprehension_type is not None: + code.put_xdecref_clear(Naming.retval_cname, py_object_type) + if Future.generator_stop in env.global_scope().context.future_directives: + # PEP 479: turn accidental StopIteration exceptions into a RuntimeError + code.globalstate.use_utility_code(UtilityCode.load_cached("pep479", "Coroutine.c")) + code.putln("__Pyx_Generator_Replace_StopIteration(%d);" % bool(self.is_async_gen_body)) + for cname, type in code.funcstate.all_managed_temps(): + code.put_xdecref(cname, type) + code.put_add_traceback(self.entry.qualified_name) + + # ----- Non-error return cleanup + code.put_label(code.return_label) + if self.is_inlined: + code.put_xgiveref(Naming.retval_cname, py_object_type) + else: + code.put_xdecref_clear(Naming.retval_cname, py_object_type) + # For Py3.7, clearing is already done below. 
+ code.putln("#if !CYTHON_USE_EXC_INFO_STACK") + code.putln("__Pyx_Coroutine_ResetAndClearException(%s);" % Naming.generator_cname) + code.putln("#endif") + code.putln('%s->resume_label = -1;' % Naming.generator_cname) + # clean up as early as possible to help breaking any reference cycles + code.putln('__Pyx_Coroutine_clear((PyObject*)%s);' % Naming.generator_cname) + if profile or linetrace: + code.put_trace_return(Naming.retval_cname, + nogil=not code.funcstate.gil_owned) + code.put_finish_refcount_context() + code.putln("return %s;" % Naming.retval_cname) + code.putln("}") + + # ----- Go back and insert temp variable declarations + tempvardecl_code.put_temp_declarations(code.funcstate) + # ----- Generator resume code + if profile or linetrace: + resume_code.put_trace_call(self.entry.qualified_name, self.pos, + nogil=not code.funcstate.gil_owned) + resume_code.putln("switch (%s->resume_label) {" % ( + Naming.generator_cname)) + + resume_code.putln("case 0: goto %s;" % first_run_label) + + for i, label in code.yield_labels: + resume_code.putln("case %d: goto %s;" % (i, label)) + resume_code.putln("default: /* CPython raises the right error here */") + if profile or linetrace: + resume_code.put_trace_return("Py_None", + nogil=not code.funcstate.gil_owned) + resume_code.put_finish_refcount_context() + resume_code.putln("return NULL;") + resume_code.putln("}") + + code.exit_cfunc_scope() + + +class OverrideCheckNode(StatNode): + # A Node for dispatching to the def method if it + # is overridden. + # + # py_func + # + # args + # func_temp + # body + + child_attrs = ['body'] + + body = None + + def analyse_expressions(self, env): + self.args = env.arg_entries + if self.py_func.is_module_scope: + first_arg = 0 + else: + first_arg = 1 + from . 
import ExprNodes + self.func_node = ExprNodes.RawCNameExprNode(self.pos, py_object_type) + call_node = ExprNodes.SimpleCallNode( + self.pos, function=self.func_node, + args=[ExprNodes.NameNode(self.pos, name=arg.name) + for arg in self.args[first_arg:]]) + if env.return_type.is_void or env.return_type.is_returncode: + self.body = StatListNode(self.pos, stats=[ + ExprStatNode(self.pos, expr=call_node), + ReturnStatNode(self.pos, value=None)]) + else: + self.body = ReturnStatNode(self.pos, value=call_node) + self.body = self.body.analyse_expressions(env) + return self + + def generate_execution_code(self, code): + # For fused functions, look up the dispatch function, not the specialisation. + method_entry = self.py_func.fused_py_func.entry if self.py_func.fused_py_func else self.py_func.entry + interned_attr_cname = code.intern_identifier(method_entry.name) + + # Check to see if we are an extension type + if self.py_func.is_module_scope: + self_arg = "((PyObject *)%s)" % Naming.module_cname + else: + self_arg = "((PyObject *)%s)" % self.args[0].cname + code.putln("/* Check if called by wrapper */") + code.putln("if (unlikely(%s)) ;" % Naming.skip_dispatch_cname) + code.putln("/* Check if overridden in Python */") + if self.py_func.is_module_scope: + code.putln("else {") + else: + code.putln("else if (unlikely((Py_TYPE(%s)->tp_dictoffset != 0) || " + "__Pyx_PyType_HasFeature(Py_TYPE(%s), (Py_TPFLAGS_IS_ABSTRACT | Py_TPFLAGS_HEAPTYPE)))) {" % ( + self_arg, self_arg)) + + code.putln("#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_PYTYPE_LOOKUP && CYTHON_USE_TYPE_SLOTS") + code.globalstate.use_utility_code( + UtilityCode.load_cached("PyDictVersioning", "ObjectHandling.c")) + # TODO: remove the object dict version check by 'inlining' the getattr implementation for methods. + # This would allow checking the dict versions around _PyType_Lookup() if it returns a descriptor, + # and would (tada!) 
make this check a pure type based thing instead of supporting only a single + # instance at a time. + code.putln("static PY_UINT64_T %s = __PYX_DICT_VERSION_INIT, %s = __PYX_DICT_VERSION_INIT;" % ( + Naming.tp_dict_version_temp, Naming.obj_dict_version_temp)) + code.putln("if (unlikely(!__Pyx_object_dict_version_matches(%s, %s, %s))) {" % ( + self_arg, Naming.tp_dict_version_temp, Naming.obj_dict_version_temp)) + code.putln("PY_UINT64_T %s = __Pyx_get_tp_dict_version(%s);" % ( + Naming.type_dict_guard_temp, self_arg)) + code.putln("#endif") + + func_node_temp = code.funcstate.allocate_temp(py_object_type, manage_ref=True) + self.func_node.set_cname(func_node_temp) + # need to get attribute manually--scope would return cdef method + code.globalstate.use_utility_code( + UtilityCode.load_cached("PyObjectGetAttrStr", "ObjectHandling.c")) + code.putln("%s = __Pyx_PyObject_GetAttrStr(%s, %s); %s" % ( + func_node_temp, self_arg, interned_attr_cname, + code.error_goto_if_null(func_node_temp, self.pos))) + code.put_gotref(func_node_temp, py_object_type) + + code.putln("if (!__Pyx_IsSameCFunction(%s, (void*) %s)) {" % (func_node_temp, method_entry.func_cname)) + self.body.generate_execution_code(code) + code.putln("}") + + # NOTE: it's not 100% sure that we catch the exact versions here that were used for the lookup, + # but it is very unlikely that the versions change during lookup, and the type dict safe guard + # should increase the chance of detecting such a case. + code.putln("#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_PYTYPE_LOOKUP && CYTHON_USE_TYPE_SLOTS") + code.putln("%s = __Pyx_get_tp_dict_version(%s);" % ( + Naming.tp_dict_version_temp, self_arg)) + code.putln("%s = __Pyx_get_object_dict_version(%s);" % ( + Naming.obj_dict_version_temp, self_arg)) + # Safety check that the type dict didn't change during the lookup. 
Since CPython looks up the + # attribute (descriptor) first in the type dict and then in the instance dict or through the + # descriptor, the only really far-away lookup when we get here is one in the type dict. So we + # double check the type dict version before and afterwards to guard against later changes of + # the type dict during the lookup process. + code.putln("if (unlikely(%s != %s)) {" % ( + Naming.type_dict_guard_temp, Naming.tp_dict_version_temp)) + code.putln("%s = %s = __PYX_DICT_VERSION_INIT;" % ( + Naming.tp_dict_version_temp, Naming.obj_dict_version_temp)) + code.putln("}") + code.putln("#endif") + + code.put_decref_clear(func_node_temp, PyrexTypes.py_object_type) + code.funcstate.release_temp(func_node_temp) + + code.putln("#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_PYTYPE_LOOKUP && CYTHON_USE_TYPE_SLOTS") + code.putln("}") + code.putln("#endif") + + code.putln("}") + + +class ClassDefNode(StatNode, BlockNode): + pass + + +class PyClassDefNode(ClassDefNode): + # A Python class definition. 
+ # + # name EncodedString Name of the class + # doc string or None The class docstring + # body StatNode Attribute definition code + # entry Symtab.Entry + # scope PyClassScope + # decorators [DecoratorNode] list of decorators or None + # bases ExprNode Expression that evaluates to a tuple of base classes + # + # The following subnodes are constructed internally: + # + # doc_node NameNode '__doc__' name that is made available to the class body + # dict DictNode Class dictionary or Py3 namespace + # classobj ClassNode Class object + # target NameNode Variable to assign class object to + # orig_bases None or ExprNode "bases" before transformation by PEP560 __mro_entries__, + # used to create the __orig_bases__ attribute + + child_attrs = ["doc_node", "body", "dict", "metaclass", "mkw", "bases", "class_result", + "target", "class_cell", "decorators", "orig_bases"] + decorators = None + class_result = None + is_py3_style_class = False # Python3 style class (kwargs) + metaclass = None + mkw = None + doc_node = None + orig_bases = None + + def __init__(self, pos, name, bases, doc, body, decorators=None, + keyword_args=None, force_py3_semantics=False): + StatNode.__init__(self, pos) + self.name = name + self.doc = doc + self.body = body + self.decorators = decorators + self.bases = bases + from . 
import ExprNodes + if self.doc and Options.docstrings: + doc = embed_position(self.pos, self.doc) + doc_node = ExprNodes.StringNode(pos, value=doc) + self.doc_node = ExprNodes.NameNode(name=EncodedString('__doc__'), type=py_object_type, pos=pos) + else: + doc_node = None + + allow_py2_metaclass = not force_py3_semantics + if keyword_args: + allow_py2_metaclass = False + self.is_py3_style_class = True + if keyword_args.is_dict_literal: + if keyword_args.key_value_pairs: + for i, item in list(enumerate(keyword_args.key_value_pairs))[::-1]: + if item.key.value == 'metaclass': + if self.metaclass is not None: + error(item.pos, "keyword argument 'metaclass' passed multiple times") + # special case: we already know the metaclass, + # so we don't need to do the "build kwargs, + # find metaclass" dance at runtime + self.metaclass = item.value + del keyword_args.key_value_pairs[i] + self.mkw = keyword_args + else: + assert self.metaclass is not None + else: + # MergedDictNode + self.mkw = ExprNodes.ProxyNode(keyword_args) + + if force_py3_semantics or self.bases or self.mkw or self.metaclass: + if self.metaclass is None: + if keyword_args and not keyword_args.is_dict_literal: + # **kwargs may contain 'metaclass' arg + mkdict = self.mkw + else: + mkdict = None + if (not mkdict and + self.bases.is_sequence_constructor and + not self.bases.args): + pass # no base classes => no inherited metaclass + else: + self.metaclass = ExprNodes.PyClassMetaclassNode( + pos, class_def_node=self) + needs_metaclass_calculation = False + else: + needs_metaclass_calculation = True + + self.dict = ExprNodes.PyClassNamespaceNode( + pos, name=name, doc=doc_node, class_def_node=self) + self.classobj = ExprNodes.Py3ClassNode( + pos, name=name, class_def_node=self, doc=doc_node, + calculate_metaclass=needs_metaclass_calculation, + allow_py2_metaclass=allow_py2_metaclass, + force_type=force_py3_semantics, + ) + else: + # no bases, no metaclass => old style class creation + self.dict = 
ExprNodes.DictNode(pos, key_value_pairs=[]) + self.classobj = ExprNodes.ClassNode( + pos, name=name, class_def_node=self, doc=doc_node) + + self.target = ExprNodes.NameNode(pos, name=name) + self.class_cell = ExprNodes.ClassCellInjectorNode(self.pos) + + def as_cclass(self): + """ + Return this node as if it were declared as an extension class + """ + if self.is_py3_style_class: + error(self.classobj.pos, "Python3 style class could not be represented as C class") + return + + from . import ExprNodes + return CClassDefNode(self.pos, + visibility='private', + module_name=None, + class_name=self.name, + bases=self.bases or ExprNodes.TupleNode(self.pos, args=[]), + decorators=self.decorators, + body=self.body, + in_pxd=False, + doc=self.doc) + + def create_scope(self, env): + genv = env + while genv.is_py_class_scope or genv.is_c_class_scope: + genv = genv.outer_scope + cenv = self.scope = PyClassScope(name=self.name, outer_scope=genv) + return cenv + + def analyse_declarations(self, env): + unwrapped_class_result = class_result = self.classobj + if self.decorators: + from .ExprNodes import SimpleCallNode + for decorator in self.decorators[::-1]: + class_result = SimpleCallNode( + decorator.pos, + function=decorator.decorator, + args=[class_result]) + self.decorators = None + self.class_result = class_result + if self.bases: + self.bases.analyse_declarations(env) + if self.mkw: + self.mkw.analyse_declarations(env) + self.class_result.analyse_declarations(env) + self.target.analyse_target_declaration(env) + cenv = self.create_scope(env) + cenv.directives = env.directives + cenv.class_obj_cname = self.target.entry.cname + if self.doc_node: + self.doc_node.analyse_target_declaration(cenv) + self.body.analyse_declarations(cenv) + unwrapped_class_result.analyse_annotations(cenv) + + update_bases_functype = PyrexTypes.CFuncType( + PyrexTypes.py_object_type, [ + PyrexTypes.CFuncTypeArg("bases", PyrexTypes.py_object_type, None) + ]) + + def analyse_expressions(self, env): + if 
self.bases and not (self.bases.is_sequence_constructor and len(self.bases.args) == 0): + from .ExprNodes import PythonCapiCallNode, CloneNode + # handle the Python 3.7 __mro_entries__ transformation + orig_bases = self.bases.analyse_expressions(env) + self.bases = PythonCapiCallNode(orig_bases.pos, + function_name="__Pyx_PEP560_update_bases", + func_type=self.update_bases_functype, + utility_code=UtilityCode.load_cached('Py3UpdateBases', 'ObjectHandling.c'), + args=[CloneNode(orig_bases)]) + self.orig_bases = orig_bases + if self.bases: + self.bases = self.bases.analyse_expressions(env) + if self.mkw: + self.mkw = self.mkw.analyse_expressions(env) + if self.metaclass: + self.metaclass = self.metaclass.analyse_expressions(env) + self.dict = self.dict.analyse_expressions(env) + self.class_result = self.class_result.analyse_expressions(env) + cenv = self.scope + self.body = self.body.analyse_expressions(cenv) + self.target = self.target.analyse_target_expression(env, self.classobj) + self.class_cell = self.class_cell.analyse_expressions(cenv) + return self + + def generate_function_definitions(self, env, code): + self.generate_lambda_definitions(self.scope, code) + self.body.generate_function_definitions(self.scope, code) + + def generate_execution_code(self, code): + code.mark_pos(self.pos) + code.pyclass_stack.append(self) + cenv = self.scope + if self.orig_bases: + self.orig_bases.generate_evaluation_code(code) + if self.bases: + self.bases.generate_evaluation_code(code) + if self.mkw: + self.mkw.generate_evaluation_code(code) + if self.metaclass: + self.metaclass.generate_evaluation_code(code) + self.dict.generate_evaluation_code(code) + if self.orig_bases: + # update __orig_bases__ if needed + code.putln("if (%s != %s) {" % (self.bases.result(), self.orig_bases.result())) + code.putln( + code.error_goto_if_neg('PyDict_SetItemString(%s, "__orig_bases__", %s)' % ( + self.dict.result(), self.orig_bases.result()), + self.pos + )) + code.putln("}") + 
self.orig_bases.generate_disposal_code(code) + self.orig_bases.free_temps(code) + cenv.namespace_cname = cenv.class_obj_cname = self.dict.result() + + class_cell = self.class_cell + if class_cell is not None and not class_cell.is_active: + class_cell = None + + if class_cell is not None: + class_cell.generate_evaluation_code(code) + self.body.generate_execution_code(code) + self.class_result.generate_evaluation_code(code) + if class_cell is not None: + class_cell.generate_injection_code( + code, self.class_result.result()) + if class_cell is not None: + class_cell.generate_disposal_code(code) + class_cell.free_temps(code) + + cenv.namespace_cname = cenv.class_obj_cname = self.classobj.result() + self.target.generate_assignment_code(self.class_result, code) + self.dict.generate_disposal_code(code) + self.dict.free_temps(code) + if self.metaclass: + self.metaclass.generate_disposal_code(code) + self.metaclass.free_temps(code) + if self.mkw: + self.mkw.generate_disposal_code(code) + self.mkw.free_temps(code) + if self.bases: + self.bases.generate_disposal_code(code) + self.bases.free_temps(code) + code.pyclass_stack.pop() + + +class CClassDefNode(ClassDefNode): + # An extension type definition. 
+ # + # visibility 'private' or 'public' or 'extern' + # typedef_flag boolean + # api boolean + # module_name string or None For import of extern type objects + # class_name string Unqualified name of class + # as_name string or None Name to declare as in this scope + # bases TupleNode Base class(es) + # objstruct_name string or None Specified C name of object struct + # typeobj_name string or None Specified C name of type object + # check_size 'warn', 'error', 'ignore' What to do if tp_basicsize does not match + # in_pxd boolean Is in a .pxd file + # decorators [DecoratorNode] list of decorators or None + # doc string or None + # body StatNode or None + # entry Symtab.Entry + # base_type PyExtensionType or None + # buffer_defaults_node DictNode or None Declares defaults for a buffer + # buffer_defaults_pos + + child_attrs = ["body"] + buffer_defaults_node = None + buffer_defaults_pos = None + typedef_flag = False + api = False + objstruct_name = None + typeobj_name = None + check_size = None + decorators = None + shadow = False + + @property + def punycode_class_name(self): + return punycodify_name(self.class_name) + + def buffer_defaults(self, env): + if not hasattr(self, '_buffer_defaults'): + from . 
import Buffer + if self.buffer_defaults_node: + self._buffer_defaults = Buffer.analyse_buffer_options( + self.buffer_defaults_pos, + env, [], self.buffer_defaults_node, + need_complete=False) + else: + self._buffer_defaults = None + return self._buffer_defaults + + def declare(self, env): + if self.module_name and self.visibility != 'extern': + module_path = self.module_name.split(".") + home_scope = env.find_imported_module(module_path, self.pos) + if not home_scope: + return None + else: + home_scope = env + + self.entry = home_scope.declare_c_class( + name=self.class_name, + pos=self.pos, + defining=0, + implementing=0, + module_name=self.module_name, + base_type=None, + objstruct_cname=self.objstruct_name, + typeobj_cname=self.typeobj_name, + visibility=self.visibility, + typedef_flag=self.typedef_flag, + check_size = self.check_size, + api=self.api, + buffer_defaults=self.buffer_defaults(env), + shadow=self.shadow) + if self.bases and len(self.bases.args) > 1: + self.entry.type.multiple_bases = True + + def _handle_cclass_decorators(self, env): + extra_directives = {} + if not self.decorators: + return extra_directives + + from . import ExprNodes + + remaining_decorators = [] + + for original_decorator in self.decorators: + decorator = original_decorator.decorator + # entries aren't set at this point, so unfortunately we can't just do + # decorator.get_known_standard_library_import(). 
+ # Instead we have to manually look it up + decorator_call = None + if isinstance(decorator, ExprNodes.CallNode): + decorator_call = decorator + decorator = decorator.function + known_name = Builtin.exprnode_to_known_standard_library_name(decorator, env) + if known_name == 'functools.total_ordering': + if decorator_call: + error(decorator_call.pos, "total_ordering cannot be called.") + extra_directives["total_ordering"] = True + continue + elif known_name == "dataclasses.dataclass": + args = None + kwds = {} + if decorator_call: + if isinstance(decorator_call, ExprNodes.SimpleCallNode): + args = decorator_call.args + else: + args = decorator_call.positional_args.args + kwds_ = decorator_call.keyword_args + if kwds_: + kwds = kwds_.as_python_dict() + extra_directives[known_name] = (args, kwds) + continue + remaining_decorators.append(original_decorator) + if remaining_decorators: + error(remaining_decorators[0].pos, "Cdef functions/classes cannot take arbitrary decorators.") + self.decorators = remaining_decorators + return extra_directives + + def analyse_declarations(self, env): + #print "CClassDefNode.analyse_declarations:", self.class_name + #print "...visibility =", self.visibility + #print "...module_name =", self.module_name + + if env.in_cinclude and not self.objstruct_name: + error(self.pos, "Object struct name specification required for C class defined in 'extern from' block") + extra_directives = self._handle_cclass_decorators(env) + self.base_type = None + # Now that module imports are cached, we need to + # import the modules for extern classes. 
+ if self.module_name: + self.module = None + for module in env.cimported_modules: + if module.name == self.module_name: + self.module = module + if self.module is None: + self.module = ModuleScope(self.module_name, None, env.context) + self.module.has_extern_class = 1 + env.add_imported_module(self.module) + + if self.bases.args: + base = self.bases.args[0] + base_type = base.analyse_as_type(env) + if base_type in (PyrexTypes.c_int_type, PyrexTypes.c_long_type, PyrexTypes.c_float_type): + # Use the Python rather than C variant of these types. + base_type = env.lookup(base_type.sign_and_name()).type + if base_type is None: + error(base.pos, "First base of '%s' is not an extension type" % self.class_name) + elif base_type == PyrexTypes.py_object_type: + base_class_scope = None + elif not base_type.is_extension_type and \ + not (base_type.is_builtin_type and base_type.objstruct_cname): + error(base.pos, "'%s' is not an extension type" % base_type) + elif not base_type.is_complete(): + error(base.pos, "Base class '%s' of type '%s' is incomplete" % ( + base_type.name, self.class_name)) + elif base_type.scope and base_type.scope.directives and \ + base_type.is_final_type: + error(base.pos, "Base class '%s' of type '%s' is final" % ( + base_type, self.class_name)) + elif base_type.is_builtin_type and \ + base_type.name in ('tuple', 'bytes'): + # str in Py2 is also included in this, but now checked at run-time + error(base.pos, "inheritance from PyVarObject types like '%s' is not currently supported" + % base_type.name) + else: + self.base_type = base_type + if env.directives.get('freelist', 0) > 0 and base_type != PyrexTypes.py_object_type: + warning(self.pos, "freelists cannot be used on subtypes, only the base class can manage them", 1) + + has_body = self.body is not None + if has_body and self.base_type and not self.base_type.scope: + # To properly initialize inherited attributes, the base type must + # be analysed before this type. 
+ self.base_type.defered_declarations.append(lambda : self.analyse_declarations(env)) + return + + if self.module_name and self.visibility != 'extern': + module_path = self.module_name.split(".") + home_scope = env.find_imported_module(module_path, self.pos) + if not home_scope: + return + else: + home_scope = env + + if self.visibility == 'extern': + if (self.module_name == '__builtin__' and + self.class_name in Builtin.builtin_types and + env.qualified_name[:8] != 'cpython.'): # allow overloaded names for cimporting from cpython + warning(self.pos, "%s already a builtin Cython type" % self.class_name, 1) + + self.entry = home_scope.declare_c_class( + name=self.class_name, + pos=self.pos, + defining=has_body and self.in_pxd, + implementing=has_body and not self.in_pxd, + module_name=self.module_name, + base_type=self.base_type, + objstruct_cname=self.objstruct_name, + typeobj_cname=self.typeobj_name, + check_size=self.check_size, + visibility=self.visibility, + typedef_flag=self.typedef_flag, + api=self.api, + buffer_defaults=self.buffer_defaults(env), + shadow=self.shadow) + if self.bases and len(self.bases.args) > 1: + self.entry.type.multiple_bases = True + + if self.shadow: + home_scope.lookup(self.class_name).as_variable = self.entry + if home_scope is not env and self.visibility == 'extern': + env.add_imported_entry(self.class_name, self.entry, self.pos) + self.scope = scope = self.entry.type.scope + if scope is not None: + if extra_directives: + scope.directives = env.directives.copy() + scope.directives.update(extra_directives) + else: + scope.directives = env.directives + if "dataclasses.dataclass" in scope.directives: + is_frozen = False + # Retrieve the @dataclass config (args, kwargs), as passed into the decorator. 
+ dataclass_config = scope.directives["dataclasses.dataclass"] + if dataclass_config: + decorator_kwargs = dataclass_config[1] + frozen_flag = decorator_kwargs.get('frozen') + is_frozen = frozen_flag and frozen_flag.is_literal and frozen_flag.value + scope.is_c_dataclass_scope = "frozen" if is_frozen else True + + if self.doc and Options.docstrings: + scope.doc = embed_position(self.pos, self.doc) + + if has_body: + self.body.analyse_declarations(scope) + dict_entry = self.scope.lookup_here("__dict__") + if dict_entry and dict_entry.is_variable and (not scope.defined and not scope.implemented): + dict_entry.getter_cname = self.scope.mangle_internal("__dict__getter") + self.scope.declare_property("__dict__", dict_entry.doc, dict_entry.pos) + if self.in_pxd: + scope.defined = 1 + else: + scope.implemented = 1 + + if len(self.bases.args) > 1: + if not has_body or self.in_pxd: + error(self.bases.args[1].pos, "Only declare first base in declaration.") + # At runtime, we check that the other bases are heap types + # and that a __dict__ is added if required. + for other_base in self.bases.args[1:]: + if other_base.analyse_as_type(env): + error(other_base.pos, "Only one extension type base class allowed.") + self.entry.type.early_init = 0 + from . 
import ExprNodes + self.type_init_args = ExprNodes.TupleNode( + self.pos, + args=[ExprNodes.IdentifierStringNode(self.pos, value=self.class_name), + self.bases, + ExprNodes.DictNode(self.pos, key_value_pairs=[])]) + elif self.base_type: + self.entry.type.early_init = self.base_type.is_external or self.base_type.early_init + self.type_init_args = None + else: + self.entry.type.early_init = 1 + self.type_init_args = None + + env.allocate_vtable_names(self.entry) + + for thunk in self.entry.type.defered_declarations: + thunk() + + def analyse_expressions(self, env): + if self.body: + scope = self.entry.type.scope + self.body = self.body.analyse_expressions(scope) + if self.type_init_args: + self.type_init_args.analyse_expressions(env) + return self + + def generate_function_definitions(self, env, code): + if self.body: + self.generate_lambda_definitions(self.scope, code) + self.body.generate_function_definitions(self.scope, code) + + def generate_execution_code(self, code): + # This is needed to generate evaluation code for + # default values of method arguments. + code.mark_pos(self.pos) + if not self.entry.type.early_init: + bases = None + if self.type_init_args: + # Extract bases tuple and validate 'best base' by actually calling 'type()'. + bases = code.funcstate.allocate_temp(PyrexTypes.py_object_type, manage_ref=True) + + self.type_init_args.generate_evaluation_code(code) + code.putln("%s = PyTuple_GET_ITEM(%s, 1);" % (bases, self.type_init_args.result())) + code.put_incref(bases, PyrexTypes.py_object_type) + + first_base = "((PyTypeObject*)PyTuple_GET_ITEM(%s, 0))" % bases + # Let Python do the base types compatibility checking. 
+ trial_type = code.funcstate.allocate_temp(PyrexTypes.py_object_type, manage_ref=True) + code.putln("%s = __Pyx_PyType_GetSlot(&PyType_Type, tp_new, newfunc)(&PyType_Type, %s, NULL);" % ( + trial_type, self.type_init_args.result())) + code.putln(code.error_goto_if_null(trial_type, self.pos)) + code.put_gotref(trial_type, py_object_type) + code.putln("if (__Pyx_PyType_GetSlot((PyTypeObject*) %s, tp_base, PyTypeObject*) != %s) {" % ( + trial_type, first_base)) + trial_type_base = "__Pyx_PyType_GetSlot((PyTypeObject*) %s, tp_base, PyTypeObject*)" % trial_type + code.putln("__Pyx_TypeName base_name = __Pyx_PyType_GetName(%s);" % trial_type_base) + code.putln("__Pyx_TypeName type_name = __Pyx_PyType_GetName(%s);" % first_base) + code.putln("PyErr_Format(PyExc_TypeError, " + "\"best base '\" __Pyx_FMT_TYPENAME \"' must be equal to first base '\" __Pyx_FMT_TYPENAME \"'\",") + code.putln(" base_name, type_name);") + code.putln("__Pyx_DECREF_TypeName(base_name);") + code.putln("__Pyx_DECREF_TypeName(type_name);") + code.putln(code.error_goto(self.pos)) + code.putln("}") + + code.put_decref_clear(trial_type, PyrexTypes.py_object_type) + code.funcstate.release_temp(trial_type) + + self.type_init_args.generate_disposal_code(code) + self.type_init_args.free_temps(code) + + self.generate_type_ready_code(self.entry, code, bases_tuple_cname=bases, check_heap_type_bases=True) + if bases is not None: + code.put_decref_clear(bases, PyrexTypes.py_object_type) + code.funcstate.release_temp(bases) + + if self.body: + self.body.generate_execution_code(code) + + # Also called from ModuleNode for early init types. + @staticmethod + def generate_type_ready_code(entry, code, bases_tuple_cname=None, check_heap_type_bases=False): + # Generate a call to PyType_Ready for an extension + # type defined in this module. 
+ type = entry.type + typeptr_cname = type.typeptr_cname + scope = type.scope + if not scope: # could be None if there was an error + return + if entry.visibility == 'extern': + # Generate code to initialise the typeptr of an external extension + # type defined in this module to point to its type object. + if type.typeobj_cname: + # FIXME: this should not normally be set :-? + assert not type.typeobj_cname + code.putln("%s = &%s;" % ( + type.typeptr_cname, + type.typeobj_cname, + )) + return + # TODO: remove 'else:' and dedent + else: + assert typeptr_cname + assert type.typeobj_cname + typespec_cname = "%s_spec" % type.typeobj_cname + code.putln("#if CYTHON_USE_TYPE_SPECS") + tuple_temp = None + if not bases_tuple_cname and scope.parent_type.base_type: + tuple_temp = code.funcstate.allocate_temp(py_object_type, manage_ref=True) + code.putln("%s = PyTuple_Pack(1, (PyObject *)%s); %s" % ( + tuple_temp, + scope.parent_type.base_type.typeptr_cname, + code.error_goto_if_null(tuple_temp, entry.pos), + )) + code.put_gotref(tuple_temp, py_object_type) + + if bases_tuple_cname or tuple_temp: + if check_heap_type_bases: + code.globalstate.use_utility_code( + UtilityCode.load_cached('ValidateBasesTuple', 'ExtensionTypes.c')) + code.put_error_if_neg(entry.pos, "__Pyx_validate_bases_tuple(%s.name, %s, %s)" % ( + typespec_cname, + TypeSlots.get_slot_by_name("tp_dictoffset", scope.directives).slot_code(scope), + bases_tuple_cname or tuple_temp, + )) + + code.putln("%s = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(%s, &%s, %s);" % ( + typeptr_cname, + Naming.module_cname, + typespec_cname, + bases_tuple_cname or tuple_temp, + )) + if tuple_temp: + code.put_xdecref_clear(tuple_temp, type=py_object_type) + code.funcstate.release_temp(tuple_temp) + code.putln(code.error_goto_if_null(typeptr_cname, entry.pos)) + else: + code.putln( + "%s = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(%s, &%s, NULL); %s" % ( + typeptr_cname, + Naming.module_cname, + typespec_cname, + 
code.error_goto_if_null(typeptr_cname, entry.pos), + )) + + # The buffer interface is not currently supported by PyType_FromSpec(). + buffer_slot = TypeSlots.get_slot_by_name("tp_as_buffer", code.globalstate.directives) + if not buffer_slot.is_empty(scope): + code.putln("#if !CYTHON_COMPILING_IN_LIMITED_API") + code.putln("%s->%s = %s;" % ( + typeptr_cname, + buffer_slot.slot_name, + buffer_slot.slot_code(scope), + )) + # Still need to inherit buffer methods since PyType_Ready() didn't do it for us. + for buffer_method_name in ("__getbuffer__", "__releasebuffer__"): + buffer_slot = TypeSlots.get_slot_table( + code.globalstate.directives).get_slot_by_method_name(buffer_method_name) + if buffer_slot.slot_code(scope) == "0" and not TypeSlots.get_base_slot_function(scope, buffer_slot): + code.putln("if (!%s->tp_as_buffer->%s &&" + " %s->tp_base->tp_as_buffer &&" + " %s->tp_base->tp_as_buffer->%s) {" % ( + typeptr_cname, buffer_slot.slot_name, + typeptr_cname, + typeptr_cname, buffer_slot.slot_name, + )) + code.putln("%s->tp_as_buffer->%s = %s->tp_base->tp_as_buffer->%s;" % ( + typeptr_cname, buffer_slot.slot_name, + typeptr_cname, buffer_slot.slot_name, + )) + code.putln("}") + code.putln("#elif defined(Py_bf_getbuffer) && defined(Py_bf_releasebuffer)") + code.putln("/* PY_VERSION_HEX >= 0x03090000 || Py_LIMITED_API >= 0x030B0000 */") + code.putln("#elif defined(_MSC_VER)") + code.putln("#pragma message (\"The buffer protocol is not supported in the Limited C-API < 3.11.\")") + code.putln("#else") + code.putln("#warning \"The buffer protocol is not supported in the Limited C-API < 3.11.\"") + code.putln("#endif") + + code.globalstate.use_utility_code( + UtilityCode.load_cached("FixUpExtensionType", "ExtensionTypes.c")) + code.put_error_if_neg(entry.pos, "__Pyx_fix_up_extension_type_from_spec(&%s, %s)" % ( + typespec_cname, typeptr_cname)) + + code.putln("#else") + if bases_tuple_cname: + code.put_incref(bases_tuple_cname, py_object_type) + 
code.put_giveref(bases_tuple_cname, py_object_type) + code.putln("%s.tp_bases = %s;" % (type.typeobj_cname, bases_tuple_cname)) + code.putln("%s = &%s;" % ( + typeptr_cname, + type.typeobj_cname, + )) + code.putln("#endif") # if CYTHON_USE_TYPE_SPECS + + base_type = type.base_type + while base_type: + if base_type.is_external and not base_type.objstruct_cname == "PyTypeObject": + # 'type' is special-cased because it is actually based on PyHeapTypeObject + # Variable length bases are allowed if the current class doesn't grow + code.putln("if (sizeof(%s%s) != sizeof(%s%s)) {" % ( + "" if type.typedef_flag else "struct ", type.objstruct_cname, + "" if base_type.typedef_flag else "struct ", base_type.objstruct_cname)) + code.globalstate.use_utility_code( + UtilityCode.load_cached("ValidateExternBase", "ExtensionTypes.c")) + code.put_error_if_neg(entry.pos, "__Pyx_validate_extern_base(%s)" % ( + type.base_type.typeptr_cname)) + code.putln("}") + break + base_type = base_type.base_type + + code.putln("#if !CYTHON_COMPILING_IN_LIMITED_API") + # FIXME: these still need to get initialised even with the limited-API + for slot in TypeSlots.get_slot_table(code.globalstate.directives): + slot.generate_dynamic_init_code(scope, code) + code.putln("#endif") + + code.putln("#if !CYTHON_USE_TYPE_SPECS") + code.globalstate.use_utility_code( + UtilityCode.load_cached('PyType_Ready', 'ExtensionTypes.c')) + code.put_error_if_neg(entry.pos, "__Pyx_PyType_Ready(%s)" % typeptr_cname) + code.putln("#endif") + + # Don't inherit tp_print from builtin types in Python 2, restoring the + # behavior of using tp_repr or tp_str instead. + # ("tp_print" was renamed to "tp_vectorcall_offset" in Py3.8b1) + code.putln("#if PY_MAJOR_VERSION < 3") + code.putln("%s->tp_print = 0;" % typeptr_cname) + code.putln("#endif") + + # Use specialised attribute lookup for types with generic lookup but no instance dict. 
+ getattr_slot_func = TypeSlots.get_slot_code_by_name(scope, 'tp_getattro') + dictoffset_slot_func = TypeSlots.get_slot_code_by_name(scope, 'tp_dictoffset') + if getattr_slot_func == '0' and dictoffset_slot_func == '0': + code.putln("#if !CYTHON_COMPILING_IN_LIMITED_API") # FIXME + if type.is_final_type: + py_cfunc = "__Pyx_PyObject_GenericGetAttrNoDict" # grepable + utility_func = "PyObject_GenericGetAttrNoDict" + else: + py_cfunc = "__Pyx_PyObject_GenericGetAttr" + utility_func = "PyObject_GenericGetAttr" + code.globalstate.use_utility_code(UtilityCode.load_cached(utility_func, "ObjectHandling.c")) + + code.putln("if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) &&" + " likely(!%s->tp_dictoffset && %s->tp_getattro == PyObject_GenericGetAttr)) {" % ( + typeptr_cname, typeptr_cname)) + code.putln("%s->tp_getattro = %s;" % ( + typeptr_cname, py_cfunc)) + code.putln("}") + code.putln("#endif") # if !CYTHON_COMPILING_IN_LIMITED_API + + # Fix special method docstrings. This is a bit of a hack, but + # unless we let PyType_Ready create the slot wrappers we have + # a significant performance hit. (See trac #561.) 
+ for func in entry.type.scope.pyfunc_entries: + is_buffer = func.name in ('__getbuffer__', '__releasebuffer__') + if (func.is_special and Options.docstrings and + func.wrapperbase_cname and not is_buffer): + slot = TypeSlots.get_slot_table( + entry.type.scope.directives).get_slot_by_method_name(func.name) + preprocessor_guard = slot.preprocessor_guard_code() if slot else None + if preprocessor_guard: + code.putln(preprocessor_guard) + code.putln('#if CYTHON_UPDATE_DESCRIPTOR_DOC') + code.putln("{") + code.putln( + 'PyObject *wrapper = PyObject_GetAttrString((PyObject *)%s, "%s"); %s' % ( + typeptr_cname, + func.name, + code.error_goto_if_null('wrapper', entry.pos))) + code.putln( + "if (__Pyx_IS_TYPE(wrapper, &PyWrapperDescr_Type)) {") + code.putln( + "%s = *((PyWrapperDescrObject *)wrapper)->d_base;" % ( + func.wrapperbase_cname)) + code.putln( + "%s.doc = %s;" % (func.wrapperbase_cname, func.doc_cname)) + code.putln( + "((PyWrapperDescrObject *)wrapper)->d_base = &%s;" % ( + func.wrapperbase_cname)) + code.putln("}") + code.putln("}") + code.putln('#endif') + if preprocessor_guard: + code.putln('#endif') + + if type.vtable_cname: + code.globalstate.use_utility_code( + UtilityCode.load_cached('SetVTable', 'ImportExport.c')) + code.put_error_if_neg(entry.pos, "__Pyx_SetVtable(%s, %s)" % ( + typeptr_cname, + type.vtabptr_cname, + )) + # TODO: find a way to make this work with the Limited API! 
+ code.putln("#if !CYTHON_COMPILING_IN_LIMITED_API") + code.globalstate.use_utility_code( + UtilityCode.load_cached('MergeVTables', 'ImportExport.c')) + code.put_error_if_neg(entry.pos, "__Pyx_MergeVtables(%s)" % typeptr_cname) + code.putln("#endif") + if not type.scope.is_internal and not type.scope.directives.get('internal'): + # scope.is_internal is set for types defined by + # Cython (such as closures), the 'internal' + # directive is set by users + code.put_error_if_neg(entry.pos, "PyObject_SetAttr(%s, %s, (PyObject *) %s)" % ( + Naming.module_cname, + code.intern_identifier(scope.class_name), + typeptr_cname, + )) + + weakref_entry = scope.lookup_here("__weakref__") if not scope.is_closure_class_scope else None + if weakref_entry: + if weakref_entry.type is py_object_type: + tp_weaklistoffset = "%s->tp_weaklistoffset" % typeptr_cname + if type.typedef_flag: + objstruct = type.objstruct_cname + else: + objstruct = "struct %s" % type.objstruct_cname + code.putln("if (%s == 0) %s = offsetof(%s, %s);" % ( + tp_weaklistoffset, + tp_weaklistoffset, + objstruct, + weakref_entry.cname)) + else: + error(weakref_entry.pos, "__weakref__ slot must be of type 'object'") + + if scope.lookup_here("__reduce_cython__") if not scope.is_closure_class_scope else None: + # Unfortunately, we cannot reliably detect whether a + # superclass defined __reduce__ at compile time, so we must + # do so at runtime. + code.globalstate.use_utility_code( + UtilityCode.load_cached('SetupReduce', 'ExtensionTypes.c')) + code.putln("#if !CYTHON_COMPILING_IN_LIMITED_API") # FIXME + code.put_error_if_neg(entry.pos, "__Pyx_setup_reduce((PyObject *) %s)" % typeptr_cname) + code.putln("#endif") + + def annotate(self, code): + if self.type_init_args: + self.type_init_args.annotate(code) + if self.body: + self.body.annotate(code) + + +class PropertyNode(StatNode): + # Definition of a property in an extension type. 
    #
    #  name     string
    #  doc      EncodedString or None    Doc string
    #  entry    Symtab.Entry             The Entry of the property attribute
    #  body     StatListNode

    child_attrs = ["body"]

    def analyse_declarations(self, env):
        # Declare the property in the enclosing scope, then analyse the
        # body statements inside the property's own scope.
        self.entry = env.declare_property(self.name, self.doc, self.pos)
        self.body.analyse_declarations(self.entry.scope)

    def analyse_expressions(self, env):
        self.body = self.body.analyse_expressions(env)
        return self

    def generate_function_definitions(self, env, code):
        self.body.generate_function_definitions(env, code)

    def generate_execution_code(self, code):
        # Properties emit no module-level execution code.
        pass

    def annotate(self, code):
        self.body.annotate(code)


class CPropertyNode(StatNode):
    """Definition of a C property, backed by a CFuncDefNode getter.
    """
    #  name   string
    #  doc    EncodedString or None        Doc string of the property
    #  entry  Symtab.Entry                 The Entry of the property attribute
    #  body   StatListNode[CFuncDefNode]   (for compatibility with PropertyNode)

    child_attrs = ["body"]
    is_cproperty = True

    @property
    def cfunc(self):
        # The single C getter function backing this property; the body is
        # expected to contain exactly that one CFuncDefNode.
        stats = self.body.stats
        assert stats and isinstance(stats[0], CFuncDefNode), stats
        return stats[0]

    def analyse_declarations(self, env):
        # Analyse the getter inside a dedicated PropertyScope, then declare
        # the property itself using the getter's return type and C name.
        scope = PropertyScope(self.name, class_scope=env)
        self.body.analyse_declarations(scope)
        entry = self.entry = env.declare_property(
            self.name, self.doc, self.pos, ctype=self.cfunc.return_type, property_scope=scope)
        entry.getter_cname = self.cfunc.entry.cname

    def analyse_expressions(self, env):
        self.body = self.body.analyse_expressions(env)
        return self

    def generate_function_definitions(self, env, code):
        self.body.generate_function_definitions(env, code)

    def generate_execution_code(self, code):
        # C properties emit no module-level execution code.
        pass

    def annotate(self, code):
        self.body.annotate(code)


class GlobalNode(StatNode):
    # Global variable declaration.
    #
    #  names    [string]

    child_attrs = []

    def analyse_declarations(self, env):
        # Bind each name to the module-level (global) scope.
        for name in self.names:
            env.declare_global(name, self.pos)

    def analyse_expressions(self, env):
        return self

    def generate_execution_code(self, code):
        # Purely declarative - nothing to execute at runtime.
        pass


class NonlocalNode(StatNode):
    # Nonlocal variable declaration via the 'nonlocal' keyword.
    #
    #  names    [string]

    child_attrs = []

    def analyse_declarations(self, env):
        # Bind each name to a binding in an enclosing function scope.
        for name in self.names:
            env.declare_nonlocal(name, self.pos)

    def analyse_expressions(self, env):
        return self

    def generate_execution_code(self, code):
        # Purely declarative - nothing to execute at runtime.
        pass


class ExprStatNode(StatNode):
    # Expression used as a statement.
    #
    #  expr   ExprNode

    child_attrs = ["expr"]

    def analyse_declarations(self, env):
        from . import ExprNodes
        expr = self.expr
        if isinstance(expr, ExprNodes.GeneralCallNode):
            # Intercept cython.declare(...) used in statement position:
            # it declares variables rather than evaluating anything.
            func = expr.function.as_cython_attribute()
            if func == u'declare':
                args, kwds = expr.explicit_args_kwds()
                if len(args):
                    error(expr.pos, "Variable names must be specified.")
                for var, type_node in kwds.key_value_pairs:
                    type = type_node.analyse_as_type(env)
                    if type is None:
                        error(type_node.pos, "Unknown type")
                    else:
                        env.declare_var(var.value, type, var.pos, is_cdef=True)
                # The statement's work is done at declaration time; turn it
                # into a no-op for the remaining compiler phases.
                self.__class__ = PassStatNode
        elif getattr(expr, 'annotation', None) is not None:
            if expr.is_name:
                # non-code variable annotation, e.g. "name: type"
                expr.declare_from_annotation(env)
                self.__class__ = PassStatNode
            elif expr.is_attribute or expr.is_subscript:
                # unused expression with annotation, e.g. "a[0]: type" or "a.xyz : type"
                self.__class__ = PassStatNode

    def analyse_expressions(self, env):
        self.expr.result_is_used = False  # hint that .result() may safely be left empty
        self.expr = self.expr.analyse_expressions(env)
        # Repeat in case of node replacement.
        self.expr.result_is_used = False  # hint that .result() may safely be left empty
        return self

    def nogil_check(self, env):
        # Disposing of a temporary owned Python object needs the GIL.
        if self.expr.type.is_pyobject and self.expr.is_temp:
            self.gil_error()

    gil_message = "Discarding owned Python object"

    def generate_execution_code(self, code):
        code.mark_pos(self.pos)
        self.expr.result_is_used = False  # hint that .result() may safely be left empty
        self.expr.generate_evaluation_code(code)
        if not self.expr.is_temp and self.expr.result():
            result = self.expr.result()
            if not self.expr.type.is_void:
                # Cast to void to silence "unused value" warnings from the
                # C compiler for non-temp results.
                result = "(void)(%s)" % result
            code.putln("%s;" % result)
        self.expr.generate_disposal_code(code)
        self.expr.free_temps(code)

    def generate_function_definitions(self, env, code):
        self.expr.generate_function_definitions(env, code)

    def annotate(self, code):
        self.expr.annotate(code)


class AssignmentNode(StatNode):
    #  Abstract base class for assignment nodes.
    #
    #  The analyse_expressions and generate_execution_code
    #  phases of assignments are split into two sub-phases
    #  each, to enable all the right hand sides of a
    #  parallel assignment to be evaluated before assigning
    #  to any of the left hand sides.

    def _warn_on_const_assignment(self, lhs, rhs):
        # Warn when a pointer-to-const is assigned to a pointer-to-non-const,
        # mirroring the C "discards const qualifier" diagnostic.
        rhs_t = rhs.type
        lhs_t = lhs.type
        if rhs_t.is_ptr and rhs_t.base_type.is_const and lhs_t.is_ptr and not lhs_t.base_type.is_const:
            warning(self.pos, "Assigning to '{}' from '{}' discards const qualifier".format(lhs_t, rhs_t), level=1)

    def _check_const_assignment(self, node):
        if isinstance(node, AssignmentNode):
            self._warn_on_const_assignment(node.lhs, node.rhs)

    def analyse_expressions(self, env):
        # analyse_types() may replace the node (e.g. with an unrolled parallel
        # assignment), so re-check the node kind before inspecting lhs/rhs.
        node = self.analyse_types(env)
        self._check_const_assignment(node)
        if isinstance(node, AssignmentNode) and not isinstance(node, ParallelAssignmentNode):
            if node.rhs.type.is_ptr and node.rhs.is_ephemeral():
                error(self.pos, "Storing unsafe C derivative of temporary Python reference")
        return node

#    def analyse_expressions(self, env):
#        self.analyse_expressions_1(env)
#        self.analyse_expressions_2(env)

    def generate_execution_code(self, code):
        code.mark_pos(self.pos)
        # Evaluate the RHS first, then assign; subclasses supply both steps.
        self.generate_rhs_evaluation_code(code)
        self.generate_assignment_code(code)


class SingleAssignmentNode(AssignmentNode):
    #  The simplest case:
    #
    #    a = b
    #
    #  lhs                      ExprNode      Left hand side
    #  rhs                      ExprNode      Right hand side
    #  first                    bool          Is this guaranteed the first assignment to lhs?
    #  is_overloaded_assignment bool          Is this assignment done via an overloaded operator=
    #  is_assignment_expression bool          Internally SingleAssignmentNode is used to implement assignment expressions
    #  exception_check
    #  exception_value

    child_attrs = ["lhs", "rhs"]
    first = False
    is_overloaded_assignment = False
    is_assignment_expression = False
    declaration_only = False

    def analyse_declarations(self, env):
        from .
import ExprNodes

        # handle declarations of the form x = cython.foo()
        if isinstance(self.rhs, ExprNodes.CallNode):
            func_name = self.rhs.function.as_cython_attribute()
            if func_name:
                args, kwds = self.rhs.explicit_args_kwds()
                if func_name in ['declare', 'typedef']:
                    if len(args) > 2:
                        error(args[2].pos, "Invalid positional argument.")
                        return
                    if kwds is not None:
                        kwdict = kwds.compile_time_value(None)
                        # only 'declare' accepts a 'visibility' keyword
                        if func_name == 'typedef' or 'visibility' not in kwdict:
                            error(kwds.pos, "Invalid keyword argument.")
                            return
                        visibility = kwdict['visibility']
                    else:
                        visibility = 'private'
                    type = args[0].analyse_as_type(env)
                    if type is None:
                        error(args[0].pos, "Unknown type")
                        return
                    lhs = self.lhs
                    if func_name == 'declare':
                        # x = cython.declare(T[, value]) or (x, y) = cython.declare(T)
                        if isinstance(lhs, ExprNodes.NameNode):
                            vars = [(lhs.name, lhs.pos)]
                        elif isinstance(lhs, ExprNodes.TupleNode):
                            vars = [(var.name, var.pos) for var in lhs.args]
                        else:
                            error(lhs.pos, "Invalid declaration")
                            return
                        for var, pos in vars:
                            env.declare_var(var, type, pos, is_cdef=True, visibility=visibility)
                        if len(args) == 2:
                            # we have a value
                            self.rhs = args[1]
                        else:
                            self.declaration_only = True
                    else:
                        # x = cython.typedef(T)
                        self.declaration_only = True
                        if not isinstance(lhs, ExprNodes.NameNode):
                            error(lhs.pos, "Invalid declaration.")
                        env.declare_typedef(lhs.name, type, self.pos, visibility='private')

                elif func_name in ['struct', 'union']:
                    # x = cython.struct(field=T, ...) / cython.union(...)
                    self.declaration_only = True
                    if len(args) > 0 or kwds is None:
                        error(self.rhs.pos, "Struct or union members must be given by name.")
                        return
                    members = []
                    for member, type_node in kwds.key_value_pairs:
                        type = type_node.analyse_as_type(env)
                        if type is None:
                            error(type_node.pos, "Unknown type")
                        else:
                            members.append((member.value, type, member.pos))
                    if len(members) < len(kwds.key_value_pairs):
                        # at least one member type failed to analyse above
                        return
                    if not isinstance(self.lhs, ExprNodes.NameNode):
                        error(self.lhs.pos, "Invalid declaration.")
                    name = self.lhs.name
                    scope = StructOrUnionScope(name)
                    env.declare_struct_or_union(name, func_name, scope, False, self.rhs.pos)
                    for member, type, pos in members:
                        scope.declare_var(member, type, pos)

                elif func_name == 'fused_type':
                    # dtype = cython.fused_type(...)
                    self.declaration_only = True
                    if kwds:
                        error(self.rhs.function.pos,
                              "fused_type does not take keyword arguments")

                    fusednode = FusedTypeNode(self.rhs.pos,
                                              name=self.lhs.name, types=args)
                    fusednode.analyse_declarations(env)

        if self.declaration_only:
            return
        else:
            if self.is_assignment_expression:
                self.lhs.analyse_assignment_expression_target_declaration(env)
            else:
                self.lhs.analyse_target_declaration(env)
            # if an entry doesn't exist that just implies that lhs isn't made up purely
            # of AttributeNodes and NameNodes - it isn't useful as a known path to
            # a standard library module
            if (self.lhs.is_attribute or self.lhs.is_name) and self.lhs.entry and not self.lhs.entry.known_standard_library_import:
                stdlib_import_name = self.rhs.get_known_standard_library_import()
                if stdlib_import_name:
                    self.lhs.entry.known_standard_library_import = stdlib_import_name

    def analyse_types(self, env, use_temp=0):
        from . import ExprNodes

        self.rhs = self.rhs.analyse_types(env)

        # Try to unroll tuple <- C-array/slice assignments first; if that
        # succeeds, the unrolled replacement node is returned instead.
        unrolled_assignment = self.unroll_rhs(env)
        if unrolled_assignment:
            return unrolled_assignment

        self.lhs = self.lhs.analyse_target_types(env)
        self.lhs.gil_assignment_check(env)
        unrolled_assignment = self.unroll_lhs(env)
        if unrolled_assignment:
            return unrolled_assignment

        if isinstance(self.lhs, ExprNodes.MemoryViewIndexNode):
            self.lhs.analyse_broadcast_operation(self.rhs)
            self.lhs = self.lhs.analyse_as_memview_scalar_assignment(self.rhs)
        elif self.lhs.type.is_array:
            if not isinstance(self.lhs, ExprNodes.SliceIndexNode):
                # cannot assign to C array, only to its full slice
                lhs = ExprNodes.SliceIndexNode(self.lhs.pos, base=self.lhs, start=None, stop=None)
                self.lhs = lhs.analyse_target_types(env)

        if self.lhs.type.is_cpp_class:
            # Prefer an overloaded C++ operator= if one matches.
            op = env.lookup_operator_for_types(self.pos, '=', [self.lhs.type, self.rhs.type])
            if op:
                rhs = self.rhs
                self.is_overloaded_assignment = True
                self.exception_check = op.type.exception_check
                self.exception_value = op.type.exception_value
                if self.exception_check == '+' and self.exception_value is None:
                    env.use_utility_code(UtilityCode.load_cached("CppExceptionConversion", "CppSupport.cpp"))
            else:
                rhs = self.rhs.coerce_to(self.lhs.type, env)
        else:
            rhs = self.rhs.coerce_to(self.lhs.type, env)

        if use_temp or rhs.is_attribute or (
                not rhs.is_name and not rhs.is_literal and
                rhs.type.is_pyobject):
            # things like (cdef) attribute access are not safe (traverses pointers)
            rhs = rhs.coerce_to_temp(env)
        elif rhs.type.is_pyobject:
            rhs = rhs.coerce_to_simple(env)
        self.rhs = rhs
        return self

    def unroll(self, node, target_size, env):
        from .
import ExprNodes, UtilNodes + + base = node + start_node = stop_node = step_node = check_node = None + + if node.type.is_ctuple: + slice_size = node.type.size + + elif node.type.is_ptr or node.type.is_array: + while isinstance(node, ExprNodes.SliceIndexNode) and not (node.start or node.stop): + base = node = node.base + if isinstance(node, ExprNodes.SliceIndexNode): + base = node.base + start_node = node.start + if start_node: + start_node = start_node.coerce_to(PyrexTypes.c_py_ssize_t_type, env) + stop_node = node.stop + if stop_node: + stop_node = stop_node.coerce_to(PyrexTypes.c_py_ssize_t_type, env) + else: + if node.type.is_array and node.type.size: + stop_node = ExprNodes.IntNode( + self.pos, value=str(node.type.size), + constant_result=(node.type.size if isinstance(node.type.size, _py_int_types) + else ExprNodes.constant_value_not_set)) + else: + error(self.pos, "C array iteration requires known end index") + return + step_node = None #node.step + if step_node: + step_node = step_node.coerce_to(PyrexTypes.c_py_ssize_t_type, env) + + # TODO: Factor out SliceIndexNode.generate_slice_guard_code() for use here. 
+ def get_const(node, none_value): + if node is None: + return none_value + elif node.has_constant_result(): + return node.constant_result + else: + raise ValueError("Not a constant.") + + try: + slice_size = (get_const(stop_node, None) - get_const(start_node, 0)) / get_const(step_node, 1) + except ValueError: + error(self.pos, "C array assignment currently requires known endpoints") + return + + elif node.type.is_array: + slice_size = node.type.size + if not isinstance(slice_size, _py_int_types): + return # might still work when coercing to Python + else: + return + + else: + return + + if slice_size != target_size: + error(self.pos, "Assignment to/from slice of wrong length, expected %s, got %s" % ( + slice_size, target_size)) + return + + items = [] + base = UtilNodes.LetRefNode(base) + refs = [base] + if start_node and not start_node.is_literal: + start_node = UtilNodes.LetRefNode(start_node) + refs.append(start_node) + if stop_node and not stop_node.is_literal: + stop_node = UtilNodes.LetRefNode(stop_node) + refs.append(stop_node) + if step_node and not step_node.is_literal: + step_node = UtilNodes.LetRefNode(step_node) + refs.append(step_node) + + for ix in range(target_size): + ix_node = ExprNodes.IntNode(self.pos, value=str(ix), constant_result=ix, type=PyrexTypes.c_py_ssize_t_type) + if step_node is not None: + if step_node.has_constant_result(): + step_value = ix_node.constant_result * step_node.constant_result + ix_node = ExprNodes.IntNode(self.pos, value=str(step_value), constant_result=step_value) + else: + ix_node = ExprNodes.MulNode(self.pos, operator='*', operand1=step_node, operand2=ix_node) + if start_node is not None: + if start_node.has_constant_result() and ix_node.has_constant_result(): + index_value = ix_node.constant_result + start_node.constant_result + ix_node = ExprNodes.IntNode(self.pos, value=str(index_value), constant_result=index_value) + else: + ix_node = ExprNodes.AddNode( + self.pos, operator='+', operand1=start_node, 
operand2=ix_node) + items.append(ExprNodes.IndexNode(self.pos, base=base, index=ix_node.analyse_types(env))) + return check_node, refs, items + + def unroll_assignments(self, refs, check_node, lhs_list, rhs_list, env): + from . import UtilNodes + assignments = [] + for lhs, rhs in zip(lhs_list, rhs_list): + assignments.append(SingleAssignmentNode(self.pos, lhs=lhs, rhs=rhs, first=self.first)) + node = ParallelAssignmentNode(pos=self.pos, stats=assignments).analyse_expressions(env) + if check_node: + node = StatListNode(pos=self.pos, stats=[check_node, node]) + for ref in refs[::-1]: + node = UtilNodes.LetNode(ref, node) + return node + + def unroll_rhs(self, env): + from . import ExprNodes + if not isinstance(self.lhs, ExprNodes.TupleNode): + return + if any(arg.is_starred for arg in self.lhs.args): + return + + unrolled = self.unroll(self.rhs, len(self.lhs.args), env) + if not unrolled: + return + check_node, refs, rhs = unrolled + return self.unroll_assignments(refs, check_node, self.lhs.args, rhs, env) + + def unroll_lhs(self, env): + if self.lhs.type.is_ctuple: + # Handled directly. + return + from . 
class CascadedAssignmentNode(AssignmentNode):
    # An assignment with multiple left hand sides:
    #
    #   a = b = c
    #
    #  lhs_list   [ExprNode]   Left hand sides
    #  rhs        ExprNode     Right hand sides
    #
    #  Used internally:
    #
    #  coerced_values       [ExprNode]   RHS coerced to all distinct LHS types
    #  cloned_values        [ExprNode]   cloned RHS value for each LHS
    #  assignment_overloads [Bool]       If each assignment uses a C++ operator=

    child_attrs = ["lhs_list", "rhs", "coerced_values", "cloned_values"]
    cloned_values = None
    coerced_values = None
    assignment_overloads = None

    def _check_const_assignment(self, node):
        # Warn for each LHS target of a cascaded assignment.
        # NOTE(review): delegates to self._warn_on_const_assignment, which is
        # defined outside this view (presumably on AssignmentNode) — confirm.
        if isinstance(node, CascadedAssignmentNode):
            for lhs in node.lhs_list:
                self._warn_on_const_assignment(lhs, node.rhs)

    def analyse_declarations(self, env):
        # Declare every assignment target.
        for lhs in self.lhs_list:
            lhs.analyse_target_declaration(env)

    def analyse_types(self, env, use_temp=0):
        """Analyse targets and RHS, then prepare one coerced RHS clone per
        distinct LHS type plus one CloneNode per individual assignment."""
        from .ExprNodes import CloneNode, ProxyNode

        # collect distinct types used on the LHS
        lhs_types = set()
        for i, lhs in enumerate(self.lhs_list):
            lhs = self.lhs_list[i] = lhs.analyse_target_types(env)
            lhs.gil_assignment_check(env)
            lhs_types.add(lhs.type)

        rhs = self.rhs.analyse_types(env)
        # common special case: only one type needed on the LHS => coerce only once
        if len(lhs_types) == 1:
            # Avoid coercion for overloaded assignment operators.
            # Note: 'lhs' here is the last target from the loop above; with a
            # single distinct LHS type it is representative of all targets.
            if next(iter(lhs_types)).is_cpp_class:
                op = env.lookup_operator('=', [lhs, self.rhs])
                if not op:
                    rhs = rhs.coerce_to(lhs_types.pop(), env)
            else:
                rhs = rhs.coerce_to(lhs_types.pop(), env)

        # Make the RHS safe to evaluate once and reference many times.
        if not rhs.is_name and not rhs.is_literal and (
                use_temp or rhs.is_attribute or rhs.type.is_pyobject):
            rhs = rhs.coerce_to_temp(env)
        else:
            rhs = rhs.coerce_to_simple(env)
        # A temp RHS is wrapped in a ProxyNode so CloneNodes can share it.
        self.rhs = ProxyNode(rhs) if rhs.is_temp else rhs

        # clone RHS and coerce it to all distinct LHS types
        self.coerced_values = []
        coerced_values = {}
        self.assignment_overloads = []
        for lhs in self.lhs_list:
            overloaded = lhs.type.is_cpp_class and env.lookup_operator('=', [lhs, self.rhs])
            self.assignment_overloads.append(overloaded)
            # Coerce once per distinct LHS type (skip if already RHS's type).
            if lhs.type not in coerced_values and lhs.type != rhs.type:
                rhs = CloneNode(self.rhs)
                if not overloaded:
                    rhs = rhs.coerce_to(lhs.type, env)
                self.coerced_values.append(rhs)
                coerced_values[lhs.type] = rhs

        # clone coerced values for all LHS assignments
        self.cloned_values = []
        for lhs in self.lhs_list:
            rhs = coerced_values.get(lhs.type, self.rhs)
            self.cloned_values.append(CloneNode(rhs))
        return self

    def generate_rhs_evaluation_code(self, code):
        self.rhs.generate_evaluation_code(code)

    def generate_assignment_code(self, code, overloaded_assignment=False):
        # Evaluation order matters: coercions first, then each per-target
        # clone, then disposal of everything in reverse ownership order.
        # prepare all coercions
        for rhs in self.coerced_values:
            rhs.generate_evaluation_code(code)
        # assign clones to LHS
        for lhs, rhs, overload in zip(self.lhs_list, self.cloned_values, self.assignment_overloads):
            rhs.generate_evaluation_code(code)
            lhs.generate_assignment_code(rhs, code, overloaded_assignment=overload)
        # dispose of coerced values and original RHS
        for rhs_value in self.coerced_values:
            rhs_value.generate_disposal_code(code)
            rhs_value.free_temps(code)
        self.rhs.generate_disposal_code(code)
        self.rhs.free_temps(code)

    def generate_function_definitions(self, env, code):
        self.rhs.generate_function_definitions(env, code)

    def annotate(self, code):
        for rhs in self.coerced_values:
            rhs.annotate(code)
        for lhs, rhs in zip(self.lhs_list, self.cloned_values):
            lhs.annotate(code)
            rhs.annotate(code)
        self.rhs.annotate(code)
+ # + # lhs ExprNode Left hand side + # rhs ExprNode Right hand side + # operator char one of "+-*/%^&|" + # + # This code is a bit tricky because in order to obey Python + # semantics the sub-expressions (e.g. indices) of the lhs must + # not be evaluated twice. So we must reuse the values calculated + # in evaluation phase for the assignment phase as well. + # Fortunately, the type of the lhs node is fairly constrained + # (it must be a NameNode, AttributeNode, or IndexNode). + + child_attrs = ["lhs", "rhs"] + + def analyse_declarations(self, env): + self.lhs.analyse_target_declaration(env) + + def analyse_types(self, env): + self.rhs = self.rhs.analyse_types(env) + self.lhs = self.lhs.analyse_target_types(env) + + # When assigning to a fully indexed buffer or memoryview, coerce the rhs + if self.lhs.is_memview_index or self.lhs.is_buffer_access: + self.rhs = self.rhs.coerce_to(self.lhs.type, env) + elif self.lhs.type.is_string and self.operator in '+-': + # use pointer arithmetic for char* LHS instead of string concat + self.rhs = self.rhs.coerce_to(PyrexTypes.c_py_ssize_t_type, env) + return self + + def generate_execution_code(self, code): + code.mark_pos(self.pos) + lhs, rhs = self.lhs, self.rhs + rhs.generate_evaluation_code(code) + lhs.generate_subexpr_evaluation_code(code) + c_op = self.operator + if c_op == "//": + c_op = "/" + elif c_op == "**": + error(self.pos, "No C inplace power operator") + if lhs.is_buffer_access or lhs.is_memview_index: + if lhs.type.is_pyobject: + error(self.pos, "In-place operators not allowed on object buffers in this release.") + if c_op in ('/', '%') and lhs.type.is_int and not code.globalstate.directives['cdivision']: + error(self.pos, "In-place non-c divide operators not allowed on int buffers.") + lhs.generate_buffer_setitem_code(rhs, code, c_op) + elif lhs.is_memview_slice: + error(self.pos, "Inplace operators not supported on memoryview slices") + else: + # C++ + # TODO: make sure overload is declared + code.putln("%s 
class PrintStatNode(StatNode):
    # print statement
    #
    #  arg_tuple         TupleNode
    #  stream            ExprNode or None (stdout)
    #  append_newline    boolean

    child_attrs = ["arg_tuple", "stream"]

    def analyse_expressions(self, env):
        # Coerce the optional stream and the argument tuple to Python
        # objects, and pull in the C helpers the generated code will call.
        if self.stream:
            stream = self.stream.analyse_expressions(env)
            self.stream = stream.coerce_to_pyobject(env)
        arg_tuple = self.arg_tuple.analyse_expressions(env)
        self.arg_tuple = arg_tuple.coerce_to_pyobject(env)
        env.use_utility_code(printing_utility_code)
        # Single argument + newline uses the specialised __Pyx_PrintOne helper.
        if len(self.arg_tuple.args) == 1 and self.append_newline:
            env.use_utility_code(printing_one_utility_code)
        return self

    nogil_check = Node.gil_error
    gil_message = "Python print statement"

    def generate_execution_code(self, code):
        code.mark_pos(self.pos)
        if self.stream:
            self.stream.generate_evaluation_code(code)
            stream_result = self.stream.py_result()
        else:
            # NULL stream pointer => default stdout in the C helpers.
            stream_result = '0'
        if len(self.arg_tuple.args) == 1 and self.append_newline:
            # Fast path: print a single object followed by a newline.
            arg = self.arg_tuple.args[0]
            arg.generate_evaluation_code(code)

            code.putln(
                "if (__Pyx_PrintOne(%s, %s) < 0) %s" % (
                    stream_result,
                    arg.py_result(),
                    code.error_goto(self.pos)))
            arg.generate_disposal_code(code)
            arg.free_temps(code)
        else:
            # General path: pass the whole tuple plus the newline flag.
            self.arg_tuple.generate_evaluation_code(code)
            code.putln(
                "if (__Pyx_Print(%s, %s, %d) < 0) %s" % (
                    stream_result,
                    self.arg_tuple.py_result(),
                    self.append_newline,
                    code.error_goto(self.pos)))
            self.arg_tuple.generate_disposal_code(code)
            self.arg_tuple.free_temps(code)

        if self.stream:
            self.stream.generate_disposal_code(code)
            self.stream.free_temps(code)

    def generate_function_definitions(self, env, code):
        if self.stream:
            self.stream.generate_function_definitions(env, code)
        self.arg_tuple.generate_function_definitions(env, code)

    def annotate(self, code):
        if self.stream:
            self.stream.annotate(code)
        self.arg_tuple.annotate(code)
class PassStatNode(StatNode):
    # The no-op "pass" statement.

    child_attrs = []

    def analyse_expressions(self, env):
        # Nothing to analyse or rewrite; the node is already final.
        return self

    def generate_execution_code(self, code):
        # Deliberately emits no C code.
        pass
class ReturnStatNode(StatNode):
    # return statement
    #
    #  value         ExprNode or None
    #  return_type   PyrexType
    #  in_generator  return inside of generator => raise StopIteration
    #  in_async_gen  return inside of async generator

    child_attrs = ["value"]
    is_terminator = True
    in_generator = False
    in_async_gen = False

    # Whether we are in a parallel section
    in_parallel = False

    def analyse_expressions(self, env):
        # Validate the return against the enclosing function's declared
        # return type and coerce the value to it where applicable.
        return_type = env.return_type
        self.return_type = return_type
        if not return_type:
            error(self.pos, "Return not inside a function body")
            return self
        if self.value:
            if self.in_async_gen:
                error(self.pos, "Return with value in async generator")
            self.value = self.value.analyse_types(env)
            if return_type.is_void or return_type.is_returncode:
                error(self.value.pos, "Return with value in void function")
            else:
                self.value = self.value.coerce_to(env.return_type, env)
        else:
            # A bare 'return' is only allowed for void/Python-object/
            # returncode functions.
            if (not return_type.is_void
                    and not return_type.is_pyobject
                    and not return_type.is_returncode):
                error(self.pos, "Return value required")
        return self

    def nogil_check(self, env):
        if self.return_type.is_pyobject:
            self.gil_error()

    gil_message = "Returning Python object"

    def generate_execution_code(self, code):
        code.mark_pos(self.pos)
        if not self.return_type:
            # error reported earlier
            return

        value = self.value
        if self.return_type.is_pyobject:
            # Drop any previously stored return value reference.
            code.put_xdecref(Naming.retval_cname, self.return_type)
            if value and value.is_none:
                # Use specialised default handling for "return None".
                value = None

        if value:
            value.generate_evaluation_code(code)
            if self.return_type.is_memoryviewslice:
                from . import MemoryView
                MemoryView.put_acquire_memoryviewslice(
                    lhs_cname=Naming.retval_cname,
                    lhs_type=self.return_type,
                    lhs_pos=value.pos,
                    rhs=value,
                    code=code,
                    have_gil=self.in_nogil_context)
                value.generate_post_assignment_code(code)
            elif self.in_generator:
                # return value == raise StopIteration(value), but uncatchable
                code.globalstate.use_utility_code(
                    UtilityCode.load_cached("ReturnWithStopIteration", "Coroutine.c"))
                code.putln("%s = NULL; __Pyx_ReturnWithStopIteration(%s);" % (
                    Naming.retval_cname,
                    value.py_result()))
                value.generate_disposal_code(code)
            else:
                # Plain C/Python return: take ownership and store the value.
                value.make_owned_reference(code)
                code.putln("%s = %s;" % (
                    Naming.retval_cname,
                    value.result_as(self.return_type)))
                value.generate_post_assignment_code(code)
            value.free_temps(code)
        else:
            # Bare 'return' (or optimised 'return None').
            if self.return_type.is_pyobject:
                if self.in_generator:
                    if self.in_async_gen:
                        code.globalstate.use_utility_code(
                            UtilityCode.load_cached("StopAsyncIteration", "Coroutine.c"))
                        code.put("PyErr_SetNone(__Pyx_PyExc_StopAsyncIteration); ")
                    code.putln("%s = NULL;" % Naming.retval_cname)
                else:
                    code.put_init_to_py_none(Naming.retval_cname, self.return_type)
            elif self.return_type.is_returncode:
                self.put_return(code, self.return_type.default_value)

        # Release all live temp references before leaving the function.
        for cname, type in code.funcstate.temps_holding_reference():
            code.put_decref_clear(cname, type)

        code.put_goto(code.return_label)

    def put_return(self, code, value):
        # Inside a parallel (OpenMP) section, writing the shared return
        # value must be serialised.
        if self.in_parallel:
            code.putln_openmp("#pragma omp critical(__pyx_returning)")
        code.putln("%s = %s;" % (Naming.retval_cname, value))

    def generate_function_definitions(self, env, code):
        if self.value is not None:
            self.value.generate_function_definitions(env, code)

    def annotate(self, code):
        if self.value:
            self.value.annotate(code)
import Symtab + self.builtin_exc_name = exc.name + if self.builtin_exc_name == 'MemoryError': + self.exc_type = None # has a separate implementation + elif (self.builtin_exc_name == 'StopIteration' and + env.is_local_scope and env.name == "__next__" and + env.parent_scope and env.parent_scope.is_c_class_scope and + not self.in_try_block): + # tp_iternext is allowed to return NULL without raising StopIteration. + # For the sake of simplicity, only allow this to happen when not in + # a try block + self.exc_type = None + + return self + + nogil_check = Node.gil_error + gil_message = "Raising exception" + + def generate_execution_code(self, code): + code.mark_pos(self.pos) + if self.builtin_exc_name == 'MemoryError': + code.putln('PyErr_NoMemory(); %s' % code.error_goto(self.pos)) + return + elif self.builtin_exc_name == 'StopIteration' and not self.exc_type: + code.putln('%s = 1;' % Naming.error_without_exception_cname) + code.putln('%s;' % code.error_goto(None)) + code.funcstate.error_without_exception = True + return + + if self.exc_type: + self.exc_type.generate_evaluation_code(code) + type_code = self.exc_type.py_result() + if self.exc_type.is_name: + code.globalstate.use_entry_utility_code(self.exc_type.entry) + else: + type_code = "0" + if self.exc_value: + self.exc_value.generate_evaluation_code(code) + value_code = self.exc_value.py_result() + else: + value_code = "0" + if self.exc_tb: + self.exc_tb.generate_evaluation_code(code) + tb_code = self.exc_tb.py_result() + else: + tb_code = "0" + if self.cause: + self.cause.generate_evaluation_code(code) + cause_code = self.cause.py_result() + else: + cause_code = "0" + code.globalstate.use_utility_code(raise_utility_code) + code.putln( + "__Pyx_Raise(%s, %s, %s, %s);" % ( + type_code, + value_code, + tb_code, + cause_code)) + for obj in (self.exc_type, self.exc_value, self.exc_tb, self.cause): + if obj: + obj.generate_disposal_code(code) + obj.free_temps(code) + code.putln( + code.error_goto(self.pos)) + + def 
class ReraiseStatNode(StatNode):
    # A bare 'raise' statement that re-raises the current exception.

    child_attrs = []
    is_terminator = True

    def analyse_expressions(self, env):
        return self

    nogil_check = Node.gil_error
    gil_message = "Raising exception"

    def generate_execution_code(self, code):
        code.mark_pos(self.pos)
        vars = code.funcstate.exc_vars
        if vars:
            # We hold the exception in local variables (inside an except
            # block): hand our references back to the thread state.
            code.globalstate.use_utility_code(restore_exception_utility_code)
            code.put_giveref(vars[0], py_object_type)
            code.put_giveref(vars[1], py_object_type)
            # fresh exceptions may not have a traceback yet (-> finally!)
            code.put_xgiveref(vars[2], py_object_type)
            code.putln("__Pyx_ErrRestoreWithState(%s, %s, %s);" % tuple(vars))
            # Ownership was transferred above; clear the local variables.
            for varname in vars:
                code.put("%s = 0; " % varname)
            code.putln()
            code.putln(code.error_goto(self.pos))
        else:
            # No locally held exception: re-raise whatever is current.
            code.globalstate.use_utility_code(
                UtilityCode.load_cached("ReRaiseException", "Exceptions.c"))
            code.putln("__Pyx_ReraiseException(); %s" % code.error_goto(self.pos))
class IfStatNode(StatNode):
    # if statement
    #
    #  if_clauses   [IfClauseNode]
    #  else_clause  StatNode or None

    child_attrs = ["if_clauses", "else_clause"]

    def analyse_declarations(self, env):
        for clause in self.if_clauses:
            clause.analyse_declarations(env)
        if self.else_clause:
            self.else_clause.analyse_declarations(env)

    def analyse_expressions(self, env):
        self.if_clauses = [clause.analyse_expressions(env)
                           for clause in self.if_clauses]
        if self.else_clause:
            self.else_clause = self.else_clause.analyse_expressions(env)
        return self

    def generate_execution_code(self, code):
        code.mark_pos(self.pos)
        end_label = code.new_label()
        # Without an else clause, the final if-clause can fall through to the
        # end label and needs no explicit goto; with an else clause, every
        # clause must jump past it (so no index ever matches).
        last_index = len(self.if_clauses) - (0 if self.else_clause else 1)
        for index, clause in enumerate(self.if_clauses):
            clause.generate_execution_code(code, end_label, is_last=index == last_index)
        if self.else_clause:
            code.mark_pos(self.else_clause.pos)
            code.putln("/*else*/ {")
            self.else_clause.generate_execution_code(code)
            code.putln("}")
        code.put_label(end_label)

    def generate_function_definitions(self, env, code):
        for clause in self.if_clauses:
            clause.generate_function_definitions(env, code)
        if self.else_clause is not None:
            self.else_clause.generate_function_definitions(env, code)

    def annotate(self, code):
        for clause in self.if_clauses:
            clause.annotate(code)
        if self.else_clause:
            self.else_clause.annotate(code)
class SwitchCaseNode(StatNode):
    # Generated in the optimization of an if-elif-else node
    #
    #  conditions    [ExprNode]
    #  body          StatNode

    child_attrs = ['conditions', 'body']

    def generate_condition_evaluation_code(self, code):
        for condition in self.conditions:
            condition.generate_evaluation_code(code)

    def generate_execution_code(self, code):
        condition_count = len(self.conditions)
        tracing = code.globalstate.directives['linetrace']
        for index, condition in enumerate(self.conditions):
            code.putln("case %s:" % condition.result())
            # Tracing code must appear *after* the 'case' statement.
            code.mark_pos(condition.pos)
            if tracing and index + 1 < condition_count:
                # Allow fall-through after the line tracing code.
                code.putln('CYTHON_FALLTHROUGH;')
        self.body.generate_execution_code(code)
        code.mark_pos(self.pos, trace=False)
        code.putln("break;")

    def generate_function_definitions(self, env, code):
        for condition in self.conditions:
            condition.generate_function_definitions(env, code)
        self.body.generate_function_definitions(env, code)

    def annotate(self, code):
        for condition in self.conditions:
            condition.annotate(code)
        self.body.annotate(code)
class WhileStatNode(LoopNode, StatNode):
    # while statement
    #
    #  condition    ExprNode
    #  body         StatNode
    #  else_clause  StatNode

    child_attrs = ["condition", "body", "else_clause"]

    def analyse_declarations(self, env):
        self.body.analyse_declarations(env)
        if self.else_clause:
            self.else_clause.analyse_declarations(env)

    def analyse_expressions(self, env):
        # condition may be None (a transformed 'while True' — presumably;
        # NOTE(review): confirm against the transforms that build this node).
        if self.condition:
            self.condition = self.condition.analyse_temp_boolean_expression(env)
        self.body = self.body.analyse_expressions(env)
        if self.else_clause:
            self.else_clause = self.else_clause.analyse_expressions(env)
        return self

    def generate_execution_code(self, code):
        code.mark_pos(self.pos)
        # Install fresh break/continue labels for this loop, remembering the
        # enclosing loop's labels so they can be restored afterwards.
        old_loop_labels = code.new_loop_labels()
        code.putln(
            "while (1) {")
        if self.condition:
            # Emit the condition test at the top of the C loop body; dispose
            # of the temp before the break so no reference leaks out.
            self.condition.generate_evaluation_code(code)
            self.condition.generate_disposal_code(code)
            code.putln(
                "if (!%s) break;" % self.condition.result())
            self.condition.free_temps(code)
        self.body.generate_execution_code(code)
        code.put_label(code.continue_label)
        code.putln("}")
        # Restore the outer loop labels before emitting the else clause, so
        # break/continue inside 'else' refer to the enclosing loop.
        break_label = code.break_label
        code.set_loop_labels(old_loop_labels)
        if self.else_clause:
            code.mark_pos(self.else_clause.pos)
            code.putln("/*else*/ {")
            self.else_clause.generate_execution_code(code)
            code.putln("}")
        # 'break' jumps here, skipping the else clause.
        code.put_label(break_label)

    def generate_function_definitions(self, env, code):
        if self.condition:
            self.condition.generate_function_definitions(env, code)
        self.body.generate_function_definitions(env, code)
        if self.else_clause is not None:
            self.else_clause.generate_function_definitions(env, code)

    def annotate(self, code):
        if self.condition:
            self.condition.annotate(code)
        self.body.annotate(code)
        if self.else_clause:
            self.else_clause.annotate(code)
import ExprNodes + self.dict_obj = self.dict_obj.analyse_types(env) + self.expected_size = self.expected_size.analyse_types(env) + if self.pos_index_var: + self.pos_index_var = self.pos_index_var.analyse_types(env) + if self.key_target: + self.key_target = self.key_target.analyse_target_types(env) + self.key_ref = ExprNodes.TempNode(self.key_target.pos, PyrexTypes.py_object_type) + self.coerced_key_var = self.key_ref.coerce_to(self.key_target.type, env) + if self.value_target: + self.value_target = self.value_target.analyse_target_types(env) + self.value_ref = ExprNodes.TempNode(self.value_target.pos, type=PyrexTypes.py_object_type) + self.coerced_value_var = self.value_ref.coerce_to(self.value_target.type, env) + if self.tuple_target: + self.tuple_target = self.tuple_target.analyse_target_types(env) + self.tuple_ref = ExprNodes.TempNode(self.tuple_target.pos, PyrexTypes.py_object_type) + self.coerced_tuple_var = self.tuple_ref.coerce_to(self.tuple_target.type, env) + self.is_dict_flag = self.is_dict_flag.analyse_types(env) + return self + + def generate_function_definitions(self, env, code): + self.dict_obj.generate_function_definitions(env, code) + + def generate_execution_code(self, code): + code.globalstate.use_utility_code(UtilityCode.load_cached("dict_iter", "Optimize.c")) + self.dict_obj.generate_evaluation_code(code) + + assignments = [] + temp_addresses = [] + for var, result, target in [(self.key_ref, self.coerced_key_var, self.key_target), + (self.value_ref, self.coerced_value_var, self.value_target), + (self.tuple_ref, self.coerced_tuple_var, self.tuple_target)]: + if target is None: + addr = 'NULL' + else: + assignments.append((var, result, target)) + var.allocate(code) + addr = '&%s' % var.result() + temp_addresses.append(addr) + + result_temp = code.funcstate.allocate_temp(PyrexTypes.c_int_type, False) + code.putln("%s = __Pyx_dict_iter_next(%s, %s, &%s, %s, %s, %s, %s);" % ( + result_temp, + self.dict_obj.py_result(), + self.expected_size.result(), 
+ self.pos_index_var.result(), + temp_addresses[0], + temp_addresses[1], + temp_addresses[2], + self.is_dict_flag.result() + )) + code.putln("if (unlikely(%s == 0)) break;" % result_temp) + code.putln(code.error_goto_if("%s == -1" % result_temp, self.pos)) + code.funcstate.release_temp(result_temp) + + # evaluate all coercions before the assignments + for var, result, target in assignments: + var.generate_gotref(code) + for var, result, target in assignments: + result.generate_evaluation_code(code) + for var, result, target in assignments: + target.generate_assignment_code(result, code) + var.release(code) + + +class SetIterationNextNode(Node): + # Helper node for calling _PySet_NextEntry() inside of a WhileStatNode + # and checking the set size for changes. Created in Optimize.py. + child_attrs = ['set_obj', 'expected_size', 'pos_index_var', + 'coerced_value_var', 'value_target', 'is_set_flag'] + + coerced_value_var = value_ref = None + + def __init__(self, set_obj, expected_size, pos_index_var, value_target, is_set_flag): + Node.__init__( + self, set_obj.pos, + set_obj=set_obj, + expected_size=expected_size, + pos_index_var=pos_index_var, + value_target=value_target, + is_set_flag=is_set_flag, + is_temp=True, + type=PyrexTypes.c_bint_type) + + def analyse_expressions(self, env): + from . 
import ExprNodes + self.set_obj = self.set_obj.analyse_types(env) + self.expected_size = self.expected_size.analyse_types(env) + self.pos_index_var = self.pos_index_var.analyse_types(env) + self.value_target = self.value_target.analyse_target_types(env) + self.value_ref = ExprNodes.TempNode(self.value_target.pos, type=PyrexTypes.py_object_type) + self.coerced_value_var = self.value_ref.coerce_to(self.value_target.type, env) + self.is_set_flag = self.is_set_flag.analyse_types(env) + return self + + def generate_function_definitions(self, env, code): + self.set_obj.generate_function_definitions(env, code) + + def generate_execution_code(self, code): + code.globalstate.use_utility_code(UtilityCode.load_cached("set_iter", "Optimize.c")) + self.set_obj.generate_evaluation_code(code) + + value_ref = self.value_ref + value_ref.allocate(code) + + result_temp = code.funcstate.allocate_temp(PyrexTypes.c_int_type, False) + code.putln("%s = __Pyx_set_iter_next(%s, %s, &%s, &%s, %s);" % ( + result_temp, + self.set_obj.py_result(), + self.expected_size.result(), + self.pos_index_var.result(), + value_ref.result(), + self.is_set_flag.result() + )) + code.putln("if (unlikely(%s == 0)) break;" % result_temp) + code.putln(code.error_goto_if("%s == -1" % result_temp, self.pos)) + code.funcstate.release_temp(result_temp) + + # evaluate all coercions before the assignments + value_ref.generate_gotref(code) + self.coerced_value_var.generate_evaluation_code(code) + self.value_target.generate_assignment_code(self.coerced_value_var, code) + value_ref.release(code) + + +def ForStatNode(pos, **kw): + if 'iterator' in kw: + if kw['iterator'].is_async: + return AsyncForStatNode(pos, **kw) + else: + return ForInStatNode(pos, **kw) + else: + return ForFromStatNode(pos, **kw) + + +class _ForInStatNode(LoopNode, StatNode): + # Base class of 'for-in' statements. 
+ # + # target ExprNode + # iterator IteratorNode | AIterAwaitExprNode(AsyncIteratorNode) + # body StatNode + # else_clause StatNode + # item NextNode | AwaitExprNode(AsyncNextNode) + # is_async boolean true for 'async for' statements + + child_attrs = ["target", "item", "iterator", "body", "else_clause"] + item = None + is_async = False + + def _create_item_node(self): + raise NotImplementedError("must be implemented by subclasses") + + def analyse_declarations(self, env): + self.target.analyse_target_declaration(env) + self.body.analyse_declarations(env) + if self.else_clause: + self.else_clause.analyse_declarations(env) + self._create_item_node() + + def analyse_expressions(self, env): + self.target = self.target.analyse_target_types(env) + self.iterator = self.iterator.analyse_expressions(env) + self._create_item_node() # must rewrap self.item after analysis + self.item = self.item.analyse_expressions(env) + if (not self.is_async and + (self.iterator.type.is_ptr or self.iterator.type.is_array) and + self.target.type.assignable_from(self.iterator.type)): + # C array slice optimization. 
            # (continuation of the C-array-slice branch above: item keeps its
            # native type, no coercion to the target type is needed)
            pass
        else:
            self.item = self.item.coerce_to(self.target.type, env)
        self.body = self.body.analyse_expressions(env)
        if self.else_clause:
            self.else_clause = self.else_clause.analyse_expressions(env)
        return self

    def generate_execution_code(self, code):
        # Emits: for (;;) { item = next(iterator); target = item; BODY; }
        # StopIteration handling happens inside the NextNode's generated code,
        # which jumps to the loop's break label.
        code.mark_pos(self.pos)
        old_loop_labels = code.new_loop_labels()
        self.iterator.generate_evaluation_code(code)
        code.putln("for (;;) {")
        self.item.generate_evaluation_code(code)
        self.target.generate_assignment_code(self.item, code)
        self.body.generate_execution_code(code)
        code.mark_pos(self.pos)
        code.put_label(code.continue_label)
        code.putln("}")

        # clean up before we enter the 'else:' branch
        self.iterator.generate_disposal_code(code)

        else_label = code.new_label("for_else") if self.else_clause else None
        end_label = code.new_label("for_end")
        label_intercepts = code.label_interceptor(
            [code.break_label],
            [end_label],
            skip_to_label=else_label or end_label,
            pos=self.pos,
        )

        code.mark_pos(self.pos)
        # The interceptor body runs once per intercepted label; the iterator
        # must also be disposed on the 'break' path.
        for _ in label_intercepts:
            self.iterator.generate_disposal_code(code)

        code.set_loop_labels(old_loop_labels)
        self.iterator.free_temps(code)

        if self.else_clause:
            code.putln("/*else*/ {")
            code.put_label(else_label)
            self.else_clause.generate_execution_code(code)
            code.putln("}")

        code.put_label(end_label)

    def generate_function_definitions(self, env, code):
        self.target.generate_function_definitions(env, code)
        self.iterator.generate_function_definitions(env, code)
        self.body.generate_function_definitions(env, code)
        if self.else_clause is not None:
            self.else_clause.generate_function_definitions(env, code)

    def annotate(self, code):
        self.target.annotate(code)
        self.iterator.annotate(code)
        self.body.annotate(code)
        if self.else_clause:
            self.else_clause.annotate(code)
        self.item.annotate(code)


class ForInStatNode(_ForInStatNode):
    # 'for' statement

    is_async = False

    def _create_item_node(self):
        from .ExprNodes import NextNode
        self.item = NextNode(self.iterator)


class AsyncForStatNode(_ForInStatNode):
    # 'async for' statement
    #
    # iterator      AIterAwaitExprNode(AsyncIteratorNode)
    # item          AwaitIterNextExprNode(AsyncIteratorNode)

    is_async = True

    def __init__(self, pos, **kw):
        assert 'item' not in kw
        from . import ExprNodes
        # AwaitExprNodes must appear before running MarkClosureVisitor
        kw['item'] = ExprNodes.AwaitIterNextExprNode(kw['iterator'].pos, arg=None)
        _ForInStatNode.__init__(self, pos, **kw)

    def _create_item_node(self):
        from . import ExprNodes
        self.item.arg = ExprNodes.AsyncNextNode(self.iterator)


class ForFromStatNode(LoopNode, StatNode):
    # for name from expr rel name rel expr
    #
    # target        NameNode
    # bound1        ExprNode
    # relation1     string
    # relation2     string
    # bound2        ExprNode
    # step          ExprNode or None
    # body          StatNode
    # else_clause   StatNode or None
    #
    # Used internally:
    #
    # from_range         bool
    # is_py_target       bool
    # loopvar_node       ExprNode (usually a NameNode or temp node)
    # py_loopvar_node    PyTempNode or None
    child_attrs = ["target", "bound1", "bound2", "step", "body", "else_clause"]

    is_py_target = False
    loopvar_node = None
    py_loopvar_node = None
    from_range = False

    gil_message = "For-loop using object bounds or target"

    def nogil_check(self, env):
        for x in (self.target, self.bound1, self.bound2):
            if x.type.is_pyobject:
                self.gil_error()

    def analyse_declarations(self, env):
        self.target.analyse_target_declaration(env)
        self.body.analyse_declarations(env)
        if self.else_clause:
            self.else_clause.analyse_declarations(env)

    def analyse_expressions(self, env):
        from . import ExprNodes
        self.target = self.target.analyse_target_types(env)
        self.bound1 = self.bound1.analyse_types(env)
        self.bound2 = self.bound2.analyse_types(env)
        if self.step is not None:
            # A literal negative step almost always means the relations point
            # the wrong way (e.g. "from 10 > i > 0 by -1"), hence the warning.
            if isinstance(self.step, ExprNodes.UnaryMinusNode):
                warning(self.step.pos, "Probable infinite loop in for-from-by statement. "
                        "Consider switching the directions of the relations.", 2)
            self.step = self.step.analyse_types(env)

        self.set_up_loop(env)
        target_type = self.target.type
        if not (target_type.is_pyobject or target_type.is_numeric):
            error(self.target.pos, "for-from loop variable must be c numeric type or Python object")

        self.body = self.body.analyse_expressions(env)
        if self.else_clause:
            self.else_clause = self.else_clause.analyse_expressions(env)
        return self

    def set_up_loop(self, env):
        # Choose the C type used for the loop counter and coerce the bounds
        # and step to it; decide whether the assignment back into the target
        # needs to go through a Python object.
        from . import ExprNodes

        target_type = self.target.type
        if target_type.is_numeric:
            loop_type = target_type
        else:
            if target_type.is_enum:
                warning(self.target.pos,
                        "Integer loops over enum values are fragile. Please cast to a safe integer type instead.")
            loop_type = PyrexTypes.c_long_type if target_type.is_pyobject else PyrexTypes.c_int_type
            if not self.bound1.type.is_pyobject:
                loop_type = PyrexTypes.widest_numeric_type(loop_type, self.bound1.type)
            if not self.bound2.type.is_pyobject:
                loop_type = PyrexTypes.widest_numeric_type(loop_type, self.bound2.type)
            if self.step is not None and not self.step.type.is_pyobject:
                loop_type = PyrexTypes.widest_numeric_type(loop_type, self.step.type)
        self.bound1 = self.bound1.coerce_to(loop_type, env)
        self.bound2 = self.bound2.coerce_to(loop_type, env)
        if not self.bound2.is_literal:
            self.bound2 = self.bound2.coerce_to_temp(env)
        if self.step is not None:
            self.step = self.step.coerce_to(loop_type, env)
            if not self.step.is_literal:
                self.step = self.step.coerce_to_temp(env)

        if target_type.is_numeric or target_type.is_enum:
            self.is_py_target = False
            if isinstance(self.target, ExprNodes.BufferIndexNode):
                raise error(self.pos, "Buffer or memoryview slicing/indexing not allowed as for-loop target.")
            self.loopvar_node = self.target
            self.py_loopvar_node = None
        else:
            self.is_py_target = True
            c_loopvar_node = ExprNodes.TempNode(self.pos, loop_type, env)
            self.loopvar_node = c_loopvar_node
            # Python target: keep the counter in a C temp and clone it into a
            # Python object for each assignment to the target.
            self.py_loopvar_node = ExprNodes.CloneNode(c_loopvar_node).coerce_to_pyobject(env)

    def generate_execution_code(self, code):
        code.mark_pos(self.pos)
        old_loop_labels = code.new_loop_labels()
        from_range = self.from_range
        self.bound1.generate_evaluation_code(code)
        self.bound2.generate_evaluation_code(code)
        offset, incop = self.relation_table[self.relation1]
        if self.step is not None:
            self.step.generate_evaluation_code(code)
            step = self.step.result()
            incop = "%s=%s" % (incop[0], step)  # e.g. '++' => '+= STEP'
        else:
            step = '1'

        from . import ExprNodes
        if isinstance(self.loopvar_node, ExprNodes.TempNode):
            self.loopvar_node.allocate(code)
        if isinstance(self.py_loopvar_node, ExprNodes.TempNode):
            self.py_loopvar_node.allocate(code)

        # Enums iterate via a plain C long counter to avoid invalid enum values.
        loopvar_type = PyrexTypes.c_long_type if self.target.type.is_enum else self.target.type

        if from_range and not self.is_py_target:
            loopvar_name = code.funcstate.allocate_temp(loopvar_type, False)
        else:
            loopvar_name = self.loopvar_node.result()
        if loopvar_type.is_int and not loopvar_type.signed and self.relation2[0] == '>':
            # Handle the case where the endpoint of an unsigned int iteration
            # is within step of 0.
            # Unsigned descending loop: bias counter and bound by +step so the
            # comparison never wraps below zero, and increment at the top of
            # the body instead of in the for-header.
            code.putln("for (%s = %s%s + %s; %s %s %s + %s; ) { %s%s;" % (
                loopvar_name,
                self.bound1.result(), offset, step,
                loopvar_name, self.relation2, self.bound2.result(), step,
                loopvar_name, incop))
        else:
            code.putln("for (%s = %s%s; %s %s %s; %s%s) {" % (
                loopvar_name,
                self.bound1.result(), offset,
                loopvar_name, self.relation2, self.bound2.result(),
                loopvar_name, incop))

        coerced_loopvar_node = self.py_loopvar_node
        if coerced_loopvar_node is None and from_range:
            coerced_loopvar_node = ExprNodes.RawCNameExprNode(self.target.pos, loopvar_type, loopvar_name)
        if coerced_loopvar_node is not None:
            coerced_loopvar_node.generate_evaluation_code(code)
            self.target.generate_assignment_code(coerced_loopvar_node, code)

        self.body.generate_execution_code(code)
        code.put_label(code.continue_label)

        if not from_range and self.py_loopvar_node:
            # This mess is to make for..from loops with python targets behave
            # exactly like those with C targets with regards to re-assignment
            # of the loop variable.
            if self.target.entry.is_pyglobal:
                # We know target is a NameNode, this is the only ugly case.
                # Re-read the (possibly rebound) global loop variable back into
                # the C counter at the end of each iteration.
                target_node = ExprNodes.PyTempNode(self.target.pos, None)
                target_node.allocate(code)
                interned_cname = code.intern_identifier(self.target.entry.name)
                if self.target.entry.scope.is_module_scope:
                    code.globalstate.use_utility_code(
                        UtilityCode.load_cached("GetModuleGlobalName", "ObjectHandling.c"))
                    lookup_func = '__Pyx_GetModuleGlobalName(%s, %s); %s'
                else:
                    code.globalstate.use_utility_code(
                        UtilityCode.load_cached("GetNameInClass", "ObjectHandling.c"))
                    # the class namespace cname is baked into the template here
                    lookup_func = '__Pyx_GetNameInClass(%s, {}, %s); %s'.format(
                        self.target.entry.scope.namespace_cname)
                code.putln(lookup_func % (
                    target_node.result(),
                    interned_cname,
                    code.error_goto_if_null(target_node.result(), self.target.pos)))
                target_node.generate_gotref(code)
            else:
                target_node = self.target
            from_py_node = ExprNodes.CoerceFromPyTypeNode(
                self.loopvar_node.type, target_node, self.target.entry.scope)
            from_py_node.temp_code = loopvar_name
            from_py_node.generate_result_code(code)
            if self.target.entry.is_pyglobal:
                code.put_decref(target_node.result(), target_node.type)
                target_node.release(code)

        code.putln("}")

        if not from_range and self.py_loopvar_node:
            # This is potentially wasteful, but we don't want the semantics to
            # depend on whether or not the loop is a python type.
            # final assignment of the last counter value to the Python target
            self.py_loopvar_node.generate_evaluation_code(code)
            self.target.generate_assignment_code(self.py_loopvar_node, code)
        if from_range and not self.is_py_target:
            code.funcstate.release_temp(loopvar_name)

        break_label = code.break_label
        code.set_loop_labels(old_loop_labels)
        if self.else_clause:
            code.putln("/*else*/ {")
            self.else_clause.generate_execution_code(code)
            code.putln("}")
        code.put_label(break_label)
        self.bound1.generate_disposal_code(code)
        self.bound1.free_temps(code)
        self.bound2.generate_disposal_code(code)
        self.bound2.free_temps(code)
        if isinstance(self.loopvar_node, ExprNodes.TempNode):
            self.loopvar_node.release(code)
        if isinstance(self.py_loopvar_node, ExprNodes.TempNode):
            self.py_loopvar_node.release(code)
        if self.step is not None:
            self.step.generate_disposal_code(code)
            self.step.free_temps(code)

    relation_table = {
        # {relop : (initial offset, increment op)}
        '<=': ("", "++"),
        '<' : ("+1", "++"),
        '>=': ("", "--"),
        '>' : ("-1", "--"),
    }

    def generate_function_definitions(self, env, code):
        self.target.generate_function_definitions(env, code)
        self.bound1.generate_function_definitions(env, code)
        self.bound2.generate_function_definitions(env, code)
        if self.step is not None:
            self.step.generate_function_definitions(env, code)
        self.body.generate_function_definitions(env, code)
        if self.else_clause is not None:
            self.else_clause.generate_function_definitions(env, code)

    def annotate(self, code):
        self.target.annotate(code)
        self.bound1.annotate(code)
        self.bound2.annotate(code)
        if self.step:
            self.step.annotate(code)
        self.body.annotate(code)
        if self.else_clause:
            self.else_clause.annotate(code)


class WithStatNode(StatNode):
    """
    Represents a Python with statement.

    Implemented by the WithTransform as follows:

        MGR = EXPR
        EXIT = MGR.__exit__
        VALUE = MGR.__enter__()
        EXC = True
        try:
            try:
                TARGET = VALUE  # optional
                BODY
            except:
                EXC = False
                if not EXIT(*EXCINFO):
                    raise
        finally:
            if EXC:
                EXIT(None, None, None)
            MGR = EXIT = VALUE = None
    """
    #  manager          The with statement manager object
    #  target           ExprNode  the target lhs of the __enter__() call
    #  body             StatNode
    #  enter_call       ExprNode  the call to the __enter__() method
    #  exit_var         String    the cname of the __exit__() method reference

    child_attrs = ["manager", "enter_call", "target", "body"]

    enter_call = None
    target_temp = None

    def analyse_declarations(self, env):
        self.manager.analyse_declarations(env)
        self.enter_call.analyse_declarations(env)
        self.body.analyse_declarations(env)

    def analyse_expressions(self, env):
        self.manager = self.manager.analyse_types(env)
        self.enter_call = self.enter_call.analyse_types(env)
        if self.target:
            # set up target_temp before descending into body (which uses it)
            from .ExprNodes import TempNode
            self.target_temp = TempNode(self.enter_call.pos, self.enter_call.type)
        self.body = self.body.analyse_expressions(env)
        return self

    def generate_function_definitions(self, env, code):
        self.manager.generate_function_definitions(env, code)
        self.enter_call.generate_function_definitions(env, code)
        self.body.generate_function_definitions(env, code)

    def generate_execution_code(self, code):
        # Look up __exit__/__aexit__ on the type (special-method lookup)
        # before calling __enter__, matching CPython's evaluation order.
        code.mark_pos(self.pos)
        code.putln("/*with:*/ {")
        self.manager.generate_evaluation_code(code)
        self.exit_var = code.funcstate.allocate_temp(py_object_type, manage_ref=False)
        code.globalstate.use_utility_code(
            UtilityCode.load_cached("PyObjectLookupSpecial", "ObjectHandling.c"))
        code.putln("%s = __Pyx_PyObject_LookupSpecial(%s, %s); %s" % (
            self.exit_var,
            self.manager.py_result(),
            code.intern_identifier(EncodedString('__aexit__' if self.is_async else '__exit__')),
            code.error_goto_if_null(self.exit_var, self.pos),
        ))
        code.put_gotref(self.exit_var, py_object_type)

        # need to free exit_var in the face of exceptions during setup
        old_error_label = code.new_error_label()
        intermediate_error_label = code.error_label

        self.enter_call.generate_evaluation_code(code)
        if self.target:
            # The temp result will be cleaned up by the WithTargetAssignmentStatNode
            # after assigning its result to the target of the 'with' statement.
            self.target_temp.allocate(code)
            self.enter_call.make_owned_reference(code)
            code.putln("%s = %s;" % (self.target_temp.result(), self.enter_call.result()))
            self.enter_call.generate_post_assignment_code(code)
        else:
            self.enter_call.generate_disposal_code(code)
        self.enter_call.free_temps(code)

        self.manager.generate_disposal_code(code)
        self.manager.free_temps(code)

        code.error_label = old_error_label
        self.body.generate_execution_code(code)

        if code.label_used(intermediate_error_label):
            step_over_label = code.new_label()
            code.put_goto(step_over_label)
            code.put_label(intermediate_error_label)
            code.put_decref_clear(self.exit_var, py_object_type)
            code.put_goto(old_error_label)
            code.put_label(step_over_label)

        code.funcstate.release_temp(self.exit_var)
        code.putln('}')


class WithTargetAssignmentStatNode(AssignmentNode):
    # The target assignment of the 'with' statement value (return
    # value of the __enter__() call).
    #
    # This is a special cased assignment that properly cleans up the RHS.
    #
    # lhs       ExprNode      the assignment target
    # rhs       ExprNode      a (coerced) TempNode for the rhs (from WithStatNode)
    # with_node WithStatNode  the surrounding with-statement

    child_attrs = ["rhs", "lhs"]
    with_node = None
    rhs = None

    def analyse_declarations(self, env):
        self.lhs.analyse_target_declaration(env)

    def analyse_expressions(self, env):
        self.lhs = self.lhs.analyse_target_types(env)
        self.lhs.gil_assignment_check(env)
        # rhs coerces the with-node's __enter__() temp to the target's type
        self.rhs = self.with_node.target_temp.coerce_to(self.lhs.type, env)
        return self

    def generate_execution_code(self, code):
        self.rhs.generate_evaluation_code(code)
        self.lhs.generate_assignment_code(self.rhs, code)
        # the temp was allocated by the surrounding WithStatNode
        self.with_node.target_temp.release(code)

    def annotate(self, code):
        self.lhs.annotate(code)
        self.rhs.annotate(code)


class TryExceptStatNode(StatNode):
    # try .. except statement
    #
    # body             StatNode
    # except_clauses   [ExceptClauseNode]
    # else_clause      StatNode or None

    child_attrs = ["body", "except_clauses", "else_clause"]
    in_generator = False

    def analyse_declarations(self, env):
        self.body.analyse_declarations(env)
        for except_clause in self.except_clauses:
            except_clause.analyse_declarations(env)
        if self.else_clause:
            self.else_clause.analyse_declarations(env)

    def analyse_expressions(self, env):
        self.body = self.body.analyse_expressions(env)
        default_clause_seen = 0
        for i, except_clause in enumerate(self.except_clauses):
            except_clause = self.except_clauses[i] = except_clause.analyse_expressions(env)
            if default_clause_seen:
                error(except_clause.pos, "default 'except:' must be last")
            if not except_clause.pattern:
                default_clause_seen = 1
        self.has_default_clause = default_clause_seen
        if self.else_clause:
            self.else_clause = self.else_clause.analyse_expressions(env)
        return self

    nogil_check = Node.gil_error
    gil_message = "Try-except statement"

    def generate_execution_code(self, code):
        # Wraps the try body in its own set of return/break/continue/error
        # labels so that leaving the block via any of them first restores the
        # exception state saved on entry.
        code.mark_pos(self.pos)  # before changing the error label, in case of tracing errors
        code.putln("{")

        old_return_label = code.return_label
        old_break_label = code.break_label
        old_continue_label = code.continue_label
        old_error_label = code.new_error_label()
        our_error_label = code.error_label
        except_end_label = code.new_label('exception_handled')
        except_error_label = code.new_label('except_error')
        except_return_label = code.new_label('except_return')
        try_return_label = code.new_label('try_return')
        try_break_label = code.new_label('try_break') if old_break_label else None
        try_continue_label = code.new_label('try_continue') if old_continue_label else None
        try_end_label = code.new_label('try_end')

        exc_save_vars = [code.funcstate.allocate_temp(py_object_type, False)
                         for _ in range(3)]
        # insertion point: the exception-save code is only written here later,
        # once we know whether the body can actually raise
        save_exc = code.insertion_point()
        code.putln(
            "/*try:*/ {")
        code.return_label = try_return_label
        code.break_label = try_break_label
        code.continue_label = try_continue_label
        self.body.generate_execution_code(code)
        code.mark_pos(self.pos, trace=False)
        code.putln(
            "}")
        temps_to_clean_up = code.funcstate.all_free_managed_temps()
        can_raise = code.label_used(our_error_label)

        if can_raise:
            # inject code before the try block to save away the exception state
            code.globalstate.use_utility_code(reset_exception_utility_code)
            if not self.in_generator:
                save_exc.putln("__Pyx_PyThreadState_declare")
                save_exc.putln("__Pyx_PyThreadState_assign")
            save_exc.putln("__Pyx_ExceptionSave(%s);" % (
                ', '.join(['&%s' % var for var in exc_save_vars])))
            for var in exc_save_vars:
                save_exc.put_xgotref(var, py_object_type)

            def restore_saved_exception():
                for name in exc_save_vars:
                    code.put_xgiveref(name, py_object_type)
                code.putln("__Pyx_ExceptionReset(%s);" %
                           ', '.join(exc_save_vars))
        else:
            # try block cannot raise exceptions, but we had to allocate the temps above,
            # so just keep the C compiler from complaining about them being unused
            mark_vars_used = ["(void)%s;" % var for var in exc_save_vars]
            save_exc.putln("%s /* mark used */" % ' '.join(mark_vars_used))

            def restore_saved_exception():
                pass

        code.error_label = except_error_label
        code.return_label = except_return_label
        normal_case_terminates = self.body.is_terminator
        if self.else_clause:
            code.mark_pos(self.else_clause.pos)
            code.putln(
                "/*else:*/ {")
            self.else_clause.generate_execution_code(code)
            code.putln(
                "}")
            if not normal_case_terminates:
                normal_case_terminates = self.else_clause.is_terminator

        if can_raise:
            if not normal_case_terminates:
                for var in exc_save_vars:
                    code.put_xdecref_clear(var, py_object_type)
                code.put_goto(try_end_label)
            code.put_label(our_error_label)
            for temp_name, temp_type in temps_to_clean_up:
                code.put_xdecref_clear(temp_name, temp_type)

            outer_except = code.funcstate.current_except
            # Currently points to self, but the ExceptClauseNode would also be ok. Change if needed.
            code.funcstate.current_except = self
            for except_clause in self.except_clauses:
                except_clause.generate_handling_code(code, except_end_label)
            code.funcstate.current_except = outer_except

            if not self.has_default_clause:
                # no bare 'except:' => unmatched exceptions propagate
                code.put_goto(except_error_label)

        label_intercepts = code.label_interceptor(
            [except_error_label, try_break_label, try_continue_label, try_return_label, except_return_label],
            [old_error_label, old_break_label, old_continue_label, old_return_label, old_return_label],
            skip_to_label=try_end_label if not normal_case_terminates and not code.label_used(try_end_label) else None,
            pos=self.pos,
            trace=False,
        )

        for _ in label_intercepts:
            if can_raise:
                restore_saved_exception()

        if code.label_used(except_end_label):
            if not normal_case_terminates and not code.label_used(try_end_label):
                code.put_goto(try_end_label)
            code.put_label(except_end_label)
            if can_raise:
                restore_saved_exception()
        if code.label_used(try_end_label):
            code.put_label(try_end_label)
        code.putln("}")

        for cname in exc_save_vars:
            code.funcstate.release_temp(cname)

        code.return_label = old_return_label
        code.break_label = old_break_label
        code.continue_label = old_continue_label
        code.error_label = old_error_label

    def generate_function_definitions(self, env, code):
        self.body.generate_function_definitions(env, code)
        for except_clause in self.except_clauses:
            except_clause.generate_function_definitions(env, code)
        if self.else_clause is not None:
            self.else_clause.generate_function_definitions(env, code)

    def annotate(self, code):
        self.body.annotate(code)
        for except_node in self.except_clauses:
            except_node.annotate(code)
        if self.else_clause:
            self.else_clause.annotate(code)


class ExceptClauseNode(Node):
    # Part of try ... except statement.
    #
    # pattern        [ExprNode]
    # target         ExprNode or None
    # body           StatNode
    # excinfo_target TupleNode(3*ResultRefNode) or None   optional target for exception info (not owned here!)
    # match_flag     string             result of exception match
    # exc_value      ExcValueNode       used internally
    # function_name  string             qualified name of enclosing function
    # exc_vars       (string * 3)       local exception variables
    # is_except_as   bool               Py3-style "except ... as xyz"

    # excinfo_target is never set by the parser, but can be set by a transform
    # in order to extract more extensive information about the exception as a
    # sys.exc_info()-style tuple into a target variable

    child_attrs = ["pattern", "target", "body", "exc_value"]

    exc_value = None
    excinfo_target = None
    is_except_as = False

    def analyse_declarations(self, env):
        if self.target:
            self.target.analyse_target_declaration(env)
        self.body.analyse_declarations(env)

    def analyse_expressions(self, env):
        self.function_name = env.qualified_name
        if self.pattern:
            # normalise/unpack self.pattern into a list
            for i, pattern in enumerate(self.pattern):
                pattern = pattern.analyse_expressions(env)
                self.pattern[i] = pattern.coerce_to_pyobject(env)

        if self.target:
            from . import ExprNodes
            self.exc_value = ExprNodes.ExcValueNode(self.pos)
            self.target = self.target.analyse_target_expression(env, self.exc_value)

        self.body = self.body.analyse_expressions(env)
        return self

    def generate_handling_code(self, code, end_label):
        code.mark_pos(self.pos)

        if self.pattern:
            has_non_literals = not all(
                pattern.is_literal or pattern.is_simple() and not pattern.is_temp
                for pattern in self.pattern)

            if has_non_literals:
                # For non-trivial exception check expressions, hide the live exception from C-API calls.
                # stash the live exception in temps while evaluating patterns
                exc_vars = [code.funcstate.allocate_temp(py_object_type, manage_ref=True)
                            for _ in range(3)]
                code.globalstate.use_utility_code(UtilityCode.load_cached("PyErrFetchRestore", "Exceptions.c"))
                code.putln("__Pyx_ErrFetch(&%s, &%s, &%s);" % tuple(exc_vars))
                exc_type = exc_vars[0]
            else:
                exc_vars = exc_type = None

            for pattern in self.pattern:
                pattern.generate_evaluation_code(code)
            patterns = [pattern.py_result() for pattern in self.pattern]

            exc_tests = []
            if exc_type:
                # exception was fetched above => match against the fetched type
                code.globalstate.use_utility_code(
                    UtilityCode.load_cached("FastTypeChecks", "ModuleSetupCode.c"))
                if len(patterns) == 2:
                    exc_tests.append("__Pyx_PyErr_GivenExceptionMatches2(%s, %s, %s)" % (
                        exc_type, patterns[0], patterns[1],
                    ))
                else:
                    exc_tests.extend(
                        "__Pyx_PyErr_GivenExceptionMatches(%s, %s)" % (exc_type, pattern)
                        for pattern in patterns
                    )
            elif len(patterns) == 2:
                code.globalstate.use_utility_code(
                    UtilityCode.load_cached("FastTypeChecks", "ModuleSetupCode.c"))
                exc_tests.append("__Pyx_PyErr_ExceptionMatches2(%s, %s)" % (
                    patterns[0], patterns[1],
                ))
            else:
                code.globalstate.use_utility_code(
                    UtilityCode.load_cached("PyErrExceptionMatches", "Exceptions.c"))
                exc_tests.extend(
                    "__Pyx_PyErr_ExceptionMatches(%s)" % pattern
                    for pattern in patterns
                )

            match_flag = code.funcstate.allocate_temp(PyrexTypes.c_int_type, manage_ref=False)
            code.putln("%s = %s;" % (match_flag, ' || '.join(exc_tests)))
            for pattern in self.pattern:
                pattern.generate_disposal_code(code)
                pattern.free_temps(code)

            if exc_vars:
                # put the stashed exception back before entering the handler
                code.putln("__Pyx_ErrRestore(%s, %s, %s);" % tuple(exc_vars))
                code.putln(' '.join(["%s = 0;" % var for var in exc_vars]))
                for temp in exc_vars:
                    code.funcstate.release_temp(temp)

            code.putln(
                "if (%s) {" %
                match_flag)
            code.funcstate.release_temp(match_flag)
        else:
            code.putln("/*except:*/ {")

        if (not getattr(self.body, 'stats', True)
                and self.excinfo_target is None
                and self.target is None):
            # most simple case: no exception variable, empty body (pass)
            # => reset the exception state, done
            code.globalstate.use_utility_code(UtilityCode.load_cached("PyErrFetchRestore", "Exceptions.c"))
            code.putln("__Pyx_ErrRestore(0,0,0);")
            code.put_goto(end_label)
            code.putln("}")
            return

        exc_vars = [code.funcstate.allocate_temp(py_object_type, manage_ref=True)
                    for _ in range(3)]
        code.put_add_traceback(self.function_name)
        # We always have to fetch the exception value even if
        # there is no target, because this also normalises the
        # exception and stores it in the thread state.
        code.globalstate.use_utility_code(get_exception_utility_code)
        exc_args = "&%s, &%s, &%s" % tuple(exc_vars)
        code.putln("if (__Pyx_GetException(%s) < 0) %s" % (
            exc_args, code.error_goto(self.pos)))
        for var in exc_vars:
            code.put_xgotref(var, py_object_type)
        if self.target:
            self.exc_value.set_var(exc_vars[1])
            self.exc_value.generate_evaluation_code(code)
            self.target.generate_assignment_code(self.exc_value, code)
        if self.excinfo_target is not None:
            for tempvar, node in zip(exc_vars, self.excinfo_target.args):
                node.set_var(tempvar)

        old_loop_labels = code.new_loop_labels("except_")

        old_exc_vars = code.funcstate.exc_vars
        code.funcstate.exc_vars = exc_vars
        self.body.generate_execution_code(code)
        code.funcstate.exc_vars = old_exc_vars

        if not self.body.is_terminator:
            for var in exc_vars:
                # FIXME: XDECREF() is needed to allow re-raising (which clears the exc_vars),
                # but I don't think it's the right solution.
                code.put_xdecref_clear(var, py_object_type)
            code.put_goto(end_label)

        for _ in code.label_interceptor(code.get_loop_labels(), old_loop_labels):
            for i, var in enumerate(exc_vars):
                # Traceback may be NULL.
+ (code.put_decref_clear if i < 2 else code.put_xdecref_clear)(var, py_object_type) + + code.set_loop_labels(old_loop_labels) + + for temp in exc_vars: + code.funcstate.release_temp(temp) + + code.putln( + "}") + + def generate_function_definitions(self, env, code): + if self.target is not None: + self.target.generate_function_definitions(env, code) + self.body.generate_function_definitions(env, code) + + def annotate(self, code): + if self.pattern: + for pattern in self.pattern: + pattern.annotate(code) + if self.target: + self.target.annotate(code) + self.body.annotate(code) + + +class TryFinallyStatNode(StatNode): + # try ... finally statement + # + # body StatNode + # finally_clause StatNode + # finally_except_clause deep-copy of finally_clause for exception case + # in_generator inside of generator => must store away current exception also in return case + # + # Each of the continue, break, return and error gotos runs + # into its own deep-copy of the finally block code. + # In addition, if we're doing an error, we save the + # exception on entry to the finally block and restore + # it on exit. 
+ + child_attrs = ["body", "finally_clause", "finally_except_clause"] + + preserve_exception = 1 + + # handle exception case, in addition to return/break/continue + handle_error_case = True + func_return_type = None + finally_except_clause = None + + is_try_finally_in_nogil = False + in_generator = False + + @staticmethod + def create_analysed(pos, env, body, finally_clause): + node = TryFinallyStatNode(pos, body=body, finally_clause=finally_clause) + return node + + def analyse_declarations(self, env): + self.body.analyse_declarations(env) + self.finally_except_clause = copy.deepcopy(self.finally_clause) + self.finally_except_clause.analyse_declarations(env) + self.finally_clause.analyse_declarations(env) + + def analyse_expressions(self, env): + self.body = self.body.analyse_expressions(env) + self.finally_clause = self.finally_clause.analyse_expressions(env) + self.finally_except_clause = self.finally_except_clause.analyse_expressions(env) + if env.return_type and not env.return_type.is_void: + self.func_return_type = env.return_type + return self + + nogil_check = Node.gil_error + gil_message = "Try-finally statement" + + def generate_execution_code(self, code): + code.mark_pos(self.pos) # before changing the error label, in case of tracing errors + code.putln("/*try:*/ {") + + old_error_label = code.error_label + old_labels = code.all_new_labels() + new_labels = code.get_all_labels() + new_error_label = code.error_label + if not self.handle_error_case: + code.error_label = old_error_label + catch_label = code.new_label() + + was_in_try_finally = code.funcstate.in_try_finally + code.funcstate.in_try_finally = 1 + + self.body.generate_execution_code(code) + + code.funcstate.in_try_finally = was_in_try_finally + code.putln("}") + + temps_to_clean_up = code.funcstate.all_free_managed_temps() + code.mark_pos(self.finally_clause.pos) + code.putln("/*finally:*/ {") + + # Reset labels only after writing out a potential line trace call for correct nogil error handling. 
+ code.set_all_labels(old_labels) + + def fresh_finally_clause(_next=[self.finally_clause]): + # generate the original subtree once and always keep a fresh copy + node = _next[0] + node_copy = copy.deepcopy(node) + if node is self.finally_clause: + _next[0] = node_copy + else: + node = node_copy + return node + + preserve_error = self.preserve_exception and code.label_used(new_error_label) + needs_success_cleanup = not self.finally_clause.is_terminator + + if not self.body.is_terminator: + code.putln('/*normal exit:*/{') + fresh_finally_clause().generate_execution_code(code) + if not self.finally_clause.is_terminator: + code.put_goto(catch_label) + code.putln('}') + + if preserve_error: + code.put_label(new_error_label) + code.putln('/*exception exit:*/{') + if not self.in_generator: + code.putln("__Pyx_PyThreadState_declare") + if self.is_try_finally_in_nogil: + code.declare_gilstate() + if needs_success_cleanup: + exc_lineno_cnames = tuple([ + code.funcstate.allocate_temp(PyrexTypes.c_int_type, manage_ref=False) + for _ in range(2)]) + exc_filename_cname = code.funcstate.allocate_temp( + PyrexTypes.CPtrType(PyrexTypes.c_const_type(PyrexTypes.c_char_type)), + manage_ref=False) + else: + exc_lineno_cnames = exc_filename_cname = None + exc_vars = tuple([ + code.funcstate.allocate_temp(py_object_type, manage_ref=False) + for _ in range(6)]) + self.put_error_catcher( + code, temps_to_clean_up, exc_vars, exc_lineno_cnames, exc_filename_cname) + finally_old_labels = code.all_new_labels() + + code.putln('{') + old_exc_vars = code.funcstate.exc_vars + code.funcstate.exc_vars = exc_vars[:3] + self.finally_except_clause.generate_execution_code(code) + code.funcstate.exc_vars = old_exc_vars + code.putln('}') + + if needs_success_cleanup: + self.put_error_uncatcher(code, exc_vars, exc_lineno_cnames, exc_filename_cname) + if exc_lineno_cnames: + for cname in exc_lineno_cnames: + code.funcstate.release_temp(cname) + if exc_filename_cname: + 
code.funcstate.release_temp(exc_filename_cname) + code.put_goto(old_error_label) + + for _ in code.label_interceptor(code.get_all_labels(), finally_old_labels): + self.put_error_cleaner(code, exc_vars) + + for cname in exc_vars: + code.funcstate.release_temp(cname) + code.putln('}') + + code.set_all_labels(old_labels) + return_label = code.return_label + exc_vars = () + + # TODO: use code.label_interceptor()? + for i, (new_label, old_label) in enumerate(zip(new_labels, old_labels)): + if not code.label_used(new_label): + continue + if new_label == new_error_label and preserve_error: + continue # handled above + + code.putln('%s: {' % new_label) + ret_temp = None + if old_label == return_label: + # return actually raises an (uncatchable) exception in generators that we must preserve + if self.in_generator: + exc_vars = tuple([ + code.funcstate.allocate_temp(py_object_type, manage_ref=False) + for _ in range(6)]) + self.put_error_catcher(code, [], exc_vars) + if not self.finally_clause.is_terminator: + # store away return value for later reuse + if (self.func_return_type and + not self.is_try_finally_in_nogil and + not isinstance(self.finally_clause, GILExitNode)): + ret_temp = code.funcstate.allocate_temp( + self.func_return_type, manage_ref=False) + code.putln("%s = %s;" % (ret_temp, Naming.retval_cname)) + if self.func_return_type.is_pyobject: + code.putln("%s = 0;" % Naming.retval_cname) + + fresh_finally_clause().generate_execution_code(code) + + if old_label == return_label: + if ret_temp: + code.putln("%s = %s;" % (Naming.retval_cname, ret_temp)) + if self.func_return_type.is_pyobject: + code.putln("%s = 0;" % ret_temp) + code.funcstate.release_temp(ret_temp) + if self.in_generator: + self.put_error_uncatcher(code, exc_vars) + for cname in exc_vars: + code.funcstate.release_temp(cname) + + if not self.finally_clause.is_terminator: + code.put_goto(old_label) + code.putln('}') + + # End finally + code.put_label(catch_label) + code.putln( + "}") + + def 
generate_function_definitions(self, env, code): + self.body.generate_function_definitions(env, code) + self.finally_clause.generate_function_definitions(env, code) + if self.finally_except_clause: + self.finally_except_clause.generate_function_definitions(env, code) + + def put_error_catcher(self, code, temps_to_clean_up, exc_vars, + exc_lineno_cnames=None, exc_filename_cname=None): + code.globalstate.use_utility_code(restore_exception_utility_code) + code.globalstate.use_utility_code(get_exception_utility_code) + code.globalstate.use_utility_code(swap_exception_utility_code) + + if self.is_try_finally_in_nogil: + code.put_ensure_gil(declare_gilstate=False) + code.putln("__Pyx_PyThreadState_assign") + + code.putln(' '.join(["%s = 0;" % var for var in exc_vars])) + for temp_name, type in temps_to_clean_up: + code.put_xdecref_clear(temp_name, type) + + # not using preprocessor here to avoid warnings about + # unused utility functions and/or temps + code.putln("if (PY_MAJOR_VERSION >= 3)" + " __Pyx_ExceptionSwap(&%s, &%s, &%s);" % exc_vars[3:]) + code.putln("if ((PY_MAJOR_VERSION < 3) ||" + # if __Pyx_GetException() fails in Py3, + # store the newly raised exception instead + " unlikely(__Pyx_GetException(&%s, &%s, &%s) < 0)) " + "__Pyx_ErrFetch(&%s, &%s, &%s);" % (exc_vars[:3] * 2)) + for var in exc_vars: + code.put_xgotref(var, py_object_type) + if exc_lineno_cnames: + code.putln("%s = %s; %s = %s; %s = %s;" % ( + exc_lineno_cnames[0], Naming.lineno_cname, + exc_lineno_cnames[1], Naming.clineno_cname, + exc_filename_cname, Naming.filename_cname)) + + if self.is_try_finally_in_nogil: + code.put_release_ensured_gil() + + def put_error_uncatcher(self, code, exc_vars, exc_lineno_cnames=None, exc_filename_cname=None): + code.globalstate.use_utility_code(restore_exception_utility_code) + code.globalstate.use_utility_code(reset_exception_utility_code) + + if self.is_try_finally_in_nogil: + code.put_ensure_gil(declare_gilstate=False) + # although the thread state is already 
assigned, that can't be trusted after releasing the GIL + code.putln("__Pyx_PyThreadState_assign") + + # not using preprocessor here to avoid warnings about + # unused utility functions and/or temps + code.putln("if (PY_MAJOR_VERSION >= 3) {") + for var in exc_vars[3:]: + code.put_xgiveref(var, py_object_type) + code.putln("__Pyx_ExceptionReset(%s, %s, %s);" % exc_vars[3:]) + code.putln("}") + for var in exc_vars[:3]: + code.put_xgiveref(var, py_object_type) + code.putln("__Pyx_ErrRestore(%s, %s, %s);" % exc_vars[:3]) + + if self.is_try_finally_in_nogil: + code.put_release_ensured_gil() + + code.putln(' '.join(["%s = 0;" % var for var in exc_vars])) + if exc_lineno_cnames: + code.putln("%s = %s; %s = %s; %s = %s;" % ( + Naming.lineno_cname, exc_lineno_cnames[0], + Naming.clineno_cname, exc_lineno_cnames[1], + Naming.filename_cname, exc_filename_cname)) + + def put_error_cleaner(self, code, exc_vars): + code.globalstate.use_utility_code(reset_exception_utility_code) + if self.is_try_finally_in_nogil: + code.put_ensure_gil(declare_gilstate=False) + # although the thread state is already assigned, that can't be trusted after releasing the GIL + code.putln("__Pyx_PyThreadState_assign") + + # not using preprocessor here to avoid warnings about + # unused utility functions and/or temps + code.putln("if (PY_MAJOR_VERSION >= 3) {") + for var in exc_vars[3:]: + code.put_xgiveref(var, py_object_type) + code.putln("__Pyx_ExceptionReset(%s, %s, %s);" % exc_vars[3:]) + code.putln("}") + for var in exc_vars[:3]: + code.put_xdecref_clear(var, py_object_type) + if self.is_try_finally_in_nogil: + code.put_release_ensured_gil() + code.putln(' '.join(["%s = 0;"]*3) % exc_vars[3:]) + + def annotate(self, code): + self.body.annotate(code) + self.finally_clause.annotate(code) + + +class NogilTryFinallyStatNode(TryFinallyStatNode): + """ + A try/finally statement that may be used in nogil code sections. 
+ """ + + preserve_exception = False + nogil_check = None + + +class GILStatNode(NogilTryFinallyStatNode): + # 'with gil' or 'with nogil' statement + # + # state string 'gil' or 'nogil' + # scope_gil_state_known bool For nogil functions this can be False, since they can also be run with gil + # set to False by GilCheck transform + + child_attrs = ["condition"] + NogilTryFinallyStatNode.child_attrs + state_temp = None + scope_gil_state_known = True + + def __init__(self, pos, state, body, condition=None): + self.state = state + self.condition = condition + self.create_state_temp_if_needed(pos, state, body) + TryFinallyStatNode.__init__( + self, pos, + body=body, + finally_clause=GILExitNode( + pos, state=state, state_temp=self.state_temp)) + + def create_state_temp_if_needed(self, pos, state, body): + from .ParseTreeTransforms import YieldNodeCollector + collector = YieldNodeCollector() + collector.visitchildren(body) + if not collector.yields: + return + + if state == 'gil': + temp_type = PyrexTypes.c_gilstate_type + else: + temp_type = PyrexTypes.c_threadstate_ptr_type + from . 
import ExprNodes + self.state_temp = ExprNodes.TempNode(pos, temp_type) + + def analyse_declarations(self, env): + env._in_with_gil_block = (self.state == 'gil') + if self.state == 'gil': + env.has_with_gil_block = True + + if self.condition is not None: + self.condition.analyse_declarations(env) + + return super(GILStatNode, self).analyse_declarations(env) + + def analyse_expressions(self, env): + env.use_utility_code( + UtilityCode.load_cached("ForceInitThreads", "ModuleSetupCode.c")) + + if self.condition is not None: + self.condition = self.condition.analyse_expressions(env) + + was_nogil = env.nogil + env.nogil = self.state == 'nogil' + node = TryFinallyStatNode.analyse_expressions(self, env) + env.nogil = was_nogil + return node + + def generate_execution_code(self, code): + code.mark_pos(self.pos) + code.begin_block() + if self.state_temp: + self.state_temp.allocate(code) + variable = self.state_temp.result() + else: + variable = None + + old_gil_config = code.funcstate.gil_owned + if self.state == 'gil': + code.put_ensure_gil(variable=variable) + code.funcstate.gil_owned = True + else: + code.put_release_gil(variable=variable, unknown_gil_state=not self.scope_gil_state_known) + code.funcstate.gil_owned = False + + TryFinallyStatNode.generate_execution_code(self, code) + + if self.state_temp: + self.state_temp.release(code) + + code.funcstate.gil_owned = old_gil_config + code.end_block() + + +class GILExitNode(StatNode): + """ + Used as the 'finally' block in a GILStatNode + + state string 'gil' or 'nogil' + # scope_gil_state_known bool For nogil functions this can be False, since they can also be run with gil + # set to False by GilCheck transform + """ + + child_attrs = [] + state_temp = None + scope_gil_state_known = True + + def analyse_expressions(self, env): + return self + + def generate_execution_code(self, code): + if self.state_temp: + variable = self.state_temp.result() + else: + variable = None + + if self.state == 'gil': + 
code.put_release_ensured_gil(variable) + else: + code.put_acquire_gil(variable, unknown_gil_state=not self.scope_gil_state_known) + + +class EnsureGILNode(GILExitNode): + """ + Ensure the GIL in nogil functions for cleanup before returning. + """ + + def generate_execution_code(self, code): + code.put_ensure_gil(declare_gilstate=False) + + +def cython_view_utility_code(): + from . import MemoryView + return MemoryView.view_utility_code + + +utility_code_for_cimports = { + # utility code (or inlining c) in a pxd (or pyx) file. + # TODO: Consider a generic user-level mechanism for importing + 'cpython.array' : lambda : UtilityCode.load_cached("ArrayAPI", "arrayarray.h"), + 'cpython.array.array' : lambda : UtilityCode.load_cached("ArrayAPI", "arrayarray.h"), + 'cython.view' : cython_view_utility_code, +} + +utility_code_for_imports = { + # utility code used when special modules are imported. + # TODO: Consider a generic user-level mechanism for importing + 'asyncio': ("__Pyx_patch_asyncio", "PatchAsyncIO", "Coroutine.c"), + 'inspect': ("__Pyx_patch_inspect", "PatchInspect", "Coroutine.c"), +} + +def cimport_numpy_check(node, code): + # shared code between CImportStatNode and FromCImportStatNode + # check to ensure that import_array is called + for mod in code.globalstate.module_node.scope.cimported_modules: + if mod.name != node.module_name: + continue + # there are sometimes several cimported modules with the same name + # so complete the loop if necessary + import_array = mod.lookup_here("import_array") + _import_array = mod.lookup_here("_import_array") + # at least one entry used + used = (import_array and import_array.used) or (_import_array and _import_array.used) + if ((import_array or _import_array) # at least one entry found + and not used): + # sanity check that this is actually numpy and not a user pxd called "numpy" + if _import_array and _import_array.type.is_cfunction: + # warning is mainly for the sake of testing + warning(node.pos, 
"'numpy.import_array()' has been added automatically " + "since 'numpy' was cimported but 'numpy.import_array' was not called.", 0) + code.globalstate.use_utility_code( + UtilityCode.load_cached("NumpyImportArray", "NumpyImportArray.c") + ) + return # no need to continue once the utility code is added + + + +class CImportStatNode(StatNode): + # cimport statement + # + # module_name string Qualified name of module being imported + # as_name string or None Name specified in "as" clause, if any + # is_absolute bool True for absolute imports, False otherwise + + child_attrs = [] + is_absolute = False + + def analyse_declarations(self, env): + if not env.is_module_scope: + error(self.pos, "cimport only allowed at module level") + return + module_scope = env.find_module( + self.module_name, self.pos, relative_level=0 if self.is_absolute else -1) + if "." in self.module_name: + names = [EncodedString(name) for name in self.module_name.split(".")] + top_name = names[0] + top_module_scope = env.context.find_submodule(top_name) + module_scope = top_module_scope + for name in names[1:]: + submodule_scope = module_scope.find_submodule(name) + module_scope.declare_module(name, submodule_scope, self.pos) + module_scope = submodule_scope + if self.as_name: + env.declare_module(self.as_name, module_scope, self.pos) + else: + env.add_imported_module(module_scope) + env.declare_module(top_name, top_module_scope, self.pos) + else: + name = self.as_name or self.module_name + entry = env.declare_module(name, module_scope, self.pos) + entry.known_standard_library_import = self.module_name + if self.module_name in utility_code_for_cimports: + env.use_utility_code(utility_code_for_cimports[self.module_name]()) + + def analyse_expressions(self, env): + return self + + def generate_execution_code(self, code): + if self.module_name == "numpy": + cimport_numpy_check(self, code) + + +class FromCImportStatNode(StatNode): + # from ... 
cimport statement + # + # module_name string Qualified name of module + # relative_level int or None Relative import: number of dots before module_name + # imported_names [(pos, name, as_name)] Names to be imported + + child_attrs = [] + module_name = None + relative_level = None + imported_names = None + + def analyse_declarations(self, env): + if not env.is_module_scope: + error(self.pos, "cimport only allowed at module level") + return + qualified_name_components = env.qualified_name.count('.') + 1 + if self.relative_level: + if self.relative_level > qualified_name_components: + # 1. case: importing beyond package: from .. import pkg + error(self.pos, "relative cimport beyond main package is not allowed") + return + elif self.relative_level == qualified_name_components and not env.is_package: + # 2. case: importing from same level but current dir is not package: from . import module + error(self.pos, "relative cimport from non-package directory is not allowed") + return + module_scope = env.find_module(self.module_name, self.pos, relative_level=self.relative_level) + if not module_scope: + return + module_name = module_scope.qualified_name + env.add_imported_module(module_scope) + for pos, name, as_name in self.imported_names: + if name == "*": + for local_name, entry in list(module_scope.entries.items()): + env.add_imported_entry(local_name, entry, pos) + else: + entry = module_scope.lookup(name) + if entry: + entry.used = 1 + else: + is_relative_import = self.relative_level is not None and self.relative_level > 0 + submodule_scope = env.context.find_module( + name, from_module=module_scope, pos=self.pos, absolute_fallback=False, relative_import=is_relative_import) + if not submodule_scope: + continue + if submodule_scope.parent_module is module_scope: + env.declare_module(as_name or name, submodule_scope, self.pos) + else: + error(pos, "Name '%s' not declared in module '%s'" % (name, module_name)) + + if entry: + local_name = as_name or name + 
env.add_imported_entry(local_name, entry, pos) + + if module_name.startswith('cpython') or module_name.startswith('cython'): # enough for now + if module_name in utility_code_for_cimports: + env.use_utility_code(utility_code_for_cimports[module_name]()) + for _, name, _ in self.imported_names: + fqname = '%s.%s' % (module_name, name) + if fqname in utility_code_for_cimports: + env.use_utility_code(utility_code_for_cimports[fqname]()) + + def declaration_matches(self, entry, kind): + if not entry.is_type: + return 0 + type = entry.type + if kind == 'class': + if not type.is_extension_type: + return 0 + else: + if not type.is_struct_or_union: + return 0 + if kind != type.kind: + return 0 + return 1 + + def analyse_expressions(self, env): + return self + + def generate_execution_code(self, code): + if self.module_name == "numpy": + cimport_numpy_check(self, code) + + +class FromImportStatNode(StatNode): + # from ... import statement + # + # module ImportNode + # items [(string, NameNode)] + # interned_items [(string, NameNode, ExprNode)] + # item PyTempNode used internally + # import_star boolean used internally + + child_attrs = ["module"] + import_star = 0 + + def analyse_declarations(self, env): + for name, target in self.items: + if name == "*": + if not env.is_module_scope: + error(self.pos, "import * only allowed at module level") + return + env.has_import_star = 1 + self.import_star = 1 + else: + target.analyse_target_declaration(env) + if target.entry: + if target.get_known_standard_library_import() is None: + target.entry.known_standard_library_import = EncodedString( + "%s.%s" % (self.module.module_name.value, name)) + else: + # it isn't unambiguous + target.entry.known_standard_library_import = "" + + + def analyse_expressions(self, env): + from . 
import ExprNodes + self.module = self.module.analyse_expressions(env) + self.item = ExprNodes.RawCNameExprNode(self.pos, py_object_type) + self.interned_items = [] + for name, target in self.items: + if name == '*': + for _, entry in env.entries.items(): + if not entry.is_type and entry.type.is_extension_type: + env.use_utility_code(UtilityCode.load_cached("ExtTypeTest", "ObjectHandling.c")) + break + else: + entry = env.lookup(target.name) + # check whether or not entry is already cimported + if (entry.is_type and entry.type.name == name + and hasattr(entry.type, 'module_name')): + if entry.type.module_name == self.module.module_name.value: + # cimported with absolute name + continue + try: + # cimported with relative name + module = env.find_module(self.module.module_name.value, pos=self.pos, + relative_level=self.module.level) + if entry.type.module_name == module.qualified_name: + continue + except AttributeError: + pass + target = target.analyse_target_expression(env, None) # FIXME? 
+ if target.type is py_object_type: + coerced_item = None + else: + coerced_item = self.item.coerce_to(target.type, env) + self.interned_items.append((name, target, coerced_item)) + return self + + def generate_execution_code(self, code): + code.mark_pos(self.pos) + self.module.generate_evaluation_code(code) + if self.import_star: + code.putln( + 'if (%s(%s) < 0) %s;' % ( + Naming.import_star, + self.module.py_result(), + code.error_goto(self.pos))) + item_temp = code.funcstate.allocate_temp(py_object_type, manage_ref=True) + self.item.set_cname(item_temp) + if self.interned_items: + code.globalstate.use_utility_code( + UtilityCode.load_cached("ImportFrom", "ImportExport.c")) + for name, target, coerced_item in self.interned_items: + code.putln( + '%s = __Pyx_ImportFrom(%s, %s); %s' % ( + item_temp, + self.module.py_result(), + code.intern_identifier(name), + code.error_goto_if_null(item_temp, self.pos))) + code.put_gotref(item_temp, py_object_type) + if coerced_item is None: + target.generate_assignment_code(self.item, code) + else: + coerced_item.allocate_temp_result(code) + coerced_item.generate_result_code(code) + target.generate_assignment_code(coerced_item, code) + code.put_decref_clear(item_temp, py_object_type) + code.funcstate.release_temp(item_temp) + self.module.generate_disposal_code(code) + self.module.free_temps(code) + + +class ParallelNode(Node): + """ + Base class for cython.parallel constructs. + """ + + nogil_check = None + + +class ParallelStatNode(StatNode, ParallelNode): + """ + Base class for 'with cython.parallel.parallel():' and 'for i in prange():'. 
+ + assignments { Entry(var) : (var.pos, inplace_operator_or_None) } + assignments to variables in this parallel section + + parent parent ParallelStatNode or None + is_parallel indicates whether this node is OpenMP parallel + (true for #pragma omp parallel for and + #pragma omp parallel) + + is_parallel is true for: + + #pragma omp parallel + #pragma omp parallel for + + sections, but NOT for + + #pragma omp for + + We need this to determine the sharing attributes. + + privatization_insertion_point a code insertion point used to make temps + private (esp. the "nsteps" temp) + + args tuple the arguments passed to the parallel construct + kwargs DictNode the keyword arguments passed to the parallel + construct (replaced by its compile time value) + """ + + child_attrs = ['body', 'num_threads'] + + body = None + + is_prange = False + is_nested_prange = False + + error_label_used = False + + num_threads = None + chunksize = None + + parallel_exc = ( + Naming.parallel_exc_type, + Naming.parallel_exc_value, + Naming.parallel_exc_tb, + ) + + parallel_pos_info = ( + Naming.parallel_filename, + Naming.parallel_lineno, + Naming.parallel_clineno, + ) + + pos_info = ( + Naming.filename_cname, + Naming.lineno_cname, + Naming.clineno_cname, + ) + + critical_section_counter = 0 + + def __init__(self, pos, **kwargs): + super(ParallelStatNode, self).__init__(pos, **kwargs) + + # All assignments in this scope + self.assignments = kwargs.get('assignments') or {} + + # All seen closure cnames and their temporary cnames + self.seen_closure_vars = set() + + # Dict of variables that should be declared (first|last|)private or + # reduction { Entry: (op, lastprivate) }. + # If op is not None, it's a reduction. 
+ self.privates = {} + + # [NameNode] + self.assigned_nodes = [] + + def analyse_declarations(self, env): + self.body.analyse_declarations(env) + + self.num_threads = None + + if self.kwargs: + # Try to find num_threads and chunksize keyword arguments + pairs = [] + seen = set() + for dictitem in self.kwargs.key_value_pairs: + if dictitem.key.value in seen: + error(self.pos, "Duplicate keyword argument found: %s" % dictitem.key.value) + seen.add(dictitem.key.value) + if dictitem.key.value == 'num_threads': + if not dictitem.value.is_none: + self.num_threads = dictitem.value + elif self.is_prange and dictitem.key.value == 'chunksize': + if not dictitem.value.is_none: + self.chunksize = dictitem.value + else: + pairs.append(dictitem) + + self.kwargs.key_value_pairs = pairs + + try: + self.kwargs = self.kwargs.compile_time_value(env) + except Exception as e: + error(self.kwargs.pos, "Only compile-time values may be " + "supplied as keyword arguments") + else: + self.kwargs = {} + + for kw, val in self.kwargs.items(): + if kw not in self.valid_keyword_arguments: + error(self.pos, "Invalid keyword argument: %s" % kw) + else: + setattr(self, kw, val) + + def analyse_expressions(self, env): + if self.num_threads: + self.num_threads = self.num_threads.analyse_expressions(env) + + if self.chunksize: + self.chunksize = self.chunksize.analyse_expressions(env) + + self.body = self.body.analyse_expressions(env) + self.analyse_sharing_attributes(env) + + if self.num_threads is not None: + if self.parent and self.parent.num_threads is not None and not self.parent.is_prange: + error(self.pos, "num_threads already declared in outer section") + elif self.parent and not self.parent.is_prange: + error(self.pos, "num_threads must be declared in the parent parallel section") + elif (self.num_threads.type.is_int and + self.num_threads.is_literal and + self.num_threads.compile_time_value(env) <= 0): + error(self.pos, "argument to num_threads must be greater than 0") + + if not 
self.num_threads.is_simple() or self.num_threads.type.is_pyobject: + self.num_threads = self.num_threads.coerce_to( + PyrexTypes.c_int_type, env).coerce_to_temp(env) + return self + + def analyse_sharing_attributes(self, env): + """ + Analyse the privates for this block and set them in self.privates. + This should be called in a post-order fashion during the + analyse_expressions phase + """ + for entry, (pos, op) in self.assignments.items(): + + if self.is_prange and not self.is_parallel: + # closely nested prange in a with parallel block, disallow + # assigning to privates in the with parallel block (we + # consider it too implicit and magicky for users) + if entry in self.parent.assignments: + error(pos, "Cannot assign to private of outer parallel block") + continue + + if not self.is_prange and op: + # Again possible, but considered to magicky + error(pos, "Reductions not allowed for parallel blocks") + continue + + # By default all variables should have the same values as if + # executed sequentially + lastprivate = True + self.propagate_var_privatization(entry, pos, op, lastprivate) + + def propagate_var_privatization(self, entry, pos, op, lastprivate): + """ + Propagate the sharing attributes of a variable. If the privatization is + determined by a parent scope, done propagate further. + + If we are a prange, we propagate our sharing attributes outwards to + other pranges. If we are a prange in parallel block and the parallel + block does not determine the variable private, we propagate to the + parent of the parent. Recursion stops at parallel blocks, as they have + no concept of lastprivate or reduction. + + So the following cases propagate: + + sum is a reduction for all loops: + + for i in prange(n): + for j in prange(n): + for k in prange(n): + sum += i * j * k + + sum is a reduction for both loops, local_var is private to the + parallel with block: + + for i in prange(n): + with parallel: + local_var = ... 
# private to the parallel + for j in prange(n): + sum += i * j + + Nested with parallel blocks are disallowed, because they wouldn't + allow you to propagate lastprivates or reductions: + + #pragma omp parallel for lastprivate(i) + for i in prange(n): + + sum = 0 + + #pragma omp parallel private(j, sum) + with parallel: + + #pragma omp parallel + with parallel: + + #pragma omp for lastprivate(j) reduction(+:sum) + for j in prange(n): + sum += i + + # sum and j are well-defined here + + # sum and j are undefined here + + # sum and j are undefined here + """ + self.privates[entry] = (op, lastprivate) + + if entry.type.is_memoryviewslice: + error(pos, "Memoryview slices can only be shared in parallel sections") + return + + if self.is_prange: + if not self.is_parallel and entry not in self.parent.assignments: + # Parent is a parallel with block + parent = self.parent.parent + else: + parent = self.parent + + # We don't need to propagate privates, only reductions and + # lastprivates + if parent and (op or lastprivate): + parent.propagate_var_privatization(entry, pos, op, lastprivate) + + def _allocate_closure_temp(self, code, entry): + """ + Helper function that allocate a temporary for a closure variable that + is assigned to. 
+ """ + if self.parent: + return self.parent._allocate_closure_temp(code, entry) + + if entry.cname in self.seen_closure_vars: + return entry.cname + + cname = code.funcstate.allocate_temp(entry.type, True) + + # Add both the actual cname and the temp cname, as the actual cname + # will be replaced with the temp cname on the entry + self.seen_closure_vars.add(entry.cname) + self.seen_closure_vars.add(cname) + + self.modified_entries.append((entry, entry.cname)) + code.putln("%s = %s;" % (cname, entry.cname)) + entry.cname = cname + + def initialize_privates_to_nan(self, code, exclude=None): + first = True + + for entry, (op, lastprivate) in sorted(self.privates.items()): + if not op and (not exclude or entry != exclude): + invalid_value = entry.type.invalid_value() + + if invalid_value: + if first: + code.putln("/* Initialize private variables to " + "invalid values */") + first = False + code.putln("%s = %s;" % (entry.cname, + entry.type.cast_code(invalid_value))) + + def evaluate_before_block(self, code, expr): + c = self.begin_of_parallel_control_block_point_after_decls + # we need to set the owner to ourselves temporarily, as + # allocate_temp may generate a comment in the middle of our pragma + # otherwise when DebugFlags.debug_temp_code_comments is in effect + owner = c.funcstate.owner + c.funcstate.owner = c + expr.generate_evaluation_code(c) + c.funcstate.owner = owner + + return expr.result() + + def put_num_threads(self, code): + """ + Write self.num_threads if set as the num_threads OpenMP directive + """ + if self.num_threads is not None: + code.put(" num_threads(%s)" % self.evaluate_before_block(code, self.num_threads)) + + + def declare_closure_privates(self, code): + """ + If a variable is in a scope object, we need to allocate a temp and + assign the value from the temp to the variable in the scope object + after the parallel section. This kind of copying should be done only + in the outermost parallel section. 
+ """ + self.modified_entries = [] + + for entry in sorted(self.assignments): + if entry.from_closure or entry.in_closure: + self._allocate_closure_temp(code, entry) + + def release_closure_privates(self, code): + """ + Release any temps used for variables in scope objects. As this is the + outermost parallel block, we don't need to delete the cnames from + self.seen_closure_vars. + """ + for entry, original_cname in self.modified_entries: + code.putln("%s = %s;" % (original_cname, entry.cname)) + code.funcstate.release_temp(entry.cname) + entry.cname = original_cname + + def privatize_temps(self, code, exclude_temps=()): + """ + Make any used temporaries private. Before the relevant code block + code.start_collecting_temps() should have been called. + """ + c = self.privatization_insertion_point + self.privatization_insertion_point = None + + if self.is_parallel: + self.temps = temps = code.funcstate.stop_collecting_temps() + privates, firstprivates = [], [] + for temp, type in sorted(temps): + if type.is_pyobject or type.is_memoryviewslice: + firstprivates.append(temp) + else: + privates.append(temp) + + if privates: + c.put(" private(%s)" % ", ".join(privates)) + if firstprivates: + c.put(" firstprivate(%s)" % ", ".join(firstprivates)) + + if self.breaking_label_used: + shared_vars = [Naming.parallel_why] + if self.error_label_used: + shared_vars.extend(self.parallel_exc) + c.put(" private(%s, %s, %s)" % self.pos_info) + + c.put(" shared(%s)" % ', '.join(shared_vars)) + + def cleanup_temps(self, code): + # Now clean up any memoryview slice and object temporaries + if self.is_parallel and not self.is_nested_prange: + code.putln("/* Clean up any temporaries */") + for temp, type in sorted(self.temps): + code.put_xdecref_clear(temp, type, have_gil=False) + + def setup_parallel_control_flow_block(self, code): + """ + Sets up a block that surrounds the parallel block to determine + how the parallel section was exited. 
Any kind of return is + trapped (break, continue, return, exceptions). This is the idea: + + { + int why = 0; + + #pragma omp parallel + { + return # -> goto new_return_label; + goto end_parallel; + + new_return_label: + why = 3; + goto end_parallel; + + end_parallel:; + #pragma omp flush(why) # we need to flush for every iteration + } + + if (why == 3) + goto old_return_label; + } + """ + self.old_loop_labels = code.new_loop_labels() + self.old_error_label = code.new_error_label() + self.old_return_label = code.return_label + code.return_label = code.new_label(name="return") + + code.begin_block() # parallel control flow block + self.begin_of_parallel_control_block_point = code.insertion_point() + self.begin_of_parallel_control_block_point_after_decls = code.insertion_point() + + self.undef_builtin_expect_apple_gcc_bug(code) + + def begin_parallel_block(self, code): + """ + Each OpenMP thread in a parallel section that contains a with gil block + must have the thread-state initialized. The call to + PyGILState_Release() then deallocates our threadstate. If we wouldn't + do this, each with gil block would allocate and deallocate one, thereby + losing exception information before it can be saved before leaving the + parallel section. + """ + self.begin_of_parallel_block = code.insertion_point() + + def end_parallel_block(self, code): + """ + To ensure all OpenMP threads have thread states, we ensure the GIL + in each thread (which creates a thread state if it doesn't exist), + after which we release the GIL. + On exit, reacquire the GIL and release the thread state. + + If compiled without OpenMP support (at the C level), then we still have + to acquire the GIL to decref any object temporaries. 
+ """ + begin_code = self.begin_of_parallel_block + self.begin_of_parallel_block = None + + if self.error_label_used: + end_code = code + + begin_code.putln("#ifdef _OPENMP") + begin_code.put_ensure_gil(declare_gilstate=True) + begin_code.putln("Py_BEGIN_ALLOW_THREADS") + begin_code.putln("#endif /* _OPENMP */") + + end_code.putln("#ifdef _OPENMP") + end_code.putln("Py_END_ALLOW_THREADS") + end_code.putln("#else") + end_code.put_safe("{\n") + end_code.put_ensure_gil() + end_code.putln("#endif /* _OPENMP */") + self.cleanup_temps(end_code) + end_code.put_release_ensured_gil() + end_code.putln("#ifndef _OPENMP") + end_code.put_safe("}\n") + end_code.putln("#endif /* _OPENMP */") + + def trap_parallel_exit(self, code, should_flush=False): + """ + Trap any kind of return inside a parallel construct. 'should_flush' + indicates whether the variable should be flushed, which is needed by + prange to skip the loop. It also indicates whether we need to register + a continue (we need this for parallel blocks, but not for prange + loops, as it is a direct jump there). 
+ + It uses the same mechanism as try/finally: + 1 continue + 2 break + 3 return + 4 error + """ + save_lastprivates_label = code.new_label() + dont_return_label = code.new_label() + + self.any_label_used = False + self.breaking_label_used = False + self.error_label_used = False + + self.parallel_private_temps = [] + + all_labels = code.get_all_labels() + + # Figure this out before starting to generate any code + for label in all_labels: + if code.label_used(label): + self.breaking_label_used = (self.breaking_label_used or + label != code.continue_label) + self.any_label_used = True + + if self.any_label_used: + code.put_goto(dont_return_label) + + for i, label in enumerate(all_labels): + if not code.label_used(label): + continue + + is_continue_label = label == code.continue_label + + code.put_label(label) + + if not (should_flush and is_continue_label): + if label == code.error_label: + self.error_label_used = True + self.fetch_parallel_exception(code) + + code.putln("%s = %d;" % (Naming.parallel_why, i + 1)) + + if (self.breaking_label_used and self.is_prange and not + is_continue_label): + code.put_goto(save_lastprivates_label) + else: + code.put_goto(dont_return_label) + + if self.any_label_used: + if self.is_prange and self.breaking_label_used: + # Don't rely on lastprivate, save our lastprivates + code.put_label(save_lastprivates_label) + self.save_parallel_vars(code) + + code.put_label(dont_return_label) + + if should_flush and self.breaking_label_used: + code.putln_openmp("#pragma omp flush(%s)" % Naming.parallel_why) + + def save_parallel_vars(self, code): + """ + The following shenanigans are instated when we break, return or + propagate errors from a prange. In this case we cannot rely on + lastprivate() to do its job, as no iterations may have executed yet + in the last thread, leaving the values undefined. It is most likely + that the breaking thread has well-defined values of the lastprivate + variables, so we keep those values. 
+ """ + section_name = "__pyx_parallel_lastprivates%d" % self.critical_section_counter + code.putln_openmp("#pragma omp critical(%s)" % section_name) + ParallelStatNode.critical_section_counter += 1 + + code.begin_block() # begin critical section + + c = self.begin_of_parallel_control_block_point + + temp_count = 0 + for entry, (op, lastprivate) in sorted(self.privates.items()): + if not lastprivate or entry.type.is_pyobject: + continue + + if entry.type.is_cpp_class and not entry.type.is_fake_reference and code.globalstate.directives['cpp_locals']: + type_decl = entry.type.cpp_optional_declaration_code("") + else: + type_decl = entry.type.empty_declaration_code() + temp_cname = "__pyx_parallel_temp%d" % temp_count + private_cname = entry.cname + + temp_count += 1 + + invalid_value = entry.type.invalid_value() + if invalid_value: + init = ' = ' + entry.type.cast_code(invalid_value) + else: + init = '' + # Declare the parallel private in the outer block + c.putln("%s %s%s;" % (type_decl, temp_cname, init)) + + self.parallel_private_temps.append((temp_cname, private_cname, entry.type)) + + if entry.type.is_cpp_class: + # moving is fine because we're quitting the loop and so won't be directly accessing the variable again + code.globalstate.use_utility_code( + UtilityCode.load_cached("MoveIfSupported", "CppSupport.cpp")) + private_cname = "__PYX_STD_MOVE_IF_SUPPORTED(%s)" % private_cname + # Initialize before escaping + code.putln("%s = %s;" % (temp_cname, private_cname)) + + + + code.end_block() # end critical section + + def fetch_parallel_exception(self, code): + """ + As each OpenMP thread may raise an exception, we need to fetch that + exception from the threadstate and save it for after the parallel + section where it can be re-raised in the master thread. 
+ + Although it would seem that __pyx_filename, __pyx_lineno and + __pyx_clineno are only assigned to under exception conditions (i.e., + when we have the GIL), and thus should be allowed to be shared without + any race condition, they are in fact subject to the same race + conditions that they were previously when they were global variables + and functions were allowed to release the GIL: + + thread A thread B + acquire + set lineno + release + acquire + set lineno + release + acquire + fetch exception + release + skip the fetch + + deallocate threadstate deallocate threadstate + """ + code.begin_block() + code.put_ensure_gil(declare_gilstate=True) + + code.putln_openmp("#pragma omp flush(%s)" % Naming.parallel_exc_type) + code.putln( + "if (!%s) {" % Naming.parallel_exc_type) + + code.putln("__Pyx_ErrFetchWithState(&%s, &%s, &%s);" % self.parallel_exc) + pos_info = chain(*zip(self.parallel_pos_info, self.pos_info)) + code.funcstate.uses_error_indicator = True + code.putln("%s = %s; %s = %s; %s = %s;" % tuple(pos_info)) + code.put_gotref(Naming.parallel_exc_type, py_object_type) + + code.putln( + "}") + + code.put_release_ensured_gil() + code.end_block() + + def restore_parallel_exception(self, code): + "Re-raise a parallel exception" + code.begin_block() + code.put_ensure_gil(declare_gilstate=True) + + code.put_giveref(Naming.parallel_exc_type, py_object_type) + code.putln("__Pyx_ErrRestoreWithState(%s, %s, %s);" % self.parallel_exc) + pos_info = chain(*zip(self.pos_info, self.parallel_pos_info)) + code.putln("%s = %s; %s = %s; %s = %s;" % tuple(pos_info)) + + code.put_release_ensured_gil() + code.end_block() + + def restore_labels(self, code): + """ + Restore all old labels. Call this before the 'else' clause to for + loops and always before ending the parallel control flow block. 
+ """ + code.set_all_labels(self.old_loop_labels + (self.old_return_label, + self.old_error_label)) + + def end_parallel_control_flow_block( + self, code, break_=False, continue_=False, return_=False): + """ + This ends the parallel control flow block and based on how the parallel + section was exited, takes the corresponding action. The break_ and + continue_ parameters indicate whether these should be propagated + outwards: + + for i in prange(...): + with cython.parallel.parallel(): + continue + + Here break should be trapped in the parallel block, and propagated to + the for loop. + """ + c = self.begin_of_parallel_control_block_point + self.begin_of_parallel_control_block_point = None + self.begin_of_parallel_control_block_point_after_decls = None + + if self.num_threads is not None: + # FIXME: is it the right place? should not normally produce code. + self.num_threads.generate_disposal_code(code) + self.num_threads.free_temps(code) + + # Firstly, always prefer errors over returning, continue or break + if self.error_label_used: + c.putln("const char *%s = NULL; int %s = 0, %s = 0;" % self.parallel_pos_info) + c.putln("PyObject *%s = NULL, *%s = NULL, *%s = NULL;" % self.parallel_exc) + + code.putln( + "if (%s) {" % Naming.parallel_exc_type) + code.putln("/* This may have been overridden by a continue, " + "break or return in another thread. Prefer the error. 
*/") + code.putln("%s = 4;" % Naming.parallel_why) + code.putln( + "}") + + if continue_: + any_label_used = self.any_label_used + else: + any_label_used = self.breaking_label_used + + if any_label_used: + # __pyx_parallel_why is used, declare and initialize + c.putln("int %s;" % Naming.parallel_why) + c.putln("%s = 0;" % Naming.parallel_why) + + code.putln( + "if (%s) {" % Naming.parallel_why) + + for temp_cname, private_cname, temp_type in self.parallel_private_temps: + if temp_type.is_cpp_class: + # utility code was loaded earlier + temp_cname = "__PYX_STD_MOVE_IF_SUPPORTED(%s)" % temp_cname + code.putln("%s = %s;" % (private_cname, temp_cname)) + + code.putln("switch (%s) {" % Naming.parallel_why) + if continue_: + code.put(" case 1: ") + code.put_goto(code.continue_label) + + if break_: + code.put(" case 2: ") + code.put_goto(code.break_label) + + if return_: + code.put(" case 3: ") + code.put_goto(code.return_label) + + if self.error_label_used: + code.globalstate.use_utility_code(restore_exception_utility_code) + code.putln(" case 4:") + self.restore_parallel_exception(code) + code.put_goto(code.error_label) + + code.putln("}") # end switch + code.putln( + "}") # end if + + code.end_block() # end parallel control flow block + self.redef_builtin_expect_apple_gcc_bug(code) + + # FIXME: improve with version number for OS X Lion + buggy_platform_macro_condition = "(defined(__APPLE__) || defined(__OSX__))" + have_expect_condition = "(defined(__GNUC__) && " \ + "(__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))" + redef_condition = "(%s && %s)" % (buggy_platform_macro_condition, have_expect_condition) + + def undef_builtin_expect_apple_gcc_bug(self, code): + """ + A bug on OS X Lion disallows __builtin_expect macros. 
This code avoids them + """ + if not self.parent: + code.undef_builtin_expect(self.redef_condition) + + def redef_builtin_expect_apple_gcc_bug(self, code): + if not self.parent: + code.redef_builtin_expect(self.redef_condition) + + +class ParallelWithBlockNode(ParallelStatNode): + """ + This node represents a 'with cython.parallel.parallel():' block + """ + + valid_keyword_arguments = ['num_threads'] + + num_threads = None + + def analyse_declarations(self, env): + super(ParallelWithBlockNode, self).analyse_declarations(env) + if self.args: + error(self.pos, "cython.parallel.parallel() does not take " + "positional arguments") + + def generate_execution_code(self, code): + self.declare_closure_privates(code) + self.setup_parallel_control_flow_block(code) + + code.putln("#ifdef _OPENMP") + code.put("#pragma omp parallel ") + + if self.privates: + privates = [e.cname for e in self.privates + if not e.type.is_pyobject] + code.put('private(%s)' % ', '.join(sorted(privates))) + + self.privatization_insertion_point = code.insertion_point() + self.put_num_threads(code) + code.putln("") + + code.putln("#endif /* _OPENMP */") + + code.begin_block() # parallel block + self.begin_parallel_block(code) + self.initialize_privates_to_nan(code) + code.funcstate.start_collecting_temps() + self.body.generate_execution_code(code) + self.trap_parallel_exit(code) + self.privatize_temps(code) + self.end_parallel_block(code) + code.end_block() # end parallel block + + continue_ = code.label_used(code.continue_label) + break_ = code.label_used(code.break_label) + return_ = code.label_used(code.return_label) + + self.restore_labels(code) + self.end_parallel_control_flow_block(code, break_=break_, + continue_=continue_, + return_=return_) + self.release_closure_privates(code) + + +class ParallelRangeNode(ParallelStatNode): + """ + This node represents a 'for i in cython.parallel.prange():' construct. 
+ + target NameNode the target iteration variable + else_clause Node or None the else clause of this loop + """ + + child_attrs = ['body', 'target', 'else_clause', 'args', 'num_threads', + 'chunksize'] + + body = target = else_clause = args = None + + start = stop = step = None + + is_prange = True + + nogil = None + schedule = None + + valid_keyword_arguments = ['schedule', 'nogil', 'num_threads', 'chunksize'] + + def __init__(self, pos, **kwds): + super(ParallelRangeNode, self).__init__(pos, **kwds) + # Pretend to be a ForInStatNode for control flow analysis + self.iterator = PassStatNode(pos) + + def analyse_declarations(self, env): + super(ParallelRangeNode, self).analyse_declarations(env) + self.target.analyse_target_declaration(env) + if self.else_clause is not None: + self.else_clause.analyse_declarations(env) + + if not self.args or len(self.args) > 3: + error(self.pos, "Invalid number of positional arguments to prange") + return + + if len(self.args) == 1: + self.stop, = self.args + elif len(self.args) == 2: + self.start, self.stop = self.args + else: + self.start, self.stop, self.step = self.args + + if self.schedule not in (None, 'static', 'dynamic', 'guided', 'runtime'): + error(self.pos, "Invalid schedule argument to prange: %s" % (self.schedule,)) + + def analyse_expressions(self, env): + was_nogil = env.nogil + if self.nogil: + env.nogil = True + + if self.target is None: + error(self.pos, "prange() can only be used as part of a for loop") + return self + + self.target = self.target.analyse_target_types(env) + + if not self.target.type.is_numeric: + # Not a valid type, assume one for now anyway + + if not self.target.type.is_pyobject: + # nogil_check will catch the is_pyobject case + error(self.target.pos, + "Must be of numeric type, not %s" % self.target.type) + + self.index_type = PyrexTypes.c_py_ssize_t_type + else: + self.index_type = self.target.type + + # Setup start, stop and step, allocating temps if needed + self.names = 'start', 'stop', 
'step' + start_stop_step = self.start, self.stop, self.step + + for node, name in zip(start_stop_step, self.names): + if node is not None: + node.analyse_types(env) + if not node.type.is_numeric: + error(node.pos, "%s argument must be numeric" % name) + continue + + if not node.is_literal: + node = node.coerce_to_temp(env) + setattr(self, name, node) + + # As we range from 0 to nsteps, computing the index along the + # way, we need a fitting type for 'i' and 'nsteps' + self.index_type = PyrexTypes.widest_numeric_type( + self.index_type, node.type) + + if self.else_clause is not None: + self.else_clause = self.else_clause.analyse_expressions(env) + + # Although not actually an assignment in this scope, it should be + # treated as such to ensure it is unpacked if a closure temp, and to + # ensure lastprivate behaviour and propagation. If the target index is + # not a NameNode, it won't have an entry, and an error was issued by + # ParallelRangeTransform + target_entry = getattr(self.target, 'entry', None) + if target_entry: + self.assignments[self.target.entry] = self.target.pos, None + + node = super(ParallelRangeNode, self).analyse_expressions(env) + + if node.chunksize: + if not node.schedule: + error(node.chunksize.pos, + "Must provide schedule with chunksize") + elif node.schedule == 'runtime': + error(node.chunksize.pos, + "Chunksize not valid for the schedule runtime") + elif (node.chunksize.type.is_int and + node.chunksize.is_literal and + node.chunksize.compile_time_value(env) <= 0): + error(node.chunksize.pos, "Chunksize must not be negative") + + node.chunksize = node.chunksize.coerce_to( + PyrexTypes.c_int_type, env).coerce_to_temp(env) + + if node.nogil: + env.nogil = was_nogil + + node.is_nested_prange = node.parent and node.parent.is_prange + if node.is_nested_prange: + parent = node + while parent.parent and parent.parent.is_prange: + parent = parent.parent + + parent.assignments.update(node.assignments) + parent.privates.update(node.privates) + 
parent.assigned_nodes.extend(node.assigned_nodes) + return node + + def nogil_check(self, env): + names = 'start', 'stop', 'step', 'target' + nodes = self.start, self.stop, self.step, self.target + for name, node in zip(names, nodes): + if node is not None and node.type.is_pyobject: + error(node.pos, "%s may not be a Python object " + "as we don't have the GIL" % name) + + def generate_execution_code(self, code): + """ + Generate code in the following steps + + 1) copy any closure variables determined thread-private + into temporaries + + 2) allocate temps for start, stop and step + + 3) generate a loop that calculates the total number of steps, + which then computes the target iteration variable for every step: + + for i in prange(start, stop, step): + ... + + becomes + + nsteps = (stop - start) / step; + i = start; + + #pragma omp parallel for lastprivate(i) + for (temp = 0; temp < nsteps; temp++) { + i = start + step * temp; + ... + } + + Note that accumulation of 'i' would have a data dependency + between iterations. + + Also, you can't do this + + for (i = start; i < stop; i += step) + ... + + as the '<' operator should become '>' for descending loops. + 'for i from x < i < y:' does not suffer from this problem + as the relational operator is known at compile time! 
+ + 4) release our temps and write back any private closure variables + """ + self.declare_closure_privates(code) + + # This can only be a NameNode + target_index_cname = self.target.entry.cname + + # This will be used as the dict to format our code strings, holding + # the start, stop , step, temps and target cnames + fmt_dict = { + 'target': target_index_cname, + 'target_type': self.target.type.empty_declaration_code() + } + + # Setup start, stop and step, allocating temps if needed + start_stop_step = self.start, self.stop, self.step + defaults = '0', '0', '1' + for node, name, default in zip(start_stop_step, self.names, defaults): + if node is None: + result = default + elif node.is_literal: + result = node.get_constant_c_result_code() + else: + node.generate_evaluation_code(code) + result = node.result() + + fmt_dict[name] = result + + fmt_dict['i'] = code.funcstate.allocate_temp(self.index_type, False) + fmt_dict['nsteps'] = code.funcstate.allocate_temp(self.index_type, False) + + # TODO: check if the step is 0 and if so, raise an exception in a + # 'with gil' block. For now, just abort + if self.step is not None and self.step.has_constant_result() and self.step.constant_result == 0: + error(node.pos, "Iteration with step 0 is invalid.") + elif not fmt_dict['step'].isdigit() or int(fmt_dict['step']) == 0: + code.putln("if (((%(step)s) == 0)) abort();" % fmt_dict) + + self.setup_parallel_control_flow_block(code) # parallel control flow block + + # Note: nsteps is private in an outer scope if present + code.putln("%(nsteps)s = (%(stop)s - %(start)s + %(step)s - %(step)s/abs(%(step)s)) / %(step)s;" % fmt_dict) + + # The target iteration variable might not be initialized, do it only if + # we are executing at least 1 iteration, otherwise we should leave the + # target unaffected. 
The target iteration variable is firstprivate to + # shut up compiler warnings caused by lastprivate, as the compiler + # erroneously believes that nsteps may be <= 0, leaving the private + # target index uninitialized + code.putln("if (%(nsteps)s > 0)" % fmt_dict) + code.begin_block() # if block + self.generate_loop(code, fmt_dict) + code.end_block() # end if block + + self.restore_labels(code) + + if self.else_clause: + if self.breaking_label_used: + code.put("if (%s < 2)" % Naming.parallel_why) + + code.begin_block() # else block + code.putln("/* else */") + self.else_clause.generate_execution_code(code) + code.end_block() # end else block + + # ------ cleanup ------ + self.end_parallel_control_flow_block(code) # end parallel control flow block + + # And finally, release our privates and write back any closure + # variables + for temp in start_stop_step + (self.chunksize,): + if temp is not None: + temp.generate_disposal_code(code) + temp.free_temps(code) + + code.funcstate.release_temp(fmt_dict['i']) + code.funcstate.release_temp(fmt_dict['nsteps']) + + self.release_closure_privates(code) + + def generate_loop(self, code, fmt_dict): + if self.is_nested_prange: + code.putln("#if 0") + else: + code.putln("#ifdef _OPENMP") + + if not self.is_parallel: + code.put("#pragma omp for") + self.privatization_insertion_point = code.insertion_point() + reduction_codepoint = self.parent.privatization_insertion_point + else: + code.put("#pragma omp parallel") + self.privatization_insertion_point = code.insertion_point() + reduction_codepoint = self.privatization_insertion_point + code.putln("") + code.putln("#endif /* _OPENMP */") + + code.begin_block() # pragma omp parallel begin block + + # Initialize the GIL if needed for this thread + self.begin_parallel_block(code) + + if self.is_nested_prange: + code.putln("#if 0") + else: + code.putln("#ifdef _OPENMP") + code.put("#pragma omp for") + + for entry, (op, lastprivate) in sorted(self.privates.items()): + # Don't declare 
the index variable as a reduction + if op and op in "+*-&^|" and entry != self.target.entry: + if entry.type.is_pyobject: + error(self.pos, "Python objects cannot be reductions") + else: + #code.put(" reduction(%s:%s)" % (op, entry.cname)) + # This is the only way reductions + nesting works in gcc4.5 + reduction_codepoint.put( + " reduction(%s:%s)" % (op, entry.cname)) + else: + if entry == self.target.entry: + code.put(" firstprivate(%s)" % entry.cname) + code.put(" lastprivate(%s)" % entry.cname) + continue + + if not entry.type.is_pyobject: + if lastprivate: + private = 'lastprivate' + else: + private = 'private' + + code.put(" %s(%s)" % (private, entry.cname)) + + if self.schedule: + if self.chunksize: + chunksize = ", %s" % self.evaluate_before_block(code, self.chunksize) + else: + chunksize = "" + + code.put(" schedule(%s%s)" % (self.schedule, chunksize)) + + self.put_num_threads(reduction_codepoint) + + code.putln("") + code.putln("#endif /* _OPENMP */") + + code.put("for (%(i)s = 0; %(i)s < %(nsteps)s; %(i)s++)" % fmt_dict) + code.begin_block() # for loop block + + guard_around_body_codepoint = code.insertion_point() + + # Start if guard block around the body. 
This may be unnecessary, but + # at least it doesn't spoil indentation + code.begin_block() + + code.putln("%(target)s = (%(target_type)s)(%(start)s + %(step)s * %(i)s);" % fmt_dict) + self.initialize_privates_to_nan(code, exclude=self.target.entry) + + if self.is_parallel and not self.is_nested_prange: + # nested pranges are not omp'ified, temps go to outer loops + code.funcstate.start_collecting_temps() + + self.body.generate_execution_code(code) + self.trap_parallel_exit(code, should_flush=True) + if self.is_parallel and not self.is_nested_prange: + # nested pranges are not omp'ified, temps go to outer loops + self.privatize_temps(code) + + if self.breaking_label_used: + # Put a guard around the loop body in case return, break or + # exceptions might be used + guard_around_body_codepoint.putln("if (%s < 2)" % Naming.parallel_why) + + code.end_block() # end guard around loop body + code.end_block() # end for loop block + + if self.is_parallel: + # Release the GIL and deallocate the thread state + self.end_parallel_block(code) + code.end_block() # pragma omp parallel end block + + +class CnameDecoratorNode(StatNode): + """ + This node is for the cname decorator in CythonUtilityCode: + + @cname('the_cname') + cdef func(...): + ... + + In case of a cdef class the cname specifies the objstruct_cname. + + node the node to which the cname decorator is applied + cname the cname the node should get + """ + + child_attrs = ['node'] + + def analyse_declarations(self, env): + self.node.analyse_declarations(env) + + node = self.node + if isinstance(node, CompilerDirectivesNode): + node = node.body.stats[0] + + self.is_function = isinstance(node, FuncDefNode) + is_struct_or_enum = isinstance(node, (CStructOrUnionDefNode, CEnumDefNode)) + e = node.entry + + if self.is_function: + e.cname = self.cname + e.func_cname = self.cname + e.used = True + if e.pyfunc_cname and '.' 
in e.pyfunc_cname: + e.pyfunc_cname = self.mangle(e.pyfunc_cname) + elif is_struct_or_enum: + e.cname = e.type.cname = self.cname + else: + scope = node.scope + + e.cname = self.cname + e.type.objstruct_cname = self.cname + '_obj' + e.type.typeobj_cname = Naming.typeobj_prefix + self.cname + e.type.typeptr_cname = self.cname + '_type' + e.type.scope.namespace_cname = e.type.typeptr_cname + + e.as_variable.cname = e.type.typeptr_cname + + scope.scope_prefix = self.cname + "_" + + for name, entry in scope.entries.items(): + if entry.func_cname: + entry.func_cname = self.mangle(entry.cname) + if entry.pyfunc_cname: + entry.pyfunc_cname = self.mangle(entry.pyfunc_cname) + + def mangle(self, cname): + if '.' in cname: + # remove __pyx_base from func_cname + cname = cname.split('.')[-1] + return '%s_%s' % (self.cname, cname) + + def analyse_expressions(self, env): + self.node = self.node.analyse_expressions(env) + return self + + def generate_function_definitions(self, env, code): + "Ensure a prototype for every @cname method in the right place" + if self.is_function and env.is_c_class_scope: + # method in cdef class, generate a prototype in the header + h_code = code.globalstate['utility_code_proto'] + + if isinstance(self.node, DefNode): + self.node.generate_function_header( + h_code, with_pymethdef=False, proto_only=True) + else: + from . 
import ModuleNode + entry = self.node.entry + cname = entry.cname + entry.cname = entry.func_cname + + ModuleNode.generate_cfunction_declaration( + entry, + env.global_scope(), + h_code, + definition=True) + + entry.cname = cname + + self.node.generate_function_definitions(env, code) + + def generate_execution_code(self, code): + self.node.generate_execution_code(code) + + +#------------------------------------------------------------------------------------ +# +# Runtime support code +# +#------------------------------------------------------------------------------------ + +if Options.gcc_branch_hints: + branch_prediction_macros = """ +/* Test for GCC > 2.95 */ +#if defined(__GNUC__) \ + && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))) + #define likely(x) __builtin_expect(!!(x), 1) + #define unlikely(x) __builtin_expect(!!(x), 0) +#else /* !__GNUC__ or GCC < 2.95 */ + #define likely(x) (x) + #define unlikely(x) (x) +#endif /* __GNUC__ */ +""" +else: + branch_prediction_macros = """ +#define likely(x) (x) +#define unlikely(x) (x) +""" + +#------------------------------------------------------------------------------------ + +printing_utility_code = UtilityCode.load_cached("Print", "Printing.c") +printing_one_utility_code = UtilityCode.load_cached("PrintOne", "Printing.c") + +#------------------------------------------------------------------------------------ + +# Exception raising code +# +# Exceptions are raised by __Pyx_Raise() and stored as plain +# type/value/tb in PyThreadState->curexc_*. 
When being caught by an +# 'except' statement, curexc_* is moved over to exc_* by +# __Pyx_GetException() + +restore_exception_utility_code = UtilityCode.load_cached("PyErrFetchRestore", "Exceptions.c") +raise_utility_code = UtilityCode.load_cached("RaiseException", "Exceptions.c") +get_exception_utility_code = UtilityCode.load_cached("GetException", "Exceptions.c") +swap_exception_utility_code = UtilityCode.load_cached("SwapException", "Exceptions.c") +reset_exception_utility_code = UtilityCode.load_cached("SaveResetException", "Exceptions.c") +traceback_utility_code = UtilityCode.load_cached("AddTraceback", "Exceptions.c") + +#------------------------------------------------------------------------------------ + +get_exception_tuple_utility_code = UtilityCode( + proto=""" +static PyObject *__Pyx_GetExceptionTuple(PyThreadState *__pyx_tstate); /*proto*/ +""", + # I doubt that calling __Pyx_GetException() here is correct as it moves + # the exception from tstate->curexc_* to tstate->exc_*, which prevents + # exception handlers later on from receiving it. 
+ # NOTE: "__pyx_tstate" may be used by __Pyx_GetException() macro + impl = """ +static PyObject *__Pyx_GetExceptionTuple(CYTHON_UNUSED PyThreadState *__pyx_tstate) { + PyObject *type = NULL, *value = NULL, *tb = NULL; + if (__Pyx_GetException(&type, &value, &tb) == 0) { + PyObject* exc_info = PyTuple_New(3); + if (exc_info) { + Py_INCREF(type); + Py_INCREF(value); + Py_INCREF(tb); + PyTuple_SET_ITEM(exc_info, 0, type); + PyTuple_SET_ITEM(exc_info, 1, value); + PyTuple_SET_ITEM(exc_info, 2, tb); + return exc_info; + } + } + return NULL; +} +""", + requires=[get_exception_utility_code]) diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/Options.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/Options.py new file mode 100644 index 0000000000000000000000000000000000000000..61950a7d7c96e6817a5ca1989289994bfc5db887 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/Options.py @@ -0,0 +1,799 @@ +# +# Cython - Compilation-wide options and pragma declarations +# + +from __future__ import absolute_import + +import os + +from .. import Utils + + +class ShouldBeFromDirective(object): + + known_directives = [] + + def __init__(self, options_name, directive_name=None, disallow=False): + self.options_name = options_name + self.directive_name = directive_name or options_name + self.disallow = disallow + self.known_directives.append(self) + + def __nonzero__(self): + self._bad_access() + + def __int__(self): + self._bad_access() + + def _bad_access(self): + raise RuntimeError(repr(self)) + + def __repr__(self): + return "Illegal access of '%s' from Options module rather than directive '%s'" % ( + self.options_name, self.directive_name) + + +""" +The members of this module are documented using autodata in +Cython/docs/src/reference/compilation.rst. 
+See https://www.sphinx-doc.org/en/master/usage/extensions/autodoc.html#directive-autoattribute +for how autodata works. +Descriptions of those members should start with a #: +Donc forget to keep the docs in sync by removing and adding +the members in both this file and the .rst file. +""" + +#: Whether or not to include docstring in the Python extension. If False, the binary size +#: will be smaller, but the ``__doc__`` attribute of any class or function will be an +#: empty string. +docstrings = True + +#: Embed the source code position in the docstrings of functions and classes. +embed_pos_in_docstring = False + +# undocumented +pre_import = None + +#: Decref global variables in each module on exit for garbage collection. +#: 0: None, 1+: interned objects, 2+: cdef globals, 3+: types objects +#: Mostly for reducing noise in Valgrind as it typically executes at process exit +#: (when all memory will be reclaimed anyways). +#: Note that directly or indirectly executed cleanup code that makes use of global +#: variables or types may no longer be safe when enabling the respective level since +#: there is no guaranteed order in which the (reference counted) objects will +#: be cleaned up. The order can change due to live references and reference cycles. +generate_cleanup_code = False + +#: Should tp_clear() set object fields to None instead of clearing them to NULL? +clear_to_none = True + +#: Generate an annotated HTML version of the input source files for debugging and optimisation purposes. +#: This has the same effect as the ``annotate`` argument in :func:`cythonize`. +annotate = False + +# When annotating source files in HTML, include coverage information from +# this file. +annotate_coverage_xml = None + +#: This will abort the compilation on the first error occurred rather than trying +#: to keep going and printing further error messages. +fast_fail = False + +#: Turn all warnings into errors. +warning_errors = False + +#: Make unknown names an error. 
Python raises a NameError when +#: encountering unknown names at runtime, whereas this option makes +#: them a compile time error. If you want full Python compatibility, +#: you should disable this option and also 'cache_builtins'. +error_on_unknown_names = True + +#: Make uninitialized local variable reference a compile time error. +#: Python raises UnboundLocalError at runtime, whereas this option makes +#: them a compile time error. Note that this option affects only variables +#: of "python object" type. +error_on_uninitialized = True + +#: This will convert statements of the form ``for i in range(...)`` +#: to ``for i from ...`` when ``i`` is a C integer type, and the direction +#: (i.e. sign of step) can be determined. +#: WARNING: This may change the semantics if the range causes assignment to +#: i to overflow. Specifically, if this option is set, an error will be +#: raised before the loop is entered, whereas without this option the loop +#: will execute until an overflowing value is encountered. +convert_range = True + +#: Perform lookups on builtin names only once, at module initialisation +#: time. This will prevent the module from getting imported if a +#: builtin name that it uses cannot be found during initialisation. +#: Default is True. +#: Note that some legacy builtins are automatically remapped +#: from their Python 2 names to their Python 3 names by Cython +#: when building in Python 3.x, +#: so that they do not get in the way even if this option is enabled. +cache_builtins = True + +#: Generate branch prediction hints to speed up error handling etc. +gcc_branch_hints = True + +#: Enable this to allow one to write ``your_module.foo = ...`` to overwrite the +#: definition if the cpdef function foo, at the cost of an extra dictionary +#: lookup on every call. +#: If this is false it generates only the Python wrapper and no override check. 
+lookup_module_cpdef = False + +#: Whether or not to embed the Python interpreter, for use in making a +#: standalone executable or calling from external libraries. +#: This will provide a C function which initialises the interpreter and +#: executes the body of this module. +#: See `this demo `_ +#: for a concrete example. +#: If true, the initialisation function is the C main() function, but +#: this option can also be set to a non-empty string to provide a function name explicitly. +#: Default is False. +embed = None + +# In previous iterations of Cython, globals() gave the first non-Cython module +# globals in the call stack. Sage relies on this behavior for variable injection. +old_style_globals = ShouldBeFromDirective('old_style_globals') + +#: Allows cimporting from a pyx file without a pxd file. +cimport_from_pyx = False + +#: Maximum number of dimensions for buffers -- set lower than number of +#: dimensions in numpy, as +#: slices are passed by value and involve a lot of copying. +buffer_max_dims = 8 + +#: Number of function closure instances to keep in a freelist (0: no freelists) +closure_freelist_size = 8 + + +def get_directive_defaults(): + # To add an item to this list, all accesses should be changed to use the new + # directive, and the global option itself should be set to an instance of + # ShouldBeFromDirective. + for old_option in ShouldBeFromDirective.known_directives: + value = globals().get(old_option.options_name) + assert old_option.directive_name in _directive_defaults + if not isinstance(value, ShouldBeFromDirective): + if old_option.disallow: + raise RuntimeError( + "Option '%s' must be set from directive '%s'" % ( + old_option.option_name, old_option.directive_name)) + else: + # Warn? + _directive_defaults[old_option.directive_name] = value + return _directive_defaults + +def copy_inherited_directives(outer_directives, **new_directives): + # A few directives are not copied downwards and this function removes them. 
+ # For example, test_assert_path_exists and test_fail_if_path_exists should not be inherited + # otherwise they can produce very misleading test failures + new_directives_out = dict(outer_directives) + for name in ('test_assert_path_exists', 'test_fail_if_path_exists', 'test_assert_c_code_has', 'test_fail_if_c_code_has'): + new_directives_out.pop(name, None) + new_directives_out.update(new_directives) + return new_directives_out + +# Declare compiler directives +_directive_defaults = { + 'binding': True, # was False before 3.0 + 'boundscheck' : True, + 'nonecheck' : False, + 'initializedcheck' : True, + 'embedsignature': False, + 'embedsignature.format': 'c', + 'auto_cpdef': False, + 'auto_pickle': None, + 'cdivision': False, # was True before 0.12 + 'cdivision_warnings': False, + 'cpow': None, # was True before 3.0 + # None (not set by user) is treated as slightly different from False + 'c_api_binop_methods': False, # was True before 3.0 + 'overflowcheck': False, + 'overflowcheck.fold': True, + 'always_allow_keywords': True, + 'allow_none_for_extension_args': True, + 'wraparound' : True, + 'ccomplex' : False, # use C99/C++ for complex types and arith + 'callspec' : "", + 'nogil' : False, + 'gil' : False, + 'with_gil' : False, + 'profile': False, + 'linetrace': False, + 'emit_code_comments': True, # copy original source code into C code comments + 'annotation_typing': True, # read type declarations from Python function annotations + 'infer_types': None, + 'infer_types.verbose': False, + 'autotestdict': True, + 'autotestdict.cdef': False, + 'autotestdict.all': False, + 'language_level': None, + 'fast_getattr': False, # Undocumented until we come up with a better way to handle this everywhere. + 'py2_import': False, # For backward compatibility of Cython's source code in Py3 source mode + 'preliminary_late_includes_cy28': False, # Temporary directive in 0.28, to be removed in a later version (see GH#2079). 
+ 'iterable_coroutine': False, # Make async coroutines backwards compatible with the old asyncio yield-from syntax. + 'c_string_type': 'bytes', + 'c_string_encoding': '', + 'type_version_tag': True, # enables Py_TPFLAGS_HAVE_VERSION_TAG on extension types + 'unraisable_tracebacks': True, + 'old_style_globals': False, + 'np_pythran': False, + 'fast_gil': False, + 'cpp_locals': False, # uses std::optional for C++ locals, so that they work more like Python locals + 'legacy_implicit_noexcept': False, + + # set __file__ and/or __path__ to known source/target path at import time (instead of not having them available) + 'set_initial_path' : None, # SOURCEFILE or "/full/path/to/module" + + 'warn': None, + 'warn.undeclared': False, + 'warn.unreachable': True, + 'warn.maybe_uninitialized': False, + 'warn.unused': False, + 'warn.unused_arg': False, + 'warn.unused_result': False, + 'warn.multiple_declarators': True, + 'show_performance_hints': True, + +# optimizations + 'optimize.inline_defnode_calls': True, + 'optimize.unpack_method_calls': True, # increases code size when True + 'optimize.unpack_method_calls_in_pyinit': False, # uselessly increases code size when True + 'optimize.use_switch': True, + +# remove unreachable code + 'remove_unreachable': True, + +# control flow debug directives + 'control_flow.dot_output': "", # Graphviz output filename + 'control_flow.dot_annotate_defs': False, # Annotate definitions + +# test support + 'test_assert_path_exists' : [], + 'test_fail_if_path_exists' : [], + 'test_assert_c_code_has' : [], + 'test_fail_if_c_code_has' : [], + +# experimental, subject to change + 'formal_grammar': False, +} + +# Extra warning directives +extra_warnings = { + 'warn.maybe_uninitialized': True, + 'warn.unreachable': True, + 'warn.unused': True, +} + +def one_of(*args): + def validate(name, value): + if value not in args: + raise ValueError("%s directive must be one of %s, got '%s'" % ( + name, args, value)) + else: + return value + return validate + + 
def normalise_encoding_name(option_name, encoding):
    """
    Normalise an encoding name to its canonical short spelling, where known.

    >>> normalise_encoding_name('c_string_encoding', 'ascii')
    'ascii'
    >>> normalise_encoding_name('c_string_encoding', 'AsCIi')
    'ascii'
    >>> normalise_encoding_name('c_string_encoding', 'us-ascii')
    'ascii'
    >>> normalise_encoding_name('c_string_encoding', 'utF8')
    'utf8'
    >>> normalise_encoding_name('c_string_encoding', 'utF-8')
    'utf8'
    >>> normalise_encoding_name('c_string_encoding', 'deFAuLT')
    'default'
    >>> normalise_encoding_name('c_string_encoding', 'default')
    'default'
    >>> normalise_encoding_name('c_string_encoding', 'SeriousLyNoSuch--Encoding')
    'SeriousLyNoSuch--Encoding'
    """
    if not encoding:
        return ''
    lowered = encoding.lower()
    # Fast path: the canonical spellings themselves (case-insensitively).
    if lowered in ('default', 'ascii', 'utf8'):
        return lowered
    import codecs
    try:
        decoder = codecs.getdecoder(encoding)
    except LookupError:
        # Unknown to this Python installation -- may exist at runtime ...
        return encoding
    # Map aliases (e.g. 'us-ascii', 'utf-8') onto the canonical name by
    # comparing the codec's decoder function.
    for canonical_name in ('ascii', 'utf8'):
        if codecs.getdecoder(canonical_name) == decoder:
            return canonical_name
    return encoding

# use as a sentinel value to defer analysis of the arguments
# instead of analysing them in InterpretCompilerDirectives.
The dataclass directives are quite +# complicated and it's easier to deal with them at the point the dataclass is created +class DEFER_ANALYSIS_OF_ARGUMENTS: + pass +DEFER_ANALYSIS_OF_ARGUMENTS = DEFER_ANALYSIS_OF_ARGUMENTS() + +# Override types possibilities above, if needed +directive_types = { + 'language_level': str, # values can be None/2/3/'3str', where None == 2+warning + 'auto_pickle': bool, + 'locals': dict, + 'final' : bool, # final cdef classes and methods + 'collection_type': one_of('sequence'), + 'nogil' : DEFER_ANALYSIS_OF_ARGUMENTS, + 'gil' : DEFER_ANALYSIS_OF_ARGUMENTS, + 'with_gil' : None, + 'internal' : bool, # cdef class visibility in the module dict + 'infer_types' : bool, # values can be True/None/False + 'binding' : bool, + 'cfunc' : None, # decorators do not take directive value + 'ccall' : None, + 'ufunc': None, + 'cpow' : bool, + 'inline' : None, + 'staticmethod' : None, + 'cclass' : None, + 'no_gc_clear' : bool, + 'no_gc' : bool, + 'returns' : type, + 'exceptval': type, # actually (type, check=True/False), but has its own parser + 'set_initial_path': str, + 'freelist': int, + 'c_string_type': one_of('bytes', 'bytearray', 'str', 'unicode'), + 'c_string_encoding': normalise_encoding_name, + 'trashcan': bool, + 'total_ordering': None, + 'dataclasses.dataclass': DEFER_ANALYSIS_OF_ARGUMENTS, + 'dataclasses.field': DEFER_ANALYSIS_OF_ARGUMENTS, + 'embedsignature.format': one_of('c', 'clinic', 'python'), +} + +for key, val in _directive_defaults.items(): + if key not in directive_types: + directive_types[key] = type(val) + +directive_scopes = { # defaults to available everywhere + # 'module', 'function', 'class', 'with statement' + 'auto_pickle': ('module', 'cclass'), + 'final' : ('cclass', 'function'), + 'collection_type': ('cclass',), + 'nogil' : ('function', 'with statement'), + 'gil' : ('with statement'), + 'with_gil' : ('function',), + 'inline' : ('function',), + 'cfunc' : ('function', 'with statement'), + 'ccall' : ('function', 'with 
statement'), + 'returns' : ('function',), + 'exceptval' : ('function',), + 'locals' : ('function',), + 'staticmethod' : ('function',), # FIXME: analysis currently lacks more specific function scope + 'no_gc_clear' : ('cclass',), + 'no_gc' : ('cclass',), + 'internal' : ('cclass',), + 'cclass' : ('class', 'cclass', 'with statement'), + 'autotestdict' : ('module',), + 'autotestdict.all' : ('module',), + 'autotestdict.cdef' : ('module',), + 'set_initial_path' : ('module',), + 'test_assert_path_exists' : ('function', 'class', 'cclass'), + 'test_fail_if_path_exists' : ('function', 'class', 'cclass'), + 'test_assert_c_code_has' : ('module',), + 'test_fail_if_c_code_has' : ('module',), + 'freelist': ('cclass',), + 'emit_code_comments': ('module',), + # Avoid scope-specific to/from_py_functions for c_string. + 'c_string_type': ('module',), + 'c_string_encoding': ('module',), + 'type_version_tag': ('module', 'cclass'), + 'language_level': ('module',), + # globals() could conceivably be controlled at a finer granularity, + # but that would complicate the implementation + 'old_style_globals': ('module',), + 'np_pythran': ('module',), + 'fast_gil': ('module',), + 'iterable_coroutine': ('module', 'function'), + 'trashcan' : ('cclass',), + 'total_ordering': ('class', 'cclass'), + 'dataclasses.dataclass' : ('class', 'cclass'), + 'cpp_locals': ('module', 'function', 'cclass'), # I don't think they make sense in a with_statement + 'ufunc': ('function',), + 'legacy_implicit_noexcept': ('module', ), +} + + +# A list of directives that (when used as a decorator) are only applied to +# the object they decorate and not to its children. 
+immediate_decorator_directives = { + 'cfunc', 'ccall', 'cclass', 'dataclasses.dataclass', 'ufunc', + # function signature directives + 'inline', 'exceptval', 'returns', 'with_gil', # 'nogil', + # class directives + 'freelist', 'no_gc', 'no_gc_clear', 'type_version_tag', 'final', + 'auto_pickle', 'internal', 'collection_type', 'total_ordering', + # testing directives + 'test_fail_if_path_exists', 'test_assert_path_exists', +} + + +def parse_directive_value(name, value, relaxed_bool=False): + """ + Parses value as an option value for the given name and returns + the interpreted value. None is returned if the option does not exist. + + >>> print(parse_directive_value('nonexisting', 'asdf asdfd')) + None + >>> parse_directive_value('boundscheck', 'True') + True + >>> parse_directive_value('boundscheck', 'true') + Traceback (most recent call last): + ... + ValueError: boundscheck directive must be set to True or False, got 'true' + + >>> parse_directive_value('c_string_encoding', 'us-ascii') + 'ascii' + >>> parse_directive_value('c_string_type', 'str') + 'str' + >>> parse_directive_value('c_string_type', 'bytes') + 'bytes' + >>> parse_directive_value('c_string_type', 'bytearray') + 'bytearray' + >>> parse_directive_value('c_string_type', 'unicode') + 'unicode' + >>> parse_directive_value('c_string_type', 'unnicode') + Traceback (most recent call last): + ValueError: c_string_type directive must be one of ('bytes', 'bytearray', 'str', 'unicode'), got 'unnicode' + """ + type = directive_types.get(name) + if not type: + return None + orig_value = value + if type is bool: + value = str(value) + if value == 'True': + return True + if value == 'False': + return False + if relaxed_bool: + value = value.lower() + if value in ("true", "yes"): + return True + elif value in ("false", "no"): + return False + raise ValueError("%s directive must be set to True or False, got '%s'" % ( + name, orig_value)) + elif type is int: + try: + return int(value) + except ValueError: + raise 
ValueError("%s directive must be set to an integer, got '%s'" % ( + name, orig_value)) + elif type is str: + return str(value) + elif callable(type): + return type(name, value) + else: + assert False + + +def parse_directive_list(s, relaxed_bool=False, ignore_unknown=False, + current_settings=None): + """ + Parses a comma-separated list of pragma options. Whitespace + is not considered. + + >>> parse_directive_list(' ') + {} + >>> (parse_directive_list('boundscheck=True') == + ... {'boundscheck': True}) + True + >>> parse_directive_list(' asdf') + Traceback (most recent call last): + ... + ValueError: Expected "=" in option "asdf" + >>> parse_directive_list('boundscheck=hey') + Traceback (most recent call last): + ... + ValueError: boundscheck directive must be set to True or False, got 'hey' + >>> parse_directive_list('unknown=True') + Traceback (most recent call last): + ... + ValueError: Unknown option: "unknown" + >>> warnings = parse_directive_list('warn.all=True') + >>> len(warnings) > 1 + True + >>> sum(warnings.values()) == len(warnings) # all true. 
+ True + """ + if current_settings is None: + result = {} + else: + result = current_settings + for item in s.split(','): + item = item.strip() + if not item: + continue + if '=' not in item: + raise ValueError('Expected "=" in option "%s"' % item) + name, value = [s.strip() for s in item.strip().split('=', 1)] + if name not in _directive_defaults: + found = False + if name.endswith('.all'): + prefix = name[:-3] + for directive in _directive_defaults: + if directive.startswith(prefix): + found = True + parsed_value = parse_directive_value(directive, value, relaxed_bool=relaxed_bool) + result[directive] = parsed_value + if not found and not ignore_unknown: + raise ValueError('Unknown option: "%s"' % name) + elif directive_types.get(name) is list: + if name in result: + result[name].append(value) + else: + result[name] = [value] + else: + parsed_value = parse_directive_value(name, value, relaxed_bool=relaxed_bool) + result[name] = parsed_value + return result + + +def parse_variable_value(value): + """ + Parses value as an option value for the given name and returns + the interpreted value. + + >>> parse_variable_value('True') + True + >>> parse_variable_value('true') + 'true' + >>> parse_variable_value('us-ascii') + 'us-ascii' + >>> parse_variable_value('str') + 'str' + >>> parse_variable_value('123') + 123 + >>> parse_variable_value('1.23') + 1.23 + + """ + if value == "True": + return True + elif value == "False": + return False + elif value == "None": + return None + elif value.isdigit(): + return int(value) + else: + try: + value = float(value) + except Exception: + # Not a float + pass + return value + + +def parse_compile_time_env(s, current_settings=None): + """ + Parses a comma-separated list of pragma options. Whitespace + is not considered. + + >>> parse_compile_time_env(' ') + {} + >>> (parse_compile_time_env('HAVE_OPENMP=True') == + ... {'HAVE_OPENMP': True}) + True + >>> parse_compile_time_env(' asdf') + Traceback (most recent call last): + ... 
+ ValueError: Expected "=" in option "asdf" + >>> parse_compile_time_env('NUM_THREADS=4') == {'NUM_THREADS': 4} + True + >>> parse_compile_time_env('unknown=anything') == {'unknown': 'anything'} + True + """ + if current_settings is None: + result = {} + else: + result = current_settings + for item in s.split(','): + item = item.strip() + if not item: + continue + if '=' not in item: + raise ValueError('Expected "=" in option "%s"' % item) + name, value = [s.strip() for s in item.split('=', 1)] + result[name] = parse_variable_value(value) + return result + + +# ------------------------------------------------------------------------ +# CompilationOptions are constructed from user input and are the `option` +# object passed throughout the compilation pipeline. + +class CompilationOptions(object): + r""" + See default_options at the end of this module for a list of all possible + options and CmdLine.usage and CmdLine.parse_command_line() for their + meaning. + """ + def __init__(self, defaults=None, **kw): + self.include_path = [] + if defaults: + if isinstance(defaults, CompilationOptions): + defaults = defaults.__dict__ + else: + defaults = default_options + + options = dict(defaults) + options.update(kw) + + # let's assume 'default_options' contains a value for most known compiler options + # and validate against them + unknown_options = set(options) - set(default_options) + # ignore valid options that are not in the defaults + unknown_options.difference_update(['include_path']) + if unknown_options: + message = "got unknown compilation option%s, please remove: %s" % ( + 's' if len(unknown_options) > 1 else '', + ', '.join(unknown_options)) + raise ValueError(message) + + directive_defaults = get_directive_defaults() + directives = dict(options['compiler_directives']) # copy mutable field + # check for invalid directives + unknown_directives = set(directives) - set(directive_defaults) + if unknown_directives: + message = "got unknown compiler directive%s: %s" % ( 
+ 's' if len(unknown_directives) > 1 else '', + ', '.join(unknown_directives)) + raise ValueError(message) + options['compiler_directives'] = directives + if directives.get('np_pythran', False) and not options['cplus']: + import warnings + warnings.warn("C++ mode forced when in Pythran mode!") + options['cplus'] = True + if 'language_level' not in kw and directives.get('language_level'): + options['language_level'] = directives['language_level'] + elif not options.get('language_level'): + options['language_level'] = directive_defaults.get('language_level') + if 'formal_grammar' in directives and 'formal_grammar' not in kw: + options['formal_grammar'] = directives['formal_grammar'] + if options['cache'] is True: + options['cache'] = os.path.join(Utils.get_cython_cache_dir(), 'compiler') + + self.__dict__.update(options) + + def configure_language_defaults(self, source_extension): + if source_extension == 'py': + if self.compiler_directives.get('binding') is None: + self.compiler_directives['binding'] = True + + def get_fingerprint(self): + r""" + Return a string that contains all the options that are relevant for cache invalidation. + """ + # Collect only the data that can affect the generated file(s). 
+ data = {} + + for key, value in self.__dict__.items(): + if key in ['show_version', 'errors_to_stderr', 'verbose', 'quiet']: + # verbosity flags have no influence on the compilation result + continue + elif key in ['output_file', 'output_dir']: + # ignore the exact name of the output file + continue + elif key in ['depfile']: + # external build system dependency tracking file does not influence outputs + continue + elif key in ['timestamps']: + # the cache cares about the content of files, not about the timestamps of sources + continue + elif key in ['cache']: + # hopefully caching has no influence on the compilation result + continue + elif key in ['compiler_directives']: + # directives passed on to the C compiler do not influence the generated C code + continue + elif key in ['include_path']: + # this path changes which headers are tracked as dependencies, + # it has no influence on the generated C code + continue + elif key in ['working_path']: + # this path changes where modules and pxd files are found; + # their content is part of the fingerprint anyway, their + # absolute path does not matter + continue + elif key in ['create_extension']: + # create_extension() has already mangled the options, e.g., + # embedded_metadata, when the fingerprint is computed so we + # ignore it here. 
+ continue + elif key in ['build_dir']: + # the (temporary) directory where we collect dependencies + # has no influence on the C output + continue + elif key in ['use_listing_file', 'generate_pxi', 'annotate', 'annotate_coverage_xml']: + # all output files are contained in the cache so the types of + # files generated must be part of the fingerprint + data[key] = value + elif key in ['formal_grammar', 'evaluate_tree_assertions']: + # these bits can change whether compilation to C passes/fails + data[key] = value + elif key in ['embedded_metadata', 'emit_linenums', + 'c_line_in_traceback', 'gdb_debug', + 'relative_path_in_code_position_comments']: + # the generated code contains additional bits when these are set + data[key] = value + elif key in ['cplus', 'language_level', 'compile_time_env', 'np_pythran']: + # assorted bits that, e.g., influence the parser + data[key] = value + elif key == ['capi_reexport_cincludes']: + if self.capi_reexport_cincludes: + # our caching implementation does not yet include fingerprints of all the header files + raise NotImplementedError('capi_reexport_cincludes is not compatible with Cython caching') + elif key == ['common_utility_include_dir']: + if self.common_utility_include_dir: + raise NotImplementedError('common_utility_include_dir is not compatible with Cython caching yet') + else: + # any unexpected option should go into the fingerprint; it's better + # to recompile than to return incorrect results from the cache. + data[key] = value + + def to_fingerprint(item): + r""" + Recursively turn item into a string, turning dicts into lists with + deterministic ordering. 
+ """ + if isinstance(item, dict): + item = sorted([(repr(key), to_fingerprint(value)) for key, value in item.items()]) + return repr(item) + + return to_fingerprint(data) + + +# ------------------------------------------------------------------------ +# +# Set the default options depending on the platform +# +# ------------------------------------------------------------------------ + +default_options = dict( + show_version=0, + use_listing_file=0, + errors_to_stderr=1, + cplus=0, + output_file=None, + depfile=None, + annotate=None, + annotate_coverage_xml=None, + generate_pxi=0, + capi_reexport_cincludes=0, + working_path="", + timestamps=None, + verbose=0, + quiet=0, + compiler_directives={}, + embedded_metadata={}, + evaluate_tree_assertions=False, + emit_linenums=False, + relative_path_in_code_position_comments=True, + c_line_in_traceback=True, + language_level=None, # warn but default to 2 + formal_grammar=False, + gdb_debug=False, + compile_time_env=None, + module_name=None, + common_utility_include_dir=None, + output_dir=None, + build_dir=None, + cache=None, + create_extension=None, + np_pythran=False, + legacy_implicit_noexcept=None, +) diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/ParseTreeTransforms.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/ParseTreeTransforms.py new file mode 100644 index 0000000000000000000000000000000000000000..7368f1af46c37713cc3d9da38fdecf13accb42f4 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/ParseTreeTransforms.py @@ -0,0 +1,4234 @@ +# cython: language_level=3str + +from __future__ import absolute_import + +import cython +cython.declare(PyrexTypes=object, Naming=object, ExprNodes=object, Nodes=object, + Options=object, UtilNodes=object, LetNode=object, + LetRefNode=object, TreeFragment=object, EncodedString=object, + error=object, warning=object, copy=object, hashlib=object, sys=object, + 
_unicode=object) + +import copy +import hashlib +import sys + +from . import PyrexTypes +from . import Naming +from . import ExprNodes +from . import Nodes +from . import Options +from . import Builtin +from . import Errors + +from .Visitor import VisitorTransform, TreeVisitor +from .Visitor import CythonTransform, EnvTransform, ScopeTrackingTransform +from .UtilNodes import LetNode, LetRefNode +from .TreeFragment import TreeFragment +from .StringEncoding import EncodedString, _unicode +from .Errors import error, warning, CompileError, InternalError +from .Code import UtilityCode + + +class SkipDeclarations(object): + """ + Variable and function declarations can often have a deep tree structure, + and yet most transformations don't need to descend to this depth. + + Declaration nodes are removed after AnalyseDeclarationsTransform, so there + is no need to use this for transformations after that point. + """ + def visit_CTypeDefNode(self, node): + return node + + def visit_CVarDefNode(self, node): + return node + + def visit_CDeclaratorNode(self, node): + return node + + def visit_CBaseTypeNode(self, node): + return node + + def visit_CEnumDefNode(self, node): + return node + + def visit_CStructOrUnionDefNode(self, node): + return node + + def visit_CppClassNode(self, node): + if node.visibility != "extern": + # Need to traverse methods. + self.visitchildren(node) + return node + + +class NormalizeTree(CythonTransform): + """ + This transform fixes up a few things after parsing + in order to make the parse tree more suitable for + transforms. + + a) After parsing, blocks with only one statement will + be represented by that statement, not by a StatListNode. + When doing transforms this is annoying and inconsistent, + as one cannot in general remove a statement in a consistent + way and so on. This transform wraps any single statements + in a StatListNode containing a single statement. 
+ + b) The PassStatNode is a noop and serves no purpose beyond + plugging such one-statement blocks; i.e., once parsed a +` "pass" can just as well be represented using an empty + StatListNode. This means less special cases to worry about + in subsequent transforms (one always checks to see if a + StatListNode has no children to see if the block is empty). + """ + + def __init__(self, context): + super(NormalizeTree, self).__init__(context) + self.is_in_statlist = False + self.is_in_expr = False + + def visit_ModuleNode(self, node): + self.visitchildren(node) + if not isinstance(node.body, Nodes.StatListNode): + # This can happen when the body only consists of a single (unused) declaration and no statements. + node.body = Nodes.StatListNode(pos=node.pos, stats=[node.body]) + return node + + def visit_ExprNode(self, node): + stacktmp = self.is_in_expr + self.is_in_expr = True + self.visitchildren(node) + self.is_in_expr = stacktmp + return node + + def visit_StatNode(self, node, is_listcontainer=False): + stacktmp = self.is_in_statlist + self.is_in_statlist = is_listcontainer + self.visitchildren(node) + self.is_in_statlist = stacktmp + if not self.is_in_statlist and not self.is_in_expr: + return Nodes.StatListNode(pos=node.pos, stats=[node]) + else: + return node + + def visit_StatListNode(self, node): + self.is_in_statlist = True + self.visitchildren(node) + self.is_in_statlist = False + return node + + def visit_ParallelAssignmentNode(self, node): + return self.visit_StatNode(node, True) + + def visit_CEnumDefNode(self, node): + return self.visit_StatNode(node, True) + + def visit_CStructOrUnionDefNode(self, node): + return self.visit_StatNode(node, True) + + def visit_PassStatNode(self, node): + """Eliminate PassStatNode""" + if not self.is_in_statlist: + return Nodes.StatListNode(pos=node.pos, stats=[]) + else: + return [] + + def visit_ExprStatNode(self, node): + """Eliminate useless string literals""" + if node.expr.is_string_literal: + return 
self.visit_PassStatNode(node) + else: + return self.visit_StatNode(node) + + def visit_CDeclaratorNode(self, node): + return node + + +class PostParseError(CompileError): pass + +# error strings checked by unit tests, so define them +ERR_CDEF_INCLASS = 'Cannot assign default value to fields in cdef classes, structs or unions' +ERR_BUF_DEFAULTS = 'Invalid buffer defaults specification (see docs)' +ERR_INVALID_SPECIALATTR_TYPE = 'Special attributes must not have a type declared' +class PostParse(ScopeTrackingTransform): + """ + Basic interpretation of the parse tree, as well as validity + checking that can be done on a very basic level on the parse + tree (while still not being a problem with the basic syntax, + as such). + + Specifically: + - Default values to cdef assignments are turned into single + assignments following the declaration (everywhere but in class + bodies, where they raise a compile error) + + - Interpret some node structures into Python runtime values. + Some nodes take compile-time arguments (currently: + TemplatedTypeNode[args] and __cythonbufferdefaults__ = {args}), + which should be interpreted. This happens in a general way + and other steps should be taken to ensure validity. + + Type arguments cannot be interpreted in this way. + + - For __cythonbufferdefaults__ the arguments are checked for + validity. + + TemplatedTypeNode has its directives interpreted: + Any first positional argument goes into the "dtype" attribute, + any "ndim" keyword argument goes into the "ndim" attribute and + so on. Also it is checked that the directive combination is valid. + - __cythonbufferdefaults__ attributes are parsed and put into the + type information. + + Note: Currently Parsing.py does a lot of interpretation and + reorganization that can be refactored into this transform + if a more pure Abstract Syntax Tree is wanted. 
+ + - Some invalid uses of := assignment expressions are detected + """ + def __init__(self, context): + super(PostParse, self).__init__(context) + self.specialattribute_handlers = { + '__cythonbufferdefaults__' : self.handle_bufferdefaults + } + + def visit_LambdaNode(self, node): + # unpack a lambda expression into the corresponding DefNode + collector = YieldNodeCollector() + collector.visitchildren(node.result_expr) + if collector.has_yield or collector.has_await or isinstance(node.result_expr, ExprNodes.YieldExprNode): + body = Nodes.ExprStatNode( + node.result_expr.pos, expr=node.result_expr) + else: + body = Nodes.ReturnStatNode( + node.result_expr.pos, value=node.result_expr) + node.def_node = Nodes.DefNode( + node.pos, name=node.name, + args=node.args, star_arg=node.star_arg, + starstar_arg=node.starstar_arg, + body=body, doc=None) + self.visitchildren(node) + return node + + def visit_GeneratorExpressionNode(self, node): + # unpack a generator expression into the corresponding DefNode + collector = YieldNodeCollector() + collector.visitchildren(node.loop, attrs=None, exclude=["iterator"]) + node.def_node = Nodes.DefNode( + node.pos, name=node.name, doc=None, + args=[], star_arg=None, starstar_arg=None, + body=node.loop, is_async_def=collector.has_await, + is_generator_expression=True) + _AssignmentExpressionChecker.do_checks(node.loop, scope_is_class=self.scope_type in ("pyclass", "cclass")) + self.visitchildren(node) + return node + + def visit_ComprehensionNode(self, node): + # enforce local scope also in Py2 for async generators (seriously, that's a Py3.6 feature...) 
+ if not node.has_local_scope: + collector = YieldNodeCollector() + collector.visitchildren(node.loop) + if collector.has_await: + node.has_local_scope = True + _AssignmentExpressionChecker.do_checks(node.loop, scope_is_class=self.scope_type in ("pyclass", "cclass")) + self.visitchildren(node) + return node + + # cdef variables + def handle_bufferdefaults(self, decl): + if not isinstance(decl.default, ExprNodes.DictNode): + raise PostParseError(decl.pos, ERR_BUF_DEFAULTS) + self.scope_node.buffer_defaults_node = decl.default + self.scope_node.buffer_defaults_pos = decl.pos + + def visit_CVarDefNode(self, node): + # This assumes only plain names and pointers are assignable on + # declaration. Also, it makes use of the fact that a cdef decl + # must appear before the first use, so we don't have to deal with + # "i = 3; cdef int i = i" and can simply move the nodes around. + try: + self.visitchildren(node) + stats = [node] + newdecls = [] + for decl in node.declarators: + declbase = decl + while isinstance(declbase, Nodes.CPtrDeclaratorNode): + declbase = declbase.base + if isinstance(declbase, Nodes.CNameDeclaratorNode): + if declbase.default is not None: + if self.scope_type in ('cclass', 'pyclass', 'struct'): + if isinstance(self.scope_node, Nodes.CClassDefNode): + handler = self.specialattribute_handlers.get(decl.name) + if handler: + if decl is not declbase: + raise PostParseError(decl.pos, ERR_INVALID_SPECIALATTR_TYPE) + handler(decl) + continue # Remove declaration + raise PostParseError(decl.pos, ERR_CDEF_INCLASS) + first_assignment = self.scope_type != 'module' + stats.append(Nodes.SingleAssignmentNode(node.pos, + lhs=ExprNodes.NameNode(node.pos, name=declbase.name), + rhs=declbase.default, first=first_assignment)) + declbase.default = None + newdecls.append(decl) + node.declarators = newdecls + return stats + except PostParseError as e: + # An error in a cdef clause is ok, simply remove the declaration + # and try to move on to report more errors + 
self.context.nonfatal_error(e) + return None + + # Split parallel assignments (a,b = b,a) into separate partial + # assignments that are executed rhs-first using temps. This + # restructuring must be applied before type analysis so that known + # types on rhs and lhs can be matched directly. It is required in + # the case that the types cannot be coerced to a Python type in + # order to assign from a tuple. + + def visit_SingleAssignmentNode(self, node): + self.visitchildren(node) + return self._visit_assignment_node(node, [node.lhs, node.rhs]) + + def visit_CascadedAssignmentNode(self, node): + self.visitchildren(node) + return self._visit_assignment_node(node, node.lhs_list + [node.rhs]) + + def _visit_assignment_node(self, node, expr_list): + """Flatten parallel assignments into separate single + assignments or cascaded assignments. + """ + if sum([ 1 for expr in expr_list + if expr.is_sequence_constructor or expr.is_string_literal ]) < 2: + # no parallel assignments => nothing to do + return node + + expr_list_list = [] + flatten_parallel_assignments(expr_list, expr_list_list) + temp_refs = [] + eliminate_rhs_duplicates(expr_list_list, temp_refs) + + nodes = [] + for expr_list in expr_list_list: + lhs_list = expr_list[:-1] + rhs = expr_list[-1] + if len(lhs_list) == 1: + node = Nodes.SingleAssignmentNode(rhs.pos, + lhs = lhs_list[0], rhs = rhs) + else: + node = Nodes.CascadedAssignmentNode(rhs.pos, + lhs_list = lhs_list, rhs = rhs) + nodes.append(node) + + if len(nodes) == 1: + assign_node = nodes[0] + else: + assign_node = Nodes.ParallelAssignmentNode(nodes[0].pos, stats = nodes) + + if temp_refs: + duplicates_and_temps = [ (temp.expression, temp) + for temp in temp_refs ] + sort_common_subsequences(duplicates_and_temps) + for _, temp_ref in duplicates_and_temps[::-1]: + assign_node = LetNode(temp_ref, assign_node) + + return assign_node + + def _flatten_sequence(self, seq, result): + for arg in seq.args: + if arg.is_sequence_constructor: + 
self._flatten_sequence(arg, result) + else: + result.append(arg) + return result + + def visit_DelStatNode(self, node): + self.visitchildren(node) + node.args = self._flatten_sequence(node, []) + return node + + def visit_ExceptClauseNode(self, node): + if node.is_except_as: + # except-as must delete NameNode target at the end + del_target = Nodes.DelStatNode( + node.pos, + args=[ExprNodes.NameNode( + node.target.pos, name=node.target.name)], + ignore_nonexisting=True) + node.body = Nodes.StatListNode( + node.pos, + stats=[Nodes.TryFinallyStatNode( + node.pos, + body=node.body, + finally_clause=Nodes.StatListNode( + node.pos, + stats=[del_target]))]) + self.visitchildren(node) + return node + + def visit_AssertStatNode(self, node): + """Extract the exception raising into a RaiseStatNode to simplify GIL handling. + """ + if node.exception is None: + node.exception = Nodes.RaiseStatNode( + node.pos, + exc_type=ExprNodes.NameNode(node.pos, name=EncodedString("AssertionError")), + exc_value=node.value, + exc_tb=None, + cause=None, + builtin_exc_name="AssertionError", + wrap_tuple_value=True, + ) + node.value = None + self.visitchildren(node) + return node + +class _AssignmentExpressionTargetNameFinder(TreeVisitor): + def __init__(self): + super(_AssignmentExpressionTargetNameFinder, self).__init__() + self.target_names = {} + + def find_target_names(self, target): + if target.is_name: + return [target.name] + elif target.is_sequence_constructor: + names = [] + for arg in target.args: + names.extend(self.find_target_names(arg)) + return names + # other targets are possible, but it isn't necessary to investigate them here + return [] + + def visit_ForInStatNode(self, node): + self.target_names[node] = tuple(self.find_target_names(node.target)) + self.visitchildren(node) + + def visit_ComprehensionNode(self, node): + pass # don't recurse into nested comprehensions + + def visit_LambdaNode(self, node): + pass # don't recurse into nested lambdas/generator expressions + + 
def visit_Node(self, node): + self.visitchildren(node) + + +class _AssignmentExpressionChecker(TreeVisitor): + """ + Enforces rules on AssignmentExpressions within generator expressions and comprehensions + """ + def __init__(self, loop_node, scope_is_class): + super(_AssignmentExpressionChecker, self).__init__() + + target_name_finder = _AssignmentExpressionTargetNameFinder() + target_name_finder.visit(loop_node) + self.target_names_dict = target_name_finder.target_names + self.in_iterator = False + self.in_nested_generator = False + self.scope_is_class = scope_is_class + self.current_target_names = () + self.all_target_names = set() + for names in self.target_names_dict.values(): + self.all_target_names.update(names) + + def _reset_state(self): + old_state = (self.in_iterator, self.in_nested_generator, self.scope_is_class, self.all_target_names, self.current_target_names) + # note: not resetting self.in_iterator here, see visit_LambdaNode() below + self.in_nested_generator = False + self.scope_is_class = False + self.current_target_names = () + self.all_target_names = set() + return old_state + + def _set_state(self, old_state): + self.in_iterator, self.in_nested_generator, self.scope_is_class, self.all_target_names, self.current_target_names = old_state + + @classmethod + def do_checks(cls, loop_node, scope_is_class): + checker = cls(loop_node, scope_is_class) + checker.visit(loop_node) + + def visit_ForInStatNode(self, node): + if self.in_nested_generator: + self.visitchildren(node) # once nested, don't do anything special + return + + current_target_names = self.current_target_names + target_name = self.target_names_dict.get(node, None) + if target_name: + self.current_target_names += target_name + + self.in_iterator = True + self.visit(node.iterator) + self.in_iterator = False + self.visitchildren(node, exclude=("iterator",)) + + self.current_target_names = current_target_names + + def visit_AssignmentExpressionNode(self, node): + if self.in_iterator: + 
error(node.pos, "assignment expression cannot be used in a comprehension iterable expression") + if self.scope_is_class: + error(node.pos, "assignment expression within a comprehension cannot be used in a class body") + if node.target_name in self.current_target_names: + error(node.pos, "assignment expression cannot rebind comprehension iteration variable '%s'" % + node.target_name) + elif node.target_name in self.all_target_names: + error(node.pos, "comprehension inner loop cannot rebind assignment expression target '%s'" % + node.target_name) + + def visit_LambdaNode(self, node): + # Don't reset "in_iterator" - an assignment expression in a lambda in an + # iterator is explicitly tested by the Python testcases and banned. + old_state = self._reset_state() + # the lambda node's "def_node" is not set up at this point, so we need to recurse into it explicitly. + self.visit(node.result_expr) + self._set_state(old_state) + + def visit_ComprehensionNode(self, node): + in_nested_generator = self.in_nested_generator + self.in_nested_generator = True + self.visitchildren(node) + self.in_nested_generator = in_nested_generator + + def visit_GeneratorExpressionNode(self, node): + in_nested_generator = self.in_nested_generator + self.in_nested_generator = True + # def_node isn't set up yet, so we need to visit the loop directly. + self.visit(node.loop) + self.in_nested_generator = in_nested_generator + + def visit_Node(self, node): + self.visitchildren(node) + + +def eliminate_rhs_duplicates(expr_list_list, ref_node_sequence): + """Replace rhs items by LetRefNodes if they appear more than once. + Creates a sequence of LetRefNodes that set up the required temps + and appends them to ref_node_sequence. The input list is modified + in-place. 
+ """ + seen_nodes = set() + ref_nodes = {} + def find_duplicates(node): + if node.is_literal or node.is_name: + # no need to replace those; can't include attributes here + # as their access is not necessarily side-effect free + return + if node in seen_nodes: + if node not in ref_nodes: + ref_node = LetRefNode(node) + ref_nodes[node] = ref_node + ref_node_sequence.append(ref_node) + else: + seen_nodes.add(node) + if node.is_sequence_constructor: + for item in node.args: + find_duplicates(item) + + for expr_list in expr_list_list: + rhs = expr_list[-1] + find_duplicates(rhs) + if not ref_nodes: + return + + def substitute_nodes(node): + if node in ref_nodes: + return ref_nodes[node] + elif node.is_sequence_constructor: + node.args = list(map(substitute_nodes, node.args)) + return node + + # replace nodes inside of the common subexpressions + for node in ref_nodes: + if node.is_sequence_constructor: + node.args = list(map(substitute_nodes, node.args)) + + # replace common subexpressions on all rhs items + for expr_list in expr_list_list: + expr_list[-1] = substitute_nodes(expr_list[-1]) + +def sort_common_subsequences(items): + """Sort items/subsequences so that all items and subsequences that + an item contains appear before the item itself. This is needed + because each rhs item must only be evaluated once, so its value + must be evaluated first and then reused when packing sequences + that contain it. + + This implies a partial order, and the sort must be stable to + preserve the original order as much as possible, so we use a + simple insertion sort (which is very fast for short sequences, the + normal case in practice). 
+ """ + def contains(seq, x): + for item in seq: + if item is x: + return True + elif item.is_sequence_constructor and contains(item.args, x): + return True + return False + def lower_than(a,b): + return b.is_sequence_constructor and contains(b.args, a) + + for pos, item in enumerate(items): + key = item[1] # the ResultRefNode which has already been injected into the sequences + new_pos = pos + for i in range(pos-1, -1, -1): + if lower_than(key, items[i][0]): + new_pos = i + if new_pos != pos: + for i in range(pos, new_pos, -1): + items[i] = items[i-1] + items[new_pos] = item + +def unpack_string_to_character_literals(literal): + chars = [] + pos = literal.pos + stype = literal.__class__ + sval = literal.value + sval_type = sval.__class__ + for char in sval: + cval = sval_type(char) + chars.append(stype(pos, value=cval, constant_result=cval)) + return chars + +def flatten_parallel_assignments(input, output): + # The input is a list of expression nodes, representing the LHSs + # and RHS of one (possibly cascaded) assignment statement. For + # sequence constructors, rearranges the matching parts of both + # sides into a list of equivalent assignments between the + # individual elements. This transformation is applied + # recursively, so that nested structures get matched as well. 
+ rhs = input[-1] + if (not (rhs.is_sequence_constructor or isinstance(rhs, ExprNodes.UnicodeNode)) + or not sum([lhs.is_sequence_constructor for lhs in input[:-1]])): + output.append(input) + return + + complete_assignments = [] + + if rhs.is_sequence_constructor: + rhs_args = rhs.args + elif rhs.is_string_literal: + rhs_args = unpack_string_to_character_literals(rhs) + + rhs_size = len(rhs_args) + lhs_targets = [[] for _ in range(rhs_size)] + starred_assignments = [] + for lhs in input[:-1]: + if not lhs.is_sequence_constructor: + if lhs.is_starred: + error(lhs.pos, "starred assignment target must be in a list or tuple") + complete_assignments.append(lhs) + continue + lhs_size = len(lhs.args) + starred_targets = sum([1 for expr in lhs.args if expr.is_starred]) + if starred_targets > 1: + error(lhs.pos, "more than 1 starred expression in assignment") + output.append([lhs,rhs]) + continue + elif lhs_size - starred_targets > rhs_size: + error(lhs.pos, "need more than %d value%s to unpack" + % (rhs_size, (rhs_size != 1) and 's' or '')) + output.append([lhs,rhs]) + continue + elif starred_targets: + map_starred_assignment(lhs_targets, starred_assignments, + lhs.args, rhs_args) + elif lhs_size < rhs_size: + error(lhs.pos, "too many values to unpack (expected %d, got %d)" + % (lhs_size, rhs_size)) + output.append([lhs,rhs]) + continue + else: + for targets, expr in zip(lhs_targets, lhs.args): + targets.append(expr) + + if complete_assignments: + complete_assignments.append(rhs) + output.append(complete_assignments) + + # recursively flatten partial assignments + for cascade, rhs in zip(lhs_targets, rhs_args): + if cascade: + cascade.append(rhs) + flatten_parallel_assignments(cascade, output) + + # recursively flatten starred assignments + for cascade in starred_assignments: + if cascade[0].is_sequence_constructor: + flatten_parallel_assignments(cascade, output) + else: + output.append(cascade) + +def map_starred_assignment(lhs_targets, starred_assignments, lhs_args, 
rhs_args): + # Appends the fixed-position LHS targets to the target list that + # appear left and right of the starred argument. + # + # The starred_assignments list receives a new tuple + # (lhs_target, rhs_values_list) that maps the remaining arguments + # (those that match the starred target) to a list. + + # left side of the starred target + for i, (targets, expr) in enumerate(zip(lhs_targets, lhs_args)): + if expr.is_starred: + starred = i + lhs_remaining = len(lhs_args) - i - 1 + break + targets.append(expr) + else: + raise InternalError("no starred arg found when splitting starred assignment") + + # right side of the starred target + for i, (targets, expr) in enumerate(zip(lhs_targets[-lhs_remaining:], + lhs_args[starred + 1:])): + targets.append(expr) + + # the starred target itself, must be assigned a (potentially empty) list + target = lhs_args[starred].target # unpack starred node + starred_rhs = rhs_args[starred:] + if lhs_remaining: + starred_rhs = starred_rhs[:-lhs_remaining] + if starred_rhs: + pos = starred_rhs[0].pos + else: + pos = target.pos + starred_assignments.append([ + target, ExprNodes.ListNode(pos=pos, args=starred_rhs)]) + + +class PxdPostParse(CythonTransform, SkipDeclarations): + """ + Basic interpretation/validity checking that should only be + done on pxd trees. + + A lot of this checking currently happens in the parser; but + what is listed below happens here. 
+ + - "def" functions are let through only if they fill the + getbuffer/releasebuffer slots + + - cdef functions are let through only if they are on the + top level and are declared "inline" + """ + ERR_INLINE_ONLY = "function definition in pxd file must be declared 'cdef inline'" + ERR_NOGO_WITH_INLINE = "inline function definition in pxd file cannot be '%s'" + + def __call__(self, node): + self.scope_type = 'pxd' + return super(PxdPostParse, self).__call__(node) + + def visit_CClassDefNode(self, node): + old = self.scope_type + self.scope_type = 'cclass' + self.visitchildren(node) + self.scope_type = old + return node + + def visit_FuncDefNode(self, node): + # FuncDefNode always come with an implementation (without + # an imp they are CVarDefNodes..) + err = self.ERR_INLINE_ONLY + + if (isinstance(node, Nodes.DefNode) and self.scope_type == 'cclass' + and node.name in ('__getbuffer__', '__releasebuffer__')): + err = None # allow these slots + + if isinstance(node, Nodes.CFuncDefNode): + if (u'inline' in node.modifiers and + self.scope_type in ('pxd', 'cclass')): + node.inline_in_pxd = True + if node.visibility != 'private': + err = self.ERR_NOGO_WITH_INLINE % node.visibility + elif node.api: + err = self.ERR_NOGO_WITH_INLINE % 'api' + else: + err = None # allow inline function + else: + err = self.ERR_INLINE_ONLY + + if err: + self.context.nonfatal_error(PostParseError(node.pos, err)) + return None + else: + return node + + +class TrackNumpyAttributes(VisitorTransform, SkipDeclarations): + # TODO: Make name handling as good as in InterpretCompilerDirectives() below - probably best to merge the two. 
+ def __init__(self): + super(TrackNumpyAttributes, self).__init__() + self.numpy_module_names = set() + + def visit_CImportStatNode(self, node): + if node.module_name == u"numpy": + self.numpy_module_names.add(node.as_name or u"numpy") + return node + + def visit_AttributeNode(self, node): + self.visitchildren(node) + obj = node.obj + if (obj.is_name and obj.name in self.numpy_module_names) or obj.is_numpy_attribute: + node.is_numpy_attribute = True + return node + + visit_Node = VisitorTransform.recurse_to_children + + +class InterpretCompilerDirectives(CythonTransform): + """ + After parsing, directives can be stored in a number of places: + - #cython-comments at the top of the file (stored in ModuleNode) + - Command-line arguments overriding these + - @cython.directivename decorators + - with cython.directivename: statements + - replaces "cython.compiled" with BoolNode(value=True) + allowing unreachable blocks to be removed at a fairly early stage + before cython typing rules are forced on applied + + This transform is responsible for interpreting these various sources + and store the directive in two ways: + - Set the directives attribute of the ModuleNode for global directives. + - Use a CompilerDirectivesNode to override directives for a subtree. + + (The first one is primarily to not have to modify with the tree + structure, so that ModuleNode stay on top.) + + The directives are stored in dictionaries from name to value in effect. + Each such dictionary is always filled in for all possible directives, + using default values where no value is given by the user. + + The available directives are controlled in Options.py. + + Note that we have to run this prior to analysis, and so some minor + duplication of functionality has to occur: We manually track cimports + and which names the "cython" module may have been imported to. 
+ """ + unop_method_nodes = { + 'typeof': ExprNodes.TypeofNode, + + 'operator.address': ExprNodes.AmpersandNode, + 'operator.dereference': ExprNodes.DereferenceNode, + 'operator.preincrement' : ExprNodes.inc_dec_constructor(True, '++'), + 'operator.predecrement' : ExprNodes.inc_dec_constructor(True, '--'), + 'operator.postincrement': ExprNodes.inc_dec_constructor(False, '++'), + 'operator.postdecrement': ExprNodes.inc_dec_constructor(False, '--'), + 'operator.typeid' : ExprNodes.TypeidNode, + + # For backwards compatibility. + 'address': ExprNodes.AmpersandNode, + } + + binop_method_nodes = { + 'operator.comma' : ExprNodes.c_binop_constructor(','), + } + + special_methods = { + 'declare', 'union', 'struct', 'typedef', + 'sizeof', 'cast', 'pointer', 'compiled', + 'NULL', 'fused_type', 'parallel', + } + special_methods.update(unop_method_nodes) + + valid_cython_submodules = { + 'cimports', + 'dataclasses', + 'operator', + 'parallel', + 'view', + } + + valid_parallel_directives = { + "parallel", + "prange", + "threadid", + #"threadsavailable", + } + + def __init__(self, context, compilation_directive_defaults): + super(InterpretCompilerDirectives, self).__init__(context) + self.cython_module_names = set() + self.directive_names = {'staticmethod': 'staticmethod'} + self.parallel_directives = {} + directives = copy.deepcopy(Options.get_directive_defaults()) + for key, value in compilation_directive_defaults.items(): + directives[_unicode(key)] = copy.deepcopy(value) + self.directives = directives + + def check_directive_scope(self, pos, directive, scope): + legal_scopes = Options.directive_scopes.get(directive, None) + if legal_scopes and scope not in legal_scopes: + self.context.nonfatal_error(PostParseError(pos, 'The %s compiler directive ' + 'is not allowed in %s scope' % (directive, scope))) + return False + else: + if directive not in Options.directive_types: + error(pos, "Invalid directive: '%s'." 
% (directive,)) + return True + + def _check_valid_cython_module(self, pos, module_name): + if not module_name.startswith("cython."): + return + submodule = module_name.split('.', 2)[1] + if submodule in self.valid_cython_submodules: + return + + extra = "" + # This is very rarely used, so don't waste space on static tuples. + hints = [ + line.split() for line in """\ + imp cimports + cimp cimports + para parallel + parra parallel + dataclass dataclasses + """.splitlines()[:-1] + ] + for wrong, correct in hints: + if module_name.startswith("cython." + wrong): + extra = "Did you mean 'cython.%s' ?" % correct + break + if not extra: + is_simple_cython_name = submodule in Options.directive_types + if not is_simple_cython_name and not submodule.startswith("_"): + # Try to find it in the Shadow module (i.e. the pure Python namespace of cython.*). + # FIXME: use an internal reference of "cython.*" names instead of Shadow.py + from .. import Shadow + is_simple_cython_name = hasattr(Shadow, submodule) + if is_simple_cython_name: + extra = "Instead, use 'import cython' and then 'cython.%s'." % submodule + + error(pos, "'%s' is not a valid cython.* module%s%s" % ( + module_name, + ". " if extra else "", + extra, + )) + + # Set up processing and handle the cython: comments. 
+ def visit_ModuleNode(self, node): + for key in sorted(node.directive_comments): + if not self.check_directive_scope(node.pos, key, 'module'): + self.wrong_scope_error(node.pos, key, 'module') + del node.directive_comments[key] + + self.module_scope = node.scope + + self.directives.update(node.directive_comments) + node.directives = self.directives + node.parallel_directives = self.parallel_directives + self.visitchildren(node) + node.cython_module_names = self.cython_module_names + return node + + def visit_CompilerDirectivesNode(self, node): + old_directives, self.directives = self.directives, node.directives + self.visitchildren(node) + self.directives = old_directives + return node + + # The following four functions track imports and cimports that + # begin with "cython" + def is_cython_directive(self, name): + return (name in Options.directive_types or + name in self.special_methods or + PyrexTypes.parse_basic_type(name)) + + def is_parallel_directive(self, full_name, pos): + """ + Checks to see if fullname (e.g. cython.parallel.prange) is a valid + parallel directive. If it is a star import it also updates the + parallel_directives. 
+ """ + result = (full_name + ".").startswith("cython.parallel.") + + if result: + directive = full_name.split('.') + if full_name == u"cython.parallel": + self.parallel_directives[u"parallel"] = u"cython.parallel" + elif full_name == u"cython.parallel.*": + for name in self.valid_parallel_directives: + self.parallel_directives[name] = u"cython.parallel.%s" % name + elif (len(directive) != 3 or + directive[-1] not in self.valid_parallel_directives): + error(pos, "No such directive: %s" % full_name) + + self.module_scope.use_utility_code( + UtilityCode.load_cached("InitThreads", "ModuleSetupCode.c")) + + return result + + def visit_CImportStatNode(self, node): + module_name = node.module_name + if module_name == u"cython.cimports": + error(node.pos, "Cannot cimport the 'cython.cimports' package directly, only submodules.") + if module_name.startswith(u"cython.cimports."): + if node.as_name and node.as_name != u'cython': + node.module_name = module_name[len(u"cython.cimports."):] + return node + error(node.pos, + "Python cimports must use 'from cython.cimports... import ...'" + " or 'import ... 
as ...', not just 'import ...'") + + if module_name == u"cython": + self.cython_module_names.add(node.as_name or u"cython") + elif module_name.startswith(u"cython."): + if module_name.startswith(u"cython.parallel."): + error(node.pos, node.module_name + " is not a module") + else: + self._check_valid_cython_module(node.pos, module_name) + + if module_name == u"cython.parallel": + if node.as_name and node.as_name != u"cython": + self.parallel_directives[node.as_name] = module_name + else: + self.cython_module_names.add(u"cython") + self.parallel_directives[ + u"cython.parallel"] = module_name + self.module_scope.use_utility_code( + UtilityCode.load_cached("InitThreads", "ModuleSetupCode.c")) + elif node.as_name: + self.directive_names[node.as_name] = module_name[7:] + else: + self.cython_module_names.add(u"cython") + # if this cimport was a compiler directive, we don't + # want to leave the cimport node sitting in the tree + return None + return node + + def visit_FromCImportStatNode(self, node): + module_name = node.module_name + if module_name == u"cython.cimports" or module_name.startswith(u"cython.cimports."): + # only supported for convenience + return self._create_cimport_from_import( + node.pos, module_name, node.relative_level, node.imported_names) + elif not node.relative_level and ( + module_name == u"cython" or module_name.startswith(u"cython.")): + self._check_valid_cython_module(node.pos, module_name) + submodule = (module_name + u".")[7:] + newimp = [] + for pos, name, as_name in node.imported_names: + full_name = submodule + name + qualified_name = u"cython." + full_name + if self.is_parallel_directive(qualified_name, node.pos): + # from cython cimport parallel, or + # from cython.parallel cimport parallel, prange, ... 
+ self.parallel_directives[as_name or name] = qualified_name + elif self.is_cython_directive(full_name): + self.directive_names[as_name or name] = full_name + elif full_name in ['dataclasses', 'typing']: + self.directive_names[as_name or name] = full_name + # unlike many directives, still treat it as a regular module + newimp.append((pos, name, as_name)) + else: + newimp.append((pos, name, as_name)) + + if not newimp: + return None + + node.imported_names = newimp + return node + + def visit_FromImportStatNode(self, node): + import_node = node.module + module_name = import_node.module_name.value + if module_name == u"cython.cimports" or module_name.startswith(u"cython.cimports."): + imported_names = [] + for name, name_node in node.items: + imported_names.append( + (name_node.pos, name, None if name == name_node.name else name_node.name)) + return self._create_cimport_from_import( + node.pos, module_name, import_node.level, imported_names) + elif module_name == u"cython" or module_name.startswith(u"cython."): + self._check_valid_cython_module(import_node.module_name.pos, module_name) + submodule = (module_name + u".")[7:] + newimp = [] + for name, name_node in node.items: + full_name = submodule + name + qualified_name = u"cython." 
+ full_name + if self.is_parallel_directive(qualified_name, node.pos): + self.parallel_directives[name_node.name] = qualified_name + elif self.is_cython_directive(full_name): + self.directive_names[name_node.name] = full_name + else: + newimp.append((name, name_node)) + if not newimp: + return None + node.items = newimp + return node + + def _create_cimport_from_import(self, node_pos, module_name, level, imported_names): + if module_name == u"cython.cimports" or module_name.startswith(u"cython.cimports."): + module_name = EncodedString(module_name[len(u"cython.cimports."):]) # may be empty + + if module_name: + # from cython.cimports.a.b import x, y, z => from a.b cimport x, y, z + return Nodes.FromCImportStatNode( + node_pos, module_name=module_name, + relative_level=level, + imported_names=imported_names) + else: + # from cython.cimports import x, y, z => cimport x; cimport y; cimport z + return [ + Nodes.CImportStatNode( + pos, + module_name=dotted_name, + as_name=as_name, + is_absolute=level == 0) + for pos, dotted_name, as_name in imported_names + ] + + def visit_SingleAssignmentNode(self, node): + if isinstance(node.rhs, ExprNodes.ImportNode): + module_name = node.rhs.module_name.value + if module_name != u"cython" and not module_name.startswith("cython."): + return node + + node = Nodes.CImportStatNode(node.pos, module_name=module_name, as_name=node.lhs.name) + node = self.visit_CImportStatNode(node) + else: + self.visitchildren(node) + + return node + + def visit_NameNode(self, node): + if node.annotation: + self.visitchild(node, 'annotation') + if node.name in self.cython_module_names: + node.is_cython_module = True + else: + directive = self.directive_names.get(node.name) + if directive is not None: + node.cython_attribute = directive + if node.as_cython_attribute() == "compiled": + return ExprNodes.BoolNode(node.pos, value=True) # replace early so unused branches can be dropped + # before they have a chance to cause compile-errors + return node + + def 
visit_AttributeNode(self, node): + self.visitchildren(node) + if node.as_cython_attribute() == "compiled": + return ExprNodes.BoolNode(node.pos, value=True) # replace early so unused branches can be dropped + # before they have a chance to cause compile-errors + return node + + def visit_AnnotationNode(self, node): + # for most transforms annotations are left unvisited (because they're unevaluated) + # however, it is important to pick up compiler directives from them + if node.expr: + self.visit(node.expr) + return node + + def visit_NewExprNode(self, node): + self.visitchild(node, 'cppclass') + self.visitchildren(node) + return node + + def try_to_parse_directives(self, node): + # If node is the contents of an directive (in a with statement or + # decorator), returns a list of (directivename, value) pairs. + # Otherwise, returns None + if isinstance(node, ExprNodes.CallNode): + self.visitchild(node, 'function') + optname = node.function.as_cython_attribute() + if optname: + directivetype = Options.directive_types.get(optname) + if directivetype: + args, kwds = node.explicit_args_kwds() + directives = [] + key_value_pairs = [] + if kwds is not None and directivetype is not dict: + for keyvalue in kwds.key_value_pairs: + key, value = keyvalue + sub_optname = "%s.%s" % (optname, key.value) + if Options.directive_types.get(sub_optname): + directives.append(self.try_to_parse_directive(sub_optname, [value], None, keyvalue.pos)) + else: + key_value_pairs.append(keyvalue) + if not key_value_pairs: + kwds = None + else: + kwds.key_value_pairs = key_value_pairs + if directives and not kwds and not args: + return directives + directives.append(self.try_to_parse_directive(optname, args, kwds, node.function.pos)) + return directives + elif isinstance(node, (ExprNodes.AttributeNode, ExprNodes.NameNode)): + self.visit(node) + optname = node.as_cython_attribute() + if optname: + directivetype = Options.directive_types.get(optname) + if directivetype is bool: + arg = 
ExprNodes.BoolNode(node.pos, value=True) + return [self.try_to_parse_directive(optname, [arg], None, node.pos)] + elif directivetype is None or directivetype is Options.DEFER_ANALYSIS_OF_ARGUMENTS: + return [(optname, None)] + else: + raise PostParseError( + node.pos, "The '%s' directive should be used as a function call." % optname) + return None + + def try_to_parse_directive(self, optname, args, kwds, pos): + if optname == 'np_pythran' and not self.context.cpp: + raise PostParseError(pos, 'The %s directive can only be used in C++ mode.' % optname) + elif optname == 'exceptval': + # default: exceptval(None, check=True) + arg_error = len(args) > 1 + check = True + if kwds and kwds.key_value_pairs: + kw = kwds.key_value_pairs[0] + if (len(kwds.key_value_pairs) == 1 and + kw.key.is_string_literal and kw.key.value == 'check' and + isinstance(kw.value, ExprNodes.BoolNode)): + check = kw.value.value + else: + arg_error = True + if arg_error: + raise PostParseError( + pos, 'The exceptval directive takes 0 or 1 positional arguments and the boolean keyword "check"') + return ('exceptval', (args[0] if args else None, check)) + + directivetype = Options.directive_types.get(optname) + if len(args) == 1 and isinstance(args[0], ExprNodes.NoneNode): + return optname, Options.get_directive_defaults()[optname] + elif directivetype is bool: + if kwds is not None or len(args) != 1 or not isinstance(args[0], ExprNodes.BoolNode): + raise PostParseError(pos, + 'The %s directive takes one compile-time boolean argument' % optname) + return (optname, args[0].value) + elif directivetype is int: + if kwds is not None or len(args) != 1 or not isinstance(args[0], ExprNodes.IntNode): + raise PostParseError(pos, + 'The %s directive takes one compile-time integer argument' % optname) + return (optname, int(args[0].value)) + elif directivetype is str: + if kwds is not None or len(args) != 1 or not isinstance( + args[0], (ExprNodes.StringNode, ExprNodes.UnicodeNode)): + raise PostParseError(pos, 
+ 'The %s directive takes one compile-time string argument' % optname) + return (optname, str(args[0].value)) + elif directivetype is type: + if kwds is not None or len(args) != 1: + raise PostParseError(pos, + 'The %s directive takes one type argument' % optname) + return (optname, args[0]) + elif directivetype is dict: + if len(args) != 0: + raise PostParseError(pos, + 'The %s directive takes no prepositional arguments' % optname) + return optname, kwds.as_python_dict() + elif directivetype is list: + if kwds and len(kwds.key_value_pairs) != 0: + raise PostParseError(pos, + 'The %s directive takes no keyword arguments' % optname) + return optname, [ str(arg.value) for arg in args ] + elif callable(directivetype): + if kwds is not None or len(args) != 1 or not isinstance( + args[0], (ExprNodes.StringNode, ExprNodes.UnicodeNode)): + raise PostParseError(pos, + 'The %s directive takes one compile-time string argument' % optname) + return (optname, directivetype(optname, str(args[0].value))) + elif directivetype is Options.DEFER_ANALYSIS_OF_ARGUMENTS: + # signal to pass things on without processing + return (optname, (args, kwds.as_python_dict() if kwds else {})) + else: + assert False + + def visit_with_directives(self, node, directives, contents_directives): + # contents_directives may be None + if not directives: + assert not contents_directives + return self.visit_Node(node) + + old_directives = self.directives + new_directives = Options.copy_inherited_directives(old_directives, **directives) + if contents_directives is not None: + new_contents_directives = Options.copy_inherited_directives( + old_directives, **contents_directives) + else: + new_contents_directives = new_directives + + if new_directives == old_directives: + return self.visit_Node(node) + + self.directives = new_directives + if (contents_directives is not None and + new_contents_directives != new_directives): + # we need to wrap the node body in a compiler directives node + node.body = 
Nodes.StatListNode( + node.body.pos, + stats=[ + Nodes.CompilerDirectivesNode( + node.body.pos, + directives=new_contents_directives, + body=node.body) + ] + ) + retbody = self.visit_Node(node) + self.directives = old_directives + + if not isinstance(retbody, Nodes.StatListNode): + retbody = Nodes.StatListNode(node.pos, stats=[retbody]) + return Nodes.CompilerDirectivesNode( + pos=retbody.pos, body=retbody, directives=new_directives) + + + # Handle decorators + def visit_FuncDefNode(self, node): + directives, contents_directives = self._extract_directives(node, 'function') + return self.visit_with_directives(node, directives, contents_directives) + + def visit_CVarDefNode(self, node): + directives, _ = self._extract_directives(node, 'function') + for name, value in directives.items(): + if name == 'locals': + node.directive_locals = value + elif name not in ('final', 'staticmethod'): + self.context.nonfatal_error(PostParseError( + node.pos, + "Cdef functions can only take cython.locals(), " + "staticmethod, or final decorators, got %s." % name)) + return self.visit_with_directives(node, directives, contents_directives=None) + + def visit_CClassDefNode(self, node): + directives, contents_directives = self._extract_directives(node, 'cclass') + return self.visit_with_directives(node, directives, contents_directives) + + def visit_CppClassNode(self, node): + directives, contents_directives = self._extract_directives(node, 'cppclass') + return self.visit_with_directives(node, directives, contents_directives) + + def visit_PyClassDefNode(self, node): + directives, contents_directives = self._extract_directives(node, 'class') + return self.visit_with_directives(node, directives, contents_directives) + + def _extract_directives(self, node, scope_name): + """ + Returns two dicts - directives applied to this function/class + and directives applied to its contents. They aren't always the + same (since e.g. 
cfunc should not be applied to inner functions) + """ + if not node.decorators: + return {}, {} + # Split the decorators into two lists -- real decorators and directives + directives = [] + realdecs = [] + both = [] + current_opt_dict = dict(self.directives) + missing = object() + # Decorators coming first take precedence. + for dec in node.decorators[::-1]: + new_directives = self.try_to_parse_directives(dec.decorator) + if new_directives is not None: + for directive in new_directives: + if self.check_directive_scope(node.pos, directive[0], scope_name): + name, value = directive + if name in ('nogil', 'with_gil'): + if value is None: + value = True + else: + args, kwds = value + if kwds or len(args) != 1 or not isinstance(args[0], ExprNodes.BoolNode): + raise PostParseError(dec.pos, 'The %s directive takes one compile-time boolean argument' % name) + value = args[0].value + directive = (name, value) + if current_opt_dict.get(name, missing) != value: + if name == 'cfunc' and 'ufunc' in current_opt_dict: + error(dec.pos, "Cannot apply @cfunc to @ufunc, please reverse the decorators.") + directives.append(directive) + current_opt_dict[name] = value + else: + warning(dec.pos, "Directive does not change previous value (%s%s)" % ( + name, '=%r' % value if value is not None else '')) + if directive[0] == 'staticmethod': + both.append(dec) + # Adapt scope type based on decorators that change it. 
+ if directive[0] == 'cclass' and scope_name == 'class': + scope_name = 'cclass' + else: + realdecs.append(dec) + node.decorators = realdecs[::-1] + both[::-1] + # merge or override repeated directives + optdict = {} + contents_optdict = {} + for name, value in directives: + if name in optdict: + old_value = optdict[name] + # keywords and arg lists can be merged, everything + # else overrides completely + if isinstance(old_value, dict): + old_value.update(value) + elif isinstance(old_value, list): + old_value.extend(value) + else: + optdict[name] = value + else: + optdict[name] = value + if name not in Options.immediate_decorator_directives: + contents_optdict[name] = value + return optdict, contents_optdict + + # Handle with-statements + def visit_WithStatNode(self, node): + directive_dict = {} + for directive in self.try_to_parse_directives(node.manager) or []: + if directive is not None: + if node.target is not None: + self.context.nonfatal_error( + PostParseError(node.pos, "Compiler directive with statements cannot contain 'as'")) + else: + name, value = directive + if name in ('nogil', 'gil'): + # special case: in pure mode, "with nogil" spells "with cython.nogil" + condition = None + if isinstance(node.manager, ExprNodes.SimpleCallNode) and len(node.manager.args) > 0: + if len(node.manager.args) == 1: + condition = node.manager.args[0] + else: + self.context.nonfatal_error( + PostParseError(node.pos, "Compiler directive %s accepts one positional argument." % name)) + elif isinstance(node.manager, ExprNodes.GeneralCallNode): + self.context.nonfatal_error( + PostParseError(node.pos, "Compiler directive %s accepts one positional argument." 
% name)) + node = Nodes.GILStatNode(node.pos, state=name, body=node.body, condition=condition) + return self.visit_Node(node) + if self.check_directive_scope(node.pos, name, 'with statement'): + directive_dict[name] = value + if directive_dict: + return self.visit_with_directives(node.body, directive_dict, contents_directives=None) + return self.visit_Node(node) + + +class ParallelRangeTransform(CythonTransform, SkipDeclarations): + """ + Transform cython.parallel stuff. The parallel_directives come from the + module node, set there by InterpretCompilerDirectives. + + x = cython.parallel.threadavailable() -> ParallelThreadAvailableNode + with nogil, cython.parallel.parallel(): -> ParallelWithBlockNode + print cython.parallel.threadid() -> ParallelThreadIdNode + for i in cython.parallel.prange(...): -> ParallelRangeNode + ... + """ + + # a list of names, maps 'cython.parallel.prange' in the code to + # ['cython', 'parallel', 'prange'] + parallel_directive = None + + # Indicates whether a namenode in an expression is the cython module + namenode_is_cython_module = False + + # Keep track of whether we are the context manager of a 'with' statement + in_context_manager_section = False + + # One of 'prange' or 'with parallel'. This is used to disallow closely + # nested 'with parallel:' blocks + state = None + + directive_to_node = { + u"cython.parallel.parallel": Nodes.ParallelWithBlockNode, + # u"cython.parallel.threadsavailable": ExprNodes.ParallelThreadsAvailableNode, + u"cython.parallel.threadid": ExprNodes.ParallelThreadIdNode, + u"cython.parallel.prange": Nodes.ParallelRangeNode, + } + + def node_is_parallel_directive(self, node): + return node.name in self.parallel_directives or node.is_cython_module + + def get_directive_class_node(self, node): + """ + Figure out which parallel directive was used and return the associated + Node class. + + E.g. 
for a cython.parallel.prange() call we return ParallelRangeNode + """ + if self.namenode_is_cython_module: + directive = '.'.join(self.parallel_directive) + else: + directive = self.parallel_directives[self.parallel_directive[0]] + directive = '%s.%s' % (directive, + '.'.join(self.parallel_directive[1:])) + directive = directive.rstrip('.') + + cls = self.directive_to_node.get(directive) + if cls is None and not (self.namenode_is_cython_module and + self.parallel_directive[0] != 'parallel'): + error(node.pos, "Invalid directive: %s" % directive) + + self.namenode_is_cython_module = False + self.parallel_directive = None + + return cls + + def visit_ModuleNode(self, node): + """ + If any parallel directives were imported, copy them over and visit + the AST + """ + if node.parallel_directives: + self.parallel_directives = node.parallel_directives + return self.visit_Node(node) + + # No parallel directives were imported, so they can't be used :) + return node + + def visit_NameNode(self, node): + if self.node_is_parallel_directive(node): + self.parallel_directive = [node.name] + self.namenode_is_cython_module = node.is_cython_module + return node + + def visit_AttributeNode(self, node): + self.visitchildren(node) + if self.parallel_directive: + self.parallel_directive.append(node.attribute) + return node + + def visit_CallNode(self, node): + self.visitchild(node, 'function') + if not self.parallel_directive: + self.visitchildren(node, exclude=('function',)) + return node + + # We are a parallel directive, replace this node with the + # corresponding ParallelSomethingSomething node + + if isinstance(node, ExprNodes.GeneralCallNode): + args = node.positional_args.args + kwargs = node.keyword_args + else: + args = node.args + kwargs = {} + + parallel_directive_class = self.get_directive_class_node(node) + if parallel_directive_class: + # Note: in case of a parallel() the body is set by + # visit_WithStatNode + node = parallel_directive_class(node.pos, args=args, 
kwargs=kwargs) + + return node + + def visit_WithStatNode(self, node): + "Rewrite with cython.parallel.parallel() blocks" + newnode = self.visit(node.manager) + + if isinstance(newnode, Nodes.ParallelWithBlockNode): + if self.state == 'parallel with': + error(node.manager.pos, + "Nested parallel with blocks are disallowed") + + self.state = 'parallel with' + body = self.visitchild(node, 'body') + self.state = None + + newnode.body = body + return newnode + elif self.parallel_directive: + parallel_directive_class = self.get_directive_class_node(node) + + if not parallel_directive_class: + # There was an error, stop here and now + return None + + if parallel_directive_class is Nodes.ParallelWithBlockNode: + error(node.pos, "The parallel directive must be called") + return None + + self.visitchild(node, 'body') + return node + + def visit_ForInStatNode(self, node): + "Rewrite 'for i in cython.parallel.prange(...):'" + self.visitchild(node, 'iterator') + self.visitchild(node, 'target') + + in_prange = isinstance(node.iterator.sequence, + Nodes.ParallelRangeNode) + previous_state = self.state + + if in_prange: + # This will replace the entire ForInStatNode, so copy the + # attributes + parallel_range_node = node.iterator.sequence + + parallel_range_node.target = node.target + parallel_range_node.body = node.body + parallel_range_node.else_clause = node.else_clause + + node = parallel_range_node + + if not isinstance(node.target, ExprNodes.NameNode): + error(node.target.pos, + "Can only iterate over an iteration variable") + + self.state = 'prange' + + self.visitchild(node, 'body') + self.state = previous_state + self.visitchild(node, 'else_clause') + return node + + def visit(self, node): + "Visit a node that may be None" + if node is not None: + return super(ParallelRangeTransform, self).visit(node) + + +class WithTransform(VisitorTransform, SkipDeclarations): + def visit_WithStatNode(self, node): + self.visitchildren(node, 'body') + pos = node.pos + is_async = 
node.is_async + body, target, manager = node.body, node.target, node.manager + manager = node.manager = ExprNodes.ProxyNode(manager) + node.enter_call = ExprNodes.SimpleCallNode( + pos, function=ExprNodes.AttributeNode( + pos, obj=ExprNodes.CloneNode(manager), + attribute=EncodedString('__aenter__' if is_async else '__enter__'), + is_special_lookup=True), + args=[], + is_temp=True) + + if is_async: + node.enter_call = ExprNodes.AwaitExprNode(pos, arg=node.enter_call) + + if target is not None: + body = Nodes.StatListNode( + pos, stats=[ + Nodes.WithTargetAssignmentStatNode( + pos, lhs=target, with_node=node), + body]) + + excinfo_target = ExprNodes.TupleNode(pos, slow=True, args=[ + ExprNodes.ExcValueNode(pos) for _ in range(3)]) + except_clause = Nodes.ExceptClauseNode( + pos, body=Nodes.IfStatNode( + pos, if_clauses=[ + Nodes.IfClauseNode( + pos, condition=ExprNodes.NotNode( + pos, operand=ExprNodes.WithExitCallNode( + pos, with_stat=node, + test_if_run=False, + args=excinfo_target, + await_expr=ExprNodes.AwaitExprNode(pos, arg=None) if is_async else None)), + body=Nodes.ReraiseStatNode(pos), + ), + ], + else_clause=None), + pattern=None, + target=None, + excinfo_target=excinfo_target, + ) + + node.body = Nodes.TryFinallyStatNode( + pos, body=Nodes.TryExceptStatNode( + pos, body=body, + except_clauses=[except_clause], + else_clause=None, + ), + finally_clause=Nodes.ExprStatNode( + pos, expr=ExprNodes.WithExitCallNode( + pos, with_stat=node, + test_if_run=True, + args=ExprNodes.TupleNode( + pos, args=[ExprNodes.NoneNode(pos) for _ in range(3)]), + await_expr=ExprNodes.AwaitExprNode(pos, arg=None) if is_async else None)), + handle_error_case=False, + ) + return node + + def visit_ExprNode(self, node): + # With statements are never inside expressions. 
+ return node + + visit_Node = VisitorTransform.recurse_to_children + + +class _GeneratorExpressionArgumentsMarker(TreeVisitor, SkipDeclarations): + # called from "MarkClosureVisitor" + def __init__(self, gen_expr): + super(_GeneratorExpressionArgumentsMarker, self).__init__() + self.gen_expr = gen_expr + + def visit_ExprNode(self, node): + if not node.is_literal: + # Don't bother tagging literal nodes + assert (not node.generator_arg_tag) # nobody has tagged this first + node.generator_arg_tag = self.gen_expr + self.visitchildren(node) + + def visit_Node(self, node): + # We're only interested in the expressions that make up the iterator sequence, + # so don't go beyond ExprNodes (e.g. into ForFromStatNode). + return + + def visit_GeneratorExpressionNode(self, node): + node.generator_arg_tag = self.gen_expr + # don't visit children, can't handle overlapping tags + # (and assume generator expressions don't end up optimized out in a way + # that would require overlapping tags) + + +class _HandleGeneratorArguments(VisitorTransform, SkipDeclarations): + # used from within CreateClosureClasses + + def __call__(self, node): + from . 
import Visitor + assert isinstance(node, ExprNodes.GeneratorExpressionNode) + self.gen_node = node + + self.args = list(node.def_node.args) + self.call_parameters = list(node.call_parameters) + self.tag_count = 0 + self.substitutions = {} + + self.visitchildren(node) + + for k, v in self.substitutions.items(): + # doing another search for replacements here (at the end) allows us to sweep up + # CloneNodes too (which are often generated by the optimizer) + # (it could arguably be done more efficiently with a single traversal though) + Visitor.recursively_replace_node(node, k, v) + + node.def_node.args = self.args + node.call_parameters = self.call_parameters + return node + + def visit_GeneratorExpressionNode(self, node): + # a generator can also be substituted itself, so handle that case + new_node = self._handle_ExprNode(node, do_visit_children=False) + # However do not traverse into it. A new _HandleGeneratorArguments visitor will be used + # elsewhere to do that. + return node + + def _handle_ExprNode(self, node, do_visit_children): + if (node.generator_arg_tag is not None and self.gen_node is not None and + self.gen_node == node.generator_arg_tag): + pos = node.pos + # The reason for using ".x" as the name is that this is how CPython + # tracks internal variables in loops (e.g. + # { locals() for v in range(10) } + # will produce "v" and ".0"). We don't replicate this behaviour completely + # but use it as a starting point + name_source = self.tag_count + self.tag_count += 1 + name = EncodedString(".{0}".format(name_source)) + def_node = self.gen_node.def_node + if not def_node.local_scope.lookup_here(name): + from . import Symtab + cname = EncodedString(Naming.genexpr_arg_prefix + Symtab.punycodify_name(str(name_source))) + name_decl = Nodes.CNameDeclaratorNode(pos=pos, name=name) + type = node.type + + # strip away cv types - they shouldn't be applied to the + # function argument or to the closure struct. 
+ # It isn't obvious whether the right thing to do would be to capture by reference or by + # value (C++ itself doesn't know either for lambda functions and forces a choice). + # However, capture by reference involves converting to FakeReference which would require + # re-analysing AttributeNodes. Therefore I've picked capture-by-value out of convenience + # TODO - could probably be optimized by making the arg a reference but the closure not + # (see https://github.com/cython/cython/issues/2468) + type = PyrexTypes.remove_cv_ref(type, remove_fakeref=False) + + name_decl.type = type + new_arg = Nodes.CArgDeclNode(pos=pos, declarator=name_decl, + base_type=None, default=None, annotation=None) + new_arg.name = name_decl.name + new_arg.type = type + + self.args.append(new_arg) + node.generator_arg_tag = None # avoid the possibility of this being caught again + self.call_parameters.append(node) + new_arg.entry = def_node.declare_argument(def_node.local_scope, new_arg) + new_arg.entry.cname = cname + new_arg.entry.in_closure = True + + if do_visit_children: + # now visit the Nodes's children (but remove self.gen_node to not to further + # argument substitution) + gen_node, self.gen_node = self.gen_node, None + self.visitchildren(node) + self.gen_node = gen_node + + # replace the node inside the generator with a looked-up name + # (initialized_check can safely be False because the source variable will be checked + # before it is captured if the check is required) + name_node = ExprNodes.NameNode(pos, name=name, initialized_check=False) + name_node.entry = self.gen_node.def_node.gbody.local_scope.lookup(name_node.name) + name_node.type = name_node.entry.type + self.substitutions[node] = name_node + return name_node + if do_visit_children: + self.visitchildren(node) + return node + + def visit_ExprNode(self, node): + return self._handle_ExprNode(node, True) + + visit_Node = VisitorTransform.recurse_to_children + + +class DecoratorTransform(ScopeTrackingTransform, 
SkipDeclarations): + """ + Transforms method decorators in cdef classes into nested calls or properties. + + Python-style decorator properties are transformed into a PropertyNode + with up to the three getter, setter and deleter DefNodes. + The functional style isn't supported yet. + """ + _properties = None + + _map_property_attribute = { + 'getter': EncodedString('__get__'), + 'setter': EncodedString('__set__'), + 'deleter': EncodedString('__del__'), + }.get + + def visit_CClassDefNode(self, node): + if self._properties is None: + self._properties = [] + self._properties.append({}) + node = super(DecoratorTransform, self).visit_CClassDefNode(node) + self._properties.pop() + return node + + def visit_PropertyNode(self, node): + # Low-level warning for other code until we can convert all our uses over. + level = 2 if isinstance(node.pos[0], str) else 0 + warning(node.pos, "'property %s:' syntax is deprecated, use '@property'" % node.name, level) + return node + + def visit_CFuncDefNode(self, node): + node = self.visit_FuncDefNode(node) + if not node.decorators: + return node + elif self.scope_type != 'cclass' or self.scope_node.visibility != "extern": + # at the moment cdef functions are very restricted in what decorators they can take + # so it's simple to test for the small number of allowed decorators.... 
+ if not (len(node.decorators) == 1 and node.decorators[0].decorator.is_name and + node.decorators[0].decorator.name == "staticmethod"): + error(node.decorators[0].pos, "Cdef functions cannot take arbitrary decorators.") + return node + + ret_node = node + decorator_node = self._find_property_decorator(node) + if decorator_node: + if decorator_node.decorator.is_name: + name = node.declared_name() + if name: + ret_node = self._add_property(node, name, decorator_node) + else: + error(decorator_node.pos, "C property decorator can only be @property") + + if node.decorators: + return self._reject_decorated_property(node, node.decorators[0]) + return ret_node + + def visit_DefNode(self, node): + scope_type = self.scope_type + node = self.visit_FuncDefNode(node) + if scope_type != 'cclass' or not node.decorators: + return node + + # transform @property decorators + decorator_node = self._find_property_decorator(node) + if decorator_node is not None: + decorator = decorator_node.decorator + if decorator.is_name: + return self._add_property(node, node.name, decorator_node) + else: + handler_name = self._map_property_attribute(decorator.attribute) + if handler_name: + if decorator.obj.name != node.name: + # CPython does not generate an error or warning, but not something useful either. 
+ error(decorator_node.pos, + "Mismatching property names, expected '%s', got '%s'" % ( + decorator.obj.name, node.name)) + elif len(node.decorators) > 1: + return self._reject_decorated_property(node, decorator_node) + else: + return self._add_to_property(node, handler_name, decorator_node) + + # we clear node.decorators, so we need to set the + # is_staticmethod/is_classmethod attributes now + for decorator in node.decorators: + func = decorator.decorator + if func.is_name: + node.is_classmethod |= func.name == 'classmethod' + node.is_staticmethod |= func.name == 'staticmethod' + + # transform normal decorators + decs = node.decorators + node.decorators = None + return self.chain_decorators(node, decs, node.name) + + def _find_property_decorator(self, node): + properties = self._properties[-1] + for decorator_node in node.decorators[::-1]: + decorator = decorator_node.decorator + if decorator.is_name and decorator.name == 'property': + # @property + return decorator_node + elif decorator.is_attribute and decorator.obj.name in properties: + # @prop.setter etc. 
+ return decorator_node + return None + + @staticmethod + def _reject_decorated_property(node, decorator_node): + # restrict transformation to outermost decorator as wrapped properties will probably not work + for deco in node.decorators: + if deco != decorator_node: + error(deco.pos, "Property methods with additional decorators are not supported") + return node + + def _add_property(self, node, name, decorator_node): + if len(node.decorators) > 1: + return self._reject_decorated_property(node, decorator_node) + node.decorators.remove(decorator_node) + properties = self._properties[-1] + is_cproperty = isinstance(node, Nodes.CFuncDefNode) + body = Nodes.StatListNode(node.pos, stats=[node]) + if is_cproperty: + if name in properties: + error(node.pos, "C property redeclared") + if 'inline' not in node.modifiers: + error(node.pos, "C property method must be declared 'inline'") + prop = Nodes.CPropertyNode(node.pos, doc=node.doc, name=name, body=body) + elif name in properties: + prop = properties[name] + if prop.is_cproperty: + error(node.pos, "C property redeclared") + else: + node.name = EncodedString("__get__") + prop.pos = node.pos + prop.doc = node.doc + prop.body.stats = [node] + return None + else: + node.name = EncodedString("__get__") + prop = Nodes.PropertyNode( + node.pos, name=name, doc=node.doc, body=body) + properties[name] = prop + return prop + + def _add_to_property(self, node, name, decorator): + properties = self._properties[-1] + prop = properties[node.name] + if prop.is_cproperty: + error(node.pos, "C property redeclared") + return None + node.name = name + node.decorators.remove(decorator) + stats = prop.body.stats + for i, stat in enumerate(stats): + if stat.name == name: + stats[i] = node + break + else: + stats.append(node) + return None + + @staticmethod + def chain_decorators(node, decorators, name): + """ + Decorators are applied directly in DefNode and PyClassDefNode to avoid + reassignments to the function/class name - except for cdef 
class methods. + For those, the reassignment is required as methods are originally + defined in the PyMethodDef struct. + + The IndirectionNode allows DefNode to override the decorator. + """ + decorator_result = ExprNodes.NameNode(node.pos, name=name) + for decorator in decorators[::-1]: + decorator_result = ExprNodes.SimpleCallNode( + decorator.pos, + function=decorator.decorator, + args=[decorator_result]) + + name_node = ExprNodes.NameNode(node.pos, name=name) + reassignment = Nodes.SingleAssignmentNode( + node.pos, + lhs=name_node, + rhs=decorator_result) + + reassignment = Nodes.IndirectionNode([reassignment]) + node.decorator_indirection = reassignment + return [node, reassignment] + + +class CnameDirectivesTransform(CythonTransform, SkipDeclarations): + """ + Only part of the CythonUtilityCode pipeline. Must be run before + DecoratorTransform in case this is a decorator for a cdef class. + It filters out @cname('my_cname') decorators and rewrites them to + CnameDecoratorNodes. + """ + + def handle_function(self, node): + if not getattr(node, 'decorators', None): + return self.visit_Node(node) + + for i, decorator in enumerate(node.decorators): + decorator = decorator.decorator + + if (isinstance(decorator, ExprNodes.CallNode) and + decorator.function.is_name and + decorator.function.name == 'cname'): + args, kwargs = decorator.explicit_args_kwds() + + if kwargs: + raise AssertionError( + "cname decorator does not take keyword arguments") + + if len(args) != 1: + raise AssertionError( + "cname decorator takes exactly one argument") + + if not (args[0].is_literal and + args[0].type == Builtin.str_type): + raise AssertionError( + "argument to cname decorator must be a string literal") + + cname = args[0].compile_time_value(None) + del node.decorators[i] + node = Nodes.CnameDecoratorNode(pos=node.pos, node=node, + cname=cname) + break + + return self.visit_Node(node) + + visit_FuncDefNode = handle_function + visit_CClassDefNode = handle_function + 
    visit_CEnumDefNode = handle_function
    visit_CStructOrUnionDefNode = handle_function


class ForwardDeclareTypes(CythonTransform):
    """
    Declare all global cdef names that we allow referencing in other places,
    before declaring everything (else) in source code order.
    """

    def visit_CompilerDirectivesNode(self, node):
        # Temporarily install this node's directives on the module scope
        # while its subtree is being declared, then restore the old ones.
        env = self.module_scope
        old = env.directives
        env.directives = node.directives
        self.visitchildren(node)
        env.directives = old
        return node

    def visit_ModuleNode(self, node):
        self.module_scope = node.scope
        self.module_scope.directives = node.directives
        self.visitchildren(node)
        return node

    def visit_CDefExternNode(self, node):
        # Everything below a "cdef extern" block is an external declaration;
        # flag the scope accordingly for the duration of the subtree visit.
        old_cinclude_flag = self.module_scope.in_cinclude
        self.module_scope.in_cinclude = 1
        self.visitchildren(node)
        self.module_scope.in_cinclude = old_cinclude_flag
        return node

    def visit_CEnumDefNode(self, node):
        node.declare(self.module_scope)
        return node

    def visit_CStructOrUnionDefNode(self, node):
        # Only forward-declare the first occurrence of the name.
        if node.name not in self.module_scope.entries:
            node.declare(self.module_scope)
        return node

    def visit_CClassDefNode(self, node):
        if node.class_name not in self.module_scope.entries:
            node.declare(self.module_scope)
        # Expand fused methods of .pxd declared types to construct the final vtable order.
+ type = self.module_scope.entries[node.class_name].type + if type is not None and type.is_extension_type and not type.is_builtin_type and type.scope: + scope = type.scope + for entry in scope.cfunc_entries: + if entry.type and entry.type.is_fused: + entry.type.get_all_specialized_function_types() + return node + + def visit_FuncDefNode(self, node): + # no traversal needed + return node + + def visit_PyClassDefNode(self, node): + # no traversal needed + return node + + +class AnalyseDeclarationsTransform(EnvTransform): + + basic_property = TreeFragment(u""" +property NAME: + def __get__(self): + return ATTR + def __set__(self, value): + ATTR = value + """, level='c_class', pipeline=[NormalizeTree(None)]) + basic_pyobject_property = TreeFragment(u""" +property NAME: + def __get__(self): + return ATTR + def __set__(self, value): + ATTR = value + def __del__(self): + ATTR = None + """, level='c_class', pipeline=[NormalizeTree(None)]) + basic_property_ro = TreeFragment(u""" +property NAME: + def __get__(self): + return ATTR + """, level='c_class', pipeline=[NormalizeTree(None)]) + + struct_or_union_wrapper = TreeFragment(u""" +cdef class NAME: + cdef TYPE value + def __init__(self, MEMBER=None): + cdef int count + count = 0 + INIT_ASSIGNMENTS + if IS_UNION and count > 1: + raise ValueError, "At most one union member should be specified." + def __str__(self): + return STR_FORMAT % MEMBER_TUPLE + def __repr__(self): + return REPR_FORMAT % MEMBER_TUPLE + """, pipeline=[NormalizeTree(None)]) + + init_assignment = TreeFragment(u""" +if VALUE is not None: + ATTR = VALUE + count += 1 + """, pipeline=[NormalizeTree(None)]) + + fused_function = None + in_lambda = 0 + + def __call__(self, root): + # needed to determine if a cdef var is declared after it's used. 
+ self.seen_vars_stack = [] + self.fused_error_funcs = set() + super_class = super(AnalyseDeclarationsTransform, self) + self._super_visit_FuncDefNode = super_class.visit_FuncDefNode + return super_class.__call__(root) + + def visit_NameNode(self, node): + self.seen_vars_stack[-1].add(node.name) + return node + + def visit_ModuleNode(self, node): + # Pickling support requires injecting module-level nodes. + self.extra_module_declarations = [] + self.seen_vars_stack.append(set()) + node.analyse_declarations(self.current_env()) + self.visitchildren(node) + self.seen_vars_stack.pop() + node.body.stats.extend(self.extra_module_declarations) + return node + + def visit_LambdaNode(self, node): + self.in_lambda += 1 + node.analyse_declarations(self.current_env()) + self.visitchildren(node) + self.in_lambda -= 1 + return node + + def visit_CClassDefNode(self, node): + node = self.visit_ClassDefNode(node) + if node.scope and 'dataclasses.dataclass' in node.scope.directives: + from .Dataclass import handle_cclass_dataclass + handle_cclass_dataclass(node, node.scope.directives['dataclasses.dataclass'], self) + if node.scope and node.scope.implemented and node.body: + stats = [] + for entry in node.scope.var_entries: + if entry.needs_property: + property = self.create_Property(entry) + property.analyse_declarations(node.scope) + self.visit(property) + stats.append(property) + if stats: + node.body.stats += stats + if (node.visibility != 'extern' + and not node.scope.lookup('__reduce__') + and not node.scope.lookup('__reduce_ex__')): + self._inject_pickle_methods(node) + return node + + def _inject_pickle_methods(self, node): + env = self.current_env() + if node.scope.directives['auto_pickle'] is False: # None means attempt it. + # Old behavior of not doing anything. 
+ return + auto_pickle_forced = node.scope.directives['auto_pickle'] is True + + all_members = [] + cls = node.entry.type + cinit = None + inherited_reduce = None + while cls is not None: + all_members.extend(e for e in cls.scope.var_entries if e.name not in ('__weakref__', '__dict__')) + cinit = cinit or cls.scope.lookup('__cinit__') + inherited_reduce = inherited_reduce or cls.scope.lookup('__reduce__') or cls.scope.lookup('__reduce_ex__') + cls = cls.base_type + all_members.sort(key=lambda e: e.name) + + if inherited_reduce: + # This is not failsafe, as we may not know whether a cimported class defines a __reduce__. + # This is why we define __reduce_cython__ and only replace __reduce__ + # (via ExtensionTypes.SetupReduce utility code) at runtime on class creation. + return + + non_py = [ + e for e in all_members + if not e.type.is_pyobject and (not e.type.can_coerce_to_pyobject(env) + or not e.type.can_coerce_from_pyobject(env)) + ] + + structs = [e for e in all_members if e.type.is_struct_or_union] + + if cinit or non_py or (structs and not auto_pickle_forced): + if cinit: + # TODO(robertwb): We could allow this if __cinit__ has no require arguments. + msg = 'no default __reduce__ due to non-trivial __cinit__' + elif non_py: + msg = "%s cannot be converted to a Python object for pickling" % ','.join("self.%s" % e.name for e in non_py) + else: + # Extern structs may be only partially defined. + # TODO(robertwb): Limit the restriction to extern + # (and recursively extern-containing) structs. 
+ msg = ("Pickling of struct members such as %s must be explicitly requested " + "with @auto_pickle(True)" % ','.join("self.%s" % e.name for e in structs)) + + if auto_pickle_forced: + error(node.pos, msg) + + pickle_func = TreeFragment(u""" + def __reduce_cython__(self): + raise TypeError, "%(msg)s" + def __setstate_cython__(self, __pyx_state): + raise TypeError, "%(msg)s" + """ % {'msg': msg}, + level='c_class', pipeline=[NormalizeTree(None)]).substitute({}) + pickle_func.analyse_declarations(node.scope) + self.visit(pickle_func) + node.body.stats.append(pickle_func) + + else: + for e in all_members: + if not e.type.is_pyobject: + e.type.create_to_py_utility_code(env) + e.type.create_from_py_utility_code(env) + + all_members_names = [e.name for e in all_members] + checksums = _calculate_pickle_checksums(all_members_names) + + unpickle_func_name = '__pyx_unpickle_%s' % node.punycode_class_name + + # TODO(robertwb): Move the state into the third argument + # so it can be pickled *after* self is memoized. 
+ unpickle_func = TreeFragment(u""" + def %(unpickle_func_name)s(__pyx_type, long __pyx_checksum, __pyx_state): + cdef object __pyx_PickleError + cdef object __pyx_result + if __pyx_checksum not in %(checksums)s: + from pickle import PickleError as __pyx_PickleError + raise __pyx_PickleError, "Incompatible checksums (0x%%x vs %(checksums)s = (%(members)s))" %% __pyx_checksum + __pyx_result = %(class_name)s.__new__(__pyx_type) + if __pyx_state is not None: + %(unpickle_func_name)s__set_state(<%(class_name)s> __pyx_result, __pyx_state) + return __pyx_result + + cdef %(unpickle_func_name)s__set_state(%(class_name)s __pyx_result, tuple __pyx_state): + %(assignments)s + if len(__pyx_state) > %(num_members)d and hasattr(__pyx_result, '__dict__'): + __pyx_result.__dict__.update(__pyx_state[%(num_members)d]) + """ % { + 'unpickle_func_name': unpickle_func_name, + 'checksums': "(%s)" % ', '.join(checksums), + 'members': ', '.join(all_members_names), + 'class_name': node.class_name, + 'assignments': '; '.join( + '__pyx_result.%s = __pyx_state[%s]' % (v, ix) + for ix, v in enumerate(all_members_names)), + 'num_members': len(all_members_names), + }, level='module', pipeline=[NormalizeTree(None)]).substitute({}) + unpickle_func.analyse_declarations(node.entry.scope) + self.visit(unpickle_func) + self.extra_module_declarations.append(unpickle_func) + + pickle_func = TreeFragment(u""" + def __reduce_cython__(self): + cdef tuple state + cdef object _dict + cdef bint use_setstate + state = (%(members)s) + _dict = getattr(self, '__dict__', None) + if _dict is not None: + state += (_dict,) + use_setstate = True + else: + use_setstate = %(any_notnone_members)s + if use_setstate: + return %(unpickle_func_name)s, (type(self), %(checksum)s, None), state + else: + return %(unpickle_func_name)s, (type(self), %(checksum)s, state) + + def __setstate_cython__(self, __pyx_state): + %(unpickle_func_name)s__set_state(self, __pyx_state) + """ % { + 'unpickle_func_name': unpickle_func_name, + 
'checksum': checksums[0], + 'members': ', '.join('self.%s' % v for v in all_members_names) + (',' if len(all_members_names) == 1 else ''), + # Even better, we could check PyType_IS_GC. + 'any_notnone_members' : ' or '.join(['self.%s is not None' % e.name for e in all_members if e.type.is_pyobject] or ['False']), + }, + level='c_class', pipeline=[NormalizeTree(None)]).substitute({}) + pickle_func.analyse_declarations(node.scope) + self.enter_scope(node, node.scope) # functions should be visited in the class scope + self.visit(pickle_func) + self.exit_scope() + node.body.stats.append(pickle_func) + + def _handle_fused_def_decorators(self, old_decorators, env, node): + """ + Create function calls to the decorators and reassignments to + the function. + """ + # Delete staticmethod and classmethod decorators, this is + # handled directly by the fused function object. + decorators = [] + for decorator in old_decorators: + func = decorator.decorator + if (not func.is_name or + func.name not in ('staticmethod', 'classmethod') or + env.lookup_here(func.name)): + # not a static or classmethod + decorators.append(decorator) + + if decorators: + transform = DecoratorTransform(self.context) + def_node = node.node + _, reassignments = transform.chain_decorators( + def_node, decorators, def_node.name) + reassignments.analyse_declarations(env) + node = [node, reassignments] + + return node + + def _handle_def(self, decorators, env, node): + "Handle def or cpdef fused functions" + # Create PyCFunction nodes for each specialization + node.stats.insert(0, node.py_func) + self.visitchild(node, 'py_func') + node.update_fused_defnode_entry(env) + # For the moment, fused functions do not support METH_FASTCALL + node.py_func.entry.signature.use_fastcall = False + pycfunc = ExprNodes.PyCFunctionNode.from_defnode(node.py_func, binding=True) + pycfunc = ExprNodes.ProxyNode(pycfunc.coerce_to_temp(env)) + node.resulting_fused_function = pycfunc + # Create assignment node for our def function + 
node.fused_func_assignment = self._create_assignment( + node.py_func, ExprNodes.CloneNode(pycfunc), env) + + if decorators: + node = self._handle_fused_def_decorators(decorators, env, node) + + return node + + def _create_fused_function(self, env, node): + "Create a fused function for a DefNode with fused arguments" + from . import FusedNode + + if self.fused_function or self.in_lambda: + if self.fused_function not in self.fused_error_funcs: + if self.in_lambda: + error(node.pos, "Fused lambdas not allowed") + else: + error(node.pos, "Cannot nest fused functions") + + self.fused_error_funcs.add(self.fused_function) + + node.body = Nodes.PassStatNode(node.pos) + for arg in node.args: + if arg.type.is_fused: + arg.type = arg.type.get_fused_types()[0] + + return node + + decorators = getattr(node, 'decorators', None) + node = FusedNode.FusedCFuncDefNode(node, env) + self.fused_function = node + self.visitchildren(node) + self.fused_function = None + if node.py_func: + node = self._handle_def(decorators, env, node) + + return node + + def _handle_fused(self, node): + if node.is_generator and node.has_fused_arguments: + node.has_fused_arguments = False + error(node.pos, "Fused generators not supported") + node.gbody = Nodes.StatListNode(node.pos, + stats=[], + body=Nodes.PassStatNode(node.pos)) + + return node.has_fused_arguments + + def visit_FuncDefNode(self, node): + """ + Analyse a function and its body, as that hasn't happened yet. Also + analyse the directive_locals set by @cython.locals(). + + Then, if we are a function with fused arguments, replace the function + (after it has declared itself in the symbol table!) with a + FusedCFuncDefNode, and analyse its children (which are in turn normal + functions). If we're a normal function, just analyse the body of the + function. + """ + env = self.current_env() + + self.seen_vars_stack.append(set()) + lenv = node.local_scope + node.declare_arguments(lenv) + + # @cython.locals(...) 
+ for var, type_node in node.directive_locals.items(): + if not lenv.lookup_here(var): # don't redeclare args + type = type_node.analyse_as_type(lenv) + if type and type.is_fused and lenv.fused_to_specific: + type = type.specialize(lenv.fused_to_specific) + if type: + lenv.declare_var(var, type, type_node.pos) + else: + error(type_node.pos, "Not a type") + + if self._handle_fused(node): + node = self._create_fused_function(env, node) + else: + node.body.analyse_declarations(lenv) + self._super_visit_FuncDefNode(node) + + self.seen_vars_stack.pop() + + if "ufunc" in lenv.directives: + from . import UFuncs + return UFuncs.convert_to_ufunc(node) + return node + + def visit_DefNode(self, node): + node = self.visit_FuncDefNode(node) + env = self.current_env() + if (not isinstance(node, Nodes.DefNode) or + node.fused_py_func or node.is_generator_body or + not node.needs_assignment_synthesis(env)): + return node + return [node, self._synthesize_assignment(node, env)] + + def visit_GeneratorBodyDefNode(self, node): + return self.visit_FuncDefNode(node) + + def _synthesize_assignment(self, node, env): + # Synthesize assignment node and put it right after defnode + genv = env + while genv.is_py_class_scope or genv.is_c_class_scope: + genv = genv.outer_scope + + if genv.is_closure_scope: + rhs = node.py_cfunc_node = ExprNodes.InnerFunctionNode( + node.pos, def_node=node, + pymethdef_cname=node.entry.pymethdef_cname, + code_object=ExprNodes.CodeObjectNode(node)) + else: + binding = self.current_directives.get('binding') + rhs = ExprNodes.PyCFunctionNode.from_defnode(node, binding) + node.code_object = rhs.code_object + if node.is_generator: + node.gbody.code_object = node.code_object + + if env.is_py_class_scope: + rhs.binding = True + + node.is_cyfunction = rhs.binding + return self._create_assignment(node, rhs, env) + + def _create_assignment(self, def_node, rhs, env): + if def_node.decorators: + for decorator in def_node.decorators[::-1]: + rhs = ExprNodes.SimpleCallNode( + 
decorator.pos, + function = decorator.decorator, + args = [rhs]) + def_node.decorators = None + + assmt = Nodes.SingleAssignmentNode( + def_node.pos, + lhs=ExprNodes.NameNode(def_node.pos, name=def_node.name), + rhs=rhs) + assmt.analyse_declarations(env) + return assmt + + def visit_func_outer_attrs(self, node): + # any names in the outer attrs should not be looked up in the function "seen_vars_stack" + stack = self.seen_vars_stack.pop() + super(AnalyseDeclarationsTransform, self).visit_func_outer_attrs(node) + self.seen_vars_stack.append(stack) + + def visit_ScopedExprNode(self, node): + env = self.current_env() + node.analyse_declarations(env) + # the node may or may not have a local scope + if node.expr_scope: + self.seen_vars_stack.append(set(self.seen_vars_stack[-1])) + self.enter_scope(node, node.expr_scope) + node.analyse_scoped_declarations(node.expr_scope) + self.visitchildren(node) + self.exit_scope() + self.seen_vars_stack.pop() + else: + + node.analyse_scoped_declarations(env) + self.visitchildren(node) + return node + + def visit_TempResultFromStatNode(self, node): + self.visitchildren(node) + node.analyse_declarations(self.current_env()) + return node + + def visit_CppClassNode(self, node): + if node.visibility == 'extern': + return None + else: + return self.visit_ClassDefNode(node) + + def visit_CStructOrUnionDefNode(self, node): + # Create a wrapper node if needed. + # We want to use the struct type information (so it can't happen + # before this phase) but also create new objects to be declared + # (so it can't happen later). + # Note that we don't return the original node, as it is + # never used after this phase. 
+ if True: # private (default) + return None + + self_value = ExprNodes.AttributeNode( + pos = node.pos, + obj = ExprNodes.NameNode(pos=node.pos, name=u"self"), + attribute = EncodedString(u"value")) + var_entries = node.entry.type.scope.var_entries + attributes = [] + for entry in var_entries: + attributes.append(ExprNodes.AttributeNode(pos = entry.pos, + obj = self_value, + attribute = entry.name)) + # __init__ assignments + init_assignments = [] + for entry, attr in zip(var_entries, attributes): + # TODO: branch on visibility + init_assignments.append(self.init_assignment.substitute({ + u"VALUE": ExprNodes.NameNode(entry.pos, name = entry.name), + u"ATTR": attr, + }, pos = entry.pos)) + + # create the class + str_format = u"%s(%s)" % (node.entry.type.name, ("%s, " * len(attributes))[:-2]) + wrapper_class = self.struct_or_union_wrapper.substitute({ + u"INIT_ASSIGNMENTS": Nodes.StatListNode(node.pos, stats = init_assignments), + u"IS_UNION": ExprNodes.BoolNode(node.pos, value = not node.entry.type.is_struct), + u"MEMBER_TUPLE": ExprNodes.TupleNode(node.pos, args=attributes), + u"STR_FORMAT": ExprNodes.StringNode(node.pos, value = EncodedString(str_format)), + u"REPR_FORMAT": ExprNodes.StringNode(node.pos, value = EncodedString(str_format.replace("%s", "%r"))), + }, pos = node.pos).stats[0] + wrapper_class.class_name = node.name + wrapper_class.shadow = True + class_body = wrapper_class.body.stats + + # fix value type + assert isinstance(class_body[0].base_type, Nodes.CSimpleBaseTypeNode) + class_body[0].base_type.name = node.name + + # fix __init__ arguments + init_method = class_body[1] + assert isinstance(init_method, Nodes.DefNode) and init_method.name == '__init__' + arg_template = init_method.args[1] + if not node.entry.type.is_struct: + arg_template.kw_only = True + del init_method.args[1] + for entry, attr in zip(var_entries, attributes): + arg = copy.deepcopy(arg_template) + arg.declarator.name = entry.name + init_method.args.append(arg) + + # 
setters/getters + for entry, attr in zip(var_entries, attributes): + # TODO: branch on visibility + if entry.type.is_pyobject: + template = self.basic_pyobject_property + else: + template = self.basic_property + property = template.substitute({ + u"ATTR": attr, + }, pos = entry.pos).stats[0] + property.name = entry.name + wrapper_class.body.stats.append(property) + + wrapper_class.analyse_declarations(self.current_env()) + return self.visit_CClassDefNode(wrapper_class) + + # Some nodes are no longer needed after declaration + # analysis and can be dropped. The analysis was performed + # on these nodes in a separate recursive process from the + # enclosing function or module, so we can simply drop them. + def visit_CDeclaratorNode(self, node): + # necessary to ensure that all CNameDeclaratorNodes are visited. + self.visitchildren(node) + return node + + def visit_CTypeDefNode(self, node): + return node + + def visit_CBaseTypeNode(self, node): + return None + + def visit_CEnumDefNode(self, node): + if node.visibility == 'public': + return node + else: + return None + + def visit_CNameDeclaratorNode(self, node): + if node.name in self.seen_vars_stack[-1]: + entry = self.current_env().lookup(node.name) + if (entry is None or entry.visibility != 'extern' + and not entry.scope.is_c_class_scope): + error(node.pos, "cdef variable '%s' declared after it is used" % node.name) + self.visitchildren(node) + return node + + def visit_CVarDefNode(self, node): + # to ensure all CNameDeclaratorNodes are visited. 
        self.visitchildren(node)
        return None

    def visit_CnameDecoratorNode(self, node):
        """Visit the wrapped node; if it was expanded into a list (node plus
        synthesized assignment), keep the cname decorator on the first element.
        """
        child_node = self.visitchild(node, 'node')
        if not child_node:
            return None
        if type(child_node) is list:  # Assignment synthesized
            node.node = child_node[0]
            return [node] + child_node[1:]
        return node

    def create_Property(self, entry):
        """Build a property node that exposes a 'public' or 'readonly' cdef
        attribute, instantiated from the pre-parsed TreeFragment templates
        of this class.
        """
        # NOTE(review): assumes entry.visibility is 'public' or 'readonly';
        # any other visibility would leave 'template' unbound.  Callers appear
        # to filter via entry.needs_property — confirm before reusing elsewhere.
        if entry.visibility == 'public':
            if entry.type.is_pyobject:
                # Python-object attributes additionally get a '__del__' accessor.
                template = self.basic_pyobject_property
            else:
                template = self.basic_property
        elif entry.visibility == 'readonly':
            template = self.basic_property_ro
        property = template.substitute({
            u"ATTR": ExprNodes.AttributeNode(pos=entry.pos,
                                             obj=ExprNodes.NameNode(pos=entry.pos, name="self"),
                                             attribute=entry.name),
        }, pos=entry.pos).stats[0]
        property.name = entry.name
        property.doc = entry.doc
        return property

    def visit_AssignmentExpressionNode(self, node):
        # Walrus assignments declare their target in the current scope.
        self.visitchildren(node)
        node.analyse_declarations(self.current_env())
        return node


def _calculate_pickle_checksums(member_names):
    """Return '0x'-prefixed 7-hex-digit checksums of the space-joined member
    names, one per hash algorithm that is actually available at runtime
    (tried in order: sha256, sha1, md5).
    """
    # Cython 0.x used MD5 for the checksum, which a few Python installations remove for security reasons.
    # SHA-256 should be ok for years to come, but early Cython 3.0 alpha releases used SHA-1,
    # which may not be.
    member_names_string = ' '.join(member_names).encode('utf-8')
    hash_kwargs = {'usedforsecurity': False} if sys.version_info >= (3, 9) else {}
    checksums = []
    for algo_name in ['sha256', 'sha1', 'md5']:
        try:
            mkchecksum = getattr(hashlib, algo_name)
            checksum = mkchecksum(member_names_string, **hash_kwargs).hexdigest()
        except (AttributeError, ValueError):
            # The algorithm (i.e. MD5) might not be there at all, or might be blocked at runtime.
            continue
        checksums.append('0x' + checksum[:7])
    return checksums


class CalculateQualifiedNamesTransform(EnvTransform):
    """
    Calculate and store the '__qualname__' and the global
    module name on some nodes.
+ """ + needs_qualname_assignment = False + needs_module_assignment = False + + def visit_ModuleNode(self, node): + self.module_name = self.global_scope().qualified_name + self.qualified_name = [] + _super = super(CalculateQualifiedNamesTransform, self) + self._super_visit_FuncDefNode = _super.visit_FuncDefNode + self._super_visit_ClassDefNode = _super.visit_ClassDefNode + self.visitchildren(node) + return node + + def _set_qualname(self, node, name=None): + if name: + qualname = self.qualified_name[:] + qualname.append(name) + else: + qualname = self.qualified_name + node.qualname = EncodedString('.'.join(qualname)) + node.module_name = self.module_name + + def _append_entry(self, entry): + if entry.is_pyglobal and not entry.is_pyclass_attr: + self.qualified_name = [entry.name] + else: + self.qualified_name.append(entry.name) + + def visit_ClassNode(self, node): + self._set_qualname(node, node.name) + self.visitchildren(node) + return node + + def visit_PyClassNamespaceNode(self, node): + # class name was already added by parent node + self._set_qualname(node) + self.visitchildren(node) + return node + + def visit_PyCFunctionNode(self, node): + orig_qualified_name = self.qualified_name[:] + if node.def_node.is_wrapper and self.qualified_name and self.qualified_name[-1] == '': + self.qualified_name.pop() + self._set_qualname(node) + else: + self._set_qualname(node, node.def_node.name) + self.visitchildren(node) + self.qualified_name = orig_qualified_name + return node + + def visit_DefNode(self, node): + if node.is_wrapper and self.qualified_name: + assert self.qualified_name[-1] == '', self.qualified_name + orig_qualified_name = self.qualified_name[:] + self.qualified_name.pop() + self._set_qualname(node) + self._super_visit_FuncDefNode(node) + self.qualified_name = orig_qualified_name + else: + self._set_qualname(node, node.name) + self.visit_FuncDefNode(node) + return node + + def visit_FuncDefNode(self, node): + orig_qualified_name = self.qualified_name[:] + if 
getattr(node, 'name', None) == '': + self.qualified_name.append('') + else: + self._append_entry(node.entry) + self.qualified_name.append('') + self._super_visit_FuncDefNode(node) + self.qualified_name = orig_qualified_name + return node + + def generate_assignment(self, node, name, value): + entry = node.scope.lookup_here(name) + lhs = ExprNodes.NameNode( + node.pos, + name = EncodedString(name), + entry=entry) + rhs = ExprNodes.StringNode( + node.pos, + value=value.as_utf8_string(), + unicode_value=value) + node.body.stats.insert(0, Nodes.SingleAssignmentNode( + node.pos, + lhs=lhs, + rhs=rhs, + ).analyse_expressions(self.current_env())) + + def visit_ClassDefNode(self, node): + orig_needs_qualname_assignment = self.needs_qualname_assignment + self.needs_qualname_assignment = False + orig_needs_module_assignment = self.needs_module_assignment + self.needs_module_assignment = False + orig_qualified_name = self.qualified_name[:] + entry = (getattr(node, 'entry', None) or # PyClass + self.current_env().lookup_here(node.target.name)) # CClass + self._append_entry(entry) + self._super_visit_ClassDefNode(node) + if self.needs_qualname_assignment: + self.generate_assignment(node, "__qualname__", + EncodedString(".".join(self.qualified_name))) + if self.needs_module_assignment: + self.generate_assignment(node, "__module__", + EncodedString(self.module_name)) + self.qualified_name = orig_qualified_name + self.needs_qualname_assignment = orig_needs_qualname_assignment + self.needs_module_assignment = orig_needs_module_assignment + return node + + def visit_NameNode(self, node): + scope = self.current_env() + if scope.is_c_class_scope: + # unlike for a PyClass scope, these attributes aren't defined in the + # dictionary when the class definition is executed, therefore we ask + # the compiler to generate an assignment to them at the start of the + # body. 
            # NOTE: this doesn't put them in locals()
            if node.name == "__qualname__":
                self.needs_qualname_assignment = True
            elif node.name == "__module__":
                self.needs_module_assignment = True
        return node


class AnalyseExpressionsTransform(CythonTransform):
    # Runs type inference followed by expression analysis, scope by scope
    # (module scope, function-local scopes, scoped expression scopes).

    def visit_ModuleNode(self, node):
        node.scope.infer_types()
        node.body = node.body.analyse_expressions(node.scope)
        self.visitchildren(node)
        return node

    def visit_FuncDefNode(self, node):
        node.local_scope.infer_types()
        node.body = node.body.analyse_expressions(node.local_scope)
        self.visitchildren(node)
        return node

    def visit_ScopedExprNode(self, node):
        # Comprehensions/genexps may or may not own a local scope.
        if node.has_local_scope:
            node.expr_scope.infer_types()
            node = node.analyse_scoped_expressions(node.expr_scope)
        self.visitchildren(node)
        return node

    def visit_IndexNode(self, node):
        """
        Replace index nodes used to specialize cdef functions with fused
        argument types with the Attribute- or NameNode referring to the
        function. We then need to copy over the specialization properties to
        the attribute or name node.

        Because the indexing might be a Python indexing operation on a fused
        function, or (usually) a Cython indexing operation, we need to
        re-analyse the types.
+ """ + self.visit_Node(node) + if node.is_fused_index and not node.type.is_error: + node = node.base + return node + + +class FindInvalidUseOfFusedTypes(CythonTransform): + + def visit_FuncDefNode(self, node): + # Errors related to use in functions with fused args will already + # have been detected + if not node.has_fused_arguments: + if not node.is_generator_body and node.return_type.is_fused: + error(node.pos, "Return type is not specified as argument type") + else: + self.visitchildren(node) + + return node + + def visit_ExprNode(self, node): + if node.type and node.type.is_fused: + error(node.pos, "Invalid use of fused types, type cannot be specialized") + else: + self.visitchildren(node) + + return node + + +class ExpandInplaceOperators(EnvTransform): + + def visit_InPlaceAssignmentNode(self, node): + lhs = node.lhs + rhs = node.rhs + if lhs.type.is_cpp_class: + # No getting around this exact operator here. + return node + if isinstance(lhs, ExprNodes.BufferIndexNode): + # There is code to handle this case in InPlaceAssignmentNode + return node + + env = self.current_env() + def side_effect_free_reference(node, setting=False): + if node.is_name: + return node, [] + elif node.type.is_pyobject and not setting: + node = LetRefNode(node) + return node, [node] + elif node.is_subscript: + base, temps = side_effect_free_reference(node.base) + index = LetRefNode(node.index) + return ExprNodes.IndexNode(node.pos, base=base, index=index), temps + [index] + elif node.is_attribute: + obj, temps = side_effect_free_reference(node.obj) + return ExprNodes.AttributeNode(node.pos, obj=obj, attribute=node.attribute), temps + elif isinstance(node, ExprNodes.BufferIndexNode): + raise ValueError("Don't allow things like attributes of buffer indexing operations") + else: + node = LetRefNode(node) + return node, [node] + try: + lhs, let_ref_nodes = side_effect_free_reference(lhs, setting=True) + except ValueError: + return node + dup = lhs.__class__(**lhs.__dict__) + binop = 
ExprNodes.binop_node(node.pos, + operator = node.operator, + operand1 = dup, + operand2 = rhs, + inplace=True) + # Manually analyse types for new node. + lhs = lhs.analyse_target_types(env) + dup.analyse_types(env) # FIXME: no need to reanalyse the copy, right? + binop.analyse_operation(env) + node = Nodes.SingleAssignmentNode( + node.pos, + lhs = lhs, + rhs=binop.coerce_to(lhs.type, env)) + # Use LetRefNode to avoid side effects. + let_ref_nodes.reverse() + for t in let_ref_nodes: + node = LetNode(t, node) + return node + + def visit_ExprNode(self, node): + # In-place assignments can't happen within an expression. + return node + + +class AdjustDefByDirectives(CythonTransform, SkipDeclarations): + """ + Adjust function and class definitions by the decorator directives: + + @cython.cfunc + @cython.cclass + @cython.ccall + @cython.inline + @cython.nogil + """ + # list of directives that cause conversion to cclass + converts_to_cclass = ('cclass', 'total_ordering', 'dataclasses.dataclass') + + def visit_ModuleNode(self, node): + self.directives = node.directives + self.in_py_class = False + self.visitchildren(node) + return node + + def visit_CompilerDirectivesNode(self, node): + old_directives = self.directives + self.directives = node.directives + self.visitchildren(node) + self.directives = old_directives + return node + + def visit_DefNode(self, node): + modifiers = [] + if 'inline' in self.directives: + modifiers.append('inline') + nogil = self.directives.get('nogil') + with_gil = self.directives.get('with_gil') + except_val = self.directives.get('exceptval') + has_explicit_exc_clause = False if except_val is None else True + return_type_node = self.directives.get('returns') + if return_type_node is None and self.directives['annotation_typing']: + return_type_node = node.return_type_annotation + # for Python annotations, prefer safe exception handling by default + if return_type_node is not None and except_val is None: + except_val = (None, True) # except * + 
elif except_val is None: + # backward compatible default: no exception check, unless there's also a "@returns" declaration + except_val = (None, True if return_type_node else False) + if 'ccall' in self.directives: + if 'cfunc' in self.directives: + error(node.pos, "cfunc and ccall directives cannot be combined") + if with_gil: + error(node.pos, "ccall functions cannot be declared 'with_gil'") + node = node.as_cfunction( + overridable=True, modifiers=modifiers, nogil=nogil, + returns=return_type_node, except_val=except_val, has_explicit_exc_clause=has_explicit_exc_clause) + return self.visit(node) + if 'cfunc' in self.directives: + if self.in_py_class: + error(node.pos, "cfunc directive is not allowed here") + else: + node = node.as_cfunction( + overridable=False, modifiers=modifiers, nogil=nogil, with_gil=with_gil, + returns=return_type_node, except_val=except_val, has_explicit_exc_clause=has_explicit_exc_clause) + return self.visit(node) + if 'inline' in modifiers: + error(node.pos, "Python functions cannot be declared 'inline'") + if nogil: + # TODO: turn this into a "with gil" declaration. + error(node.pos, "Python functions cannot be declared 'nogil'") + if with_gil: + error(node.pos, "Python functions cannot be declared 'with_gil'") + self.visitchildren(node) + return node + + def visit_LambdaNode(self, node): + # No directives should modify lambdas or generator expressions (and also nothing in them). 
+ return node + + def visit_PyClassDefNode(self, node): + if any(directive in self.directives for directive in self.converts_to_cclass): + node = node.as_cclass() + return self.visit(node) + else: + old_in_pyclass = self.in_py_class + self.in_py_class = True + self.visitchildren(node) + self.in_py_class = old_in_pyclass + return node + + def visit_CClassDefNode(self, node): + old_in_pyclass = self.in_py_class + self.in_py_class = False + self.visitchildren(node) + self.in_py_class = old_in_pyclass + return node + + +class AlignFunctionDefinitions(CythonTransform): + """ + This class takes the signatures from a .pxd file and applies them to + the def methods in a .py file. + """ + + def visit_ModuleNode(self, node): + self.scope = node.scope + self.visitchildren(node) + return node + + def visit_PyClassDefNode(self, node): + pxd_def = self.scope.lookup(node.name) + if pxd_def: + if pxd_def.is_cclass: + return self.visit_CClassDefNode(node.as_cclass(), pxd_def) + elif not pxd_def.scope or not pxd_def.scope.is_builtin_scope: + error(node.pos, "'%s' redeclared" % node.name) + if pxd_def.pos: + error(pxd_def.pos, "previous declaration here") + return None + return node + + def visit_CClassDefNode(self, node, pxd_def=None): + if pxd_def is None: + pxd_def = self.scope.lookup(node.class_name) + if pxd_def: + if not pxd_def.defined_in_pxd: + return node + outer_scope = self.scope + self.scope = pxd_def.type.scope + self.visitchildren(node) + if pxd_def: + self.scope = outer_scope + return node + + def visit_DefNode(self, node): + pxd_def = self.scope.lookup(node.name) + if pxd_def and (not pxd_def.scope or not pxd_def.scope.is_builtin_scope): + if not pxd_def.is_cfunction: + error(node.pos, "'%s' redeclared" % node.name) + if pxd_def.pos: + error(pxd_def.pos, "previous declaration here") + return None + node = node.as_cfunction(pxd_def) + # Enable this when nested cdef functions are allowed. 
class AutoCpdefFunctionDefinitions(CythonTransform):
    """Convert plain module-level 'def' functions into cpdef functions when
    the 'auto_cpdef' directive is active.

    Functions whose names were (re-)bound by a 'from x import ...' statement
    are left alone, because the import would conflict with the C-level name.
    """

    def visit_ModuleNode(self, node):
        # Remember module-level directives and scope for the visits below.
        self.directives = node.directives
        self.imported_names = set()  # hack, see visit_FromImportStatNode()
        self.scope = node.scope
        self.visitchildren(node)
        return node

    def visit_DefNode(self, node):
        # Only module-level defs are converted, and only if their signature
        # is representable as a cdef function (checked by the node itself).
        if (self.scope.is_module_scope and self.directives['auto_cpdef']
                and node.name not in self.imported_names
                and node.is_cdef_func_compatible()):
            # FIXME: cpdef-ing should be done in analyse_declarations()
            node = node.as_cfunction(scope=self.scope)
        return node

    def visit_CClassDefNode(self, node, pxd_def=None):
        # Temporarily switch self.scope to the class scope declared in the
        # .pxd file (if any) while visiting the class body.
        if pxd_def is None:
            pxd_def = self.scope.lookup(node.class_name)
        if pxd_def:
            if not pxd_def.defined_in_pxd:
                return node
            outer_scope = self.scope
            self.scope = pxd_def.type.scope
        self.visitchildren(node)
        if pxd_def:
            self.scope = outer_scope
        return node

    def visit_FromImportStatNode(self, node):
        # hack to prevent conditional import fallback functions from
        # being cdpef-ed (global Python variables currently conflict
        # with imports)
        if self.scope.is_module_scope:
            for name, _ in node.items:
                self.imported_names.add(name)
        return node

    def visit_ExprNode(self, node):
        # ignore lambdas and everything else that appears in expressions
        return node
class YieldNodeCollector(TreeVisitor):
    """Collect the yield/await, return, try/finally and try/except nodes of
    one function body.

    The results are used to decide whether the function must become a
    generator or coroutine and to number its resume points.  Nested
    functions, lambdas and class bodies are deliberately not entered:
    their yields belong to their own scope.
    """

    def __init__(self, excludes=()):
        # 'excludes' holds subtrees that must not be searched, e.g. the
        # iterator of a generator expression, which is evaluated in the
        # enclosing scope.  An immutable default avoids the shared
        # mutable-default-argument pitfall (the previous default was '[]');
        # it is only ever used for membership tests, so any container works.
        super(YieldNodeCollector, self).__init__()
        self.yields = []
        self.returns = []
        self.finallys = []
        self.excepts = []
        self.has_return_value = False
        self.has_yield = False
        self.has_await = False
        self.excludes = excludes

    def visit_Node(self, node):
        # Recurse everywhere except into explicitly excluded subtrees.
        if node not in self.excludes:
            self.visitchildren(node)

    def visit_YieldExprNode(self, node):
        self.yields.append(node)
        self.has_yield = True
        self.visitchildren(node)

    def visit_AwaitExprNode(self, node):
        # 'await' is collected together with 'yield' since both are
        # resume points of the coroutine/generator.
        self.yields.append(node)
        self.has_await = True
        self.visitchildren(node)

    def visit_ReturnStatNode(self, node):
        self.visitchildren(node)
        if node.value:
            self.has_return_value = True
        self.returns.append(node)

    def visit_TryFinallyStatNode(self, node):
        self.visitchildren(node)
        self.finallys.append(node)

    def visit_TryExceptStatNode(self, node):
        self.visitchildren(node)
        self.excepts.append(node)

    # Do not descend into nested scopes - their yields are not ours.
    def visit_ClassDefNode(self, node):
        pass

    def visit_FuncDefNode(self, node):
        pass

    def visit_LambdaNode(self, node):
        pass

    def visit_GeneratorExpressionNode(self, node):
        # node.loop iterator is evaluated outside the generator expression
        if isinstance(node.loop, Nodes._ForInStatNode):
            # Possibly should handle ForFromStatNode
            # but for now do nothing
            self.visit(node.loop.iterator)

    def visit_CArgDeclNode(self, node):
        # do not look into annotations
        # FIXME: support (yield) in default arguments (currently crashes)
        pass
lambda_name=node.lambda_name, + return_type_annotation=node.return_type_annotation, + is_generator_expression=node.is_generator_expression) + return coroutine + + def visit_CFuncDefNode(self, node): + self.needs_closure = False + self.visitchildren(node) + node.needs_closure = self.needs_closure + self.needs_closure = True + if node.needs_closure and node.overridable: + error(node.pos, "closures inside cpdef functions not yet supported") + return node + + def visit_LambdaNode(self, node): + self.needs_closure = False + self.visitchildren(node) + node.needs_closure = self.needs_closure + self.needs_closure = True + return node + + def visit_ClassDefNode(self, node): + self.visitchildren(node) + self.needs_closure = True + return node + + def visit_GeneratorExpressionNode(self, node): + excludes = self.excludes + if isinstance(node.loop, Nodes._ForInStatNode): + self.excludes = [node.loop.iterator] + node = self.visit_LambdaNode(node) + self.excludes = excludes + if not isinstance(node.loop, Nodes._ForInStatNode): + # Possibly should handle ForFromStatNode + # but for now do nothing + return node + itseq = node.loop.iterator.sequence + # literals do not need replacing with an argument + if itseq.is_literal: + return node + _GeneratorExpressionArgumentsMarker(node).visit(itseq) + return node + + +class CreateClosureClasses(CythonTransform): + # Output closure classes in module scope for all functions + # that really need it. 
+ + def __init__(self, context): + super(CreateClosureClasses, self).__init__(context) + self.path = [] + self.in_lambda = False + + def visit_ModuleNode(self, node): + self.module_scope = node.scope + self.visitchildren(node) + return node + + def find_entries_used_in_closures(self, node): + from_closure = [] + in_closure = [] + for scope in node.local_scope.iter_local_scopes(): + for name, entry in scope.entries.items(): + if not name: + continue + if entry.from_closure: + from_closure.append((name, entry)) + elif entry.in_closure: + in_closure.append((name, entry)) + return from_closure, in_closure + + def create_class_from_scope(self, node, target_module_scope, inner_node=None): + # move local variables into closure + if node.is_generator: + for scope in node.local_scope.iter_local_scopes(): + for entry in scope.entries.values(): + if not (entry.from_closure or entry.is_pyglobal or entry.is_cglobal): + entry.in_closure = True + + from_closure, in_closure = self.find_entries_used_in_closures(node) + in_closure.sort() + + # Now from the beginning + node.needs_closure = False + node.needs_outer_scope = False + + func_scope = node.local_scope + cscope = node.entry.scope + while cscope.is_py_class_scope or cscope.is_c_class_scope: + cscope = cscope.outer_scope + + if not from_closure and (self.path or inner_node): + if not inner_node: + if not node.py_cfunc_node: + raise InternalError("DefNode does not have assignment node") + inner_node = node.py_cfunc_node + inner_node.needs_closure_code = False + node.needs_outer_scope = False + + if node.is_generator: + pass + elif not in_closure and not from_closure: + return + elif not in_closure: + func_scope.is_passthrough = True + func_scope.scope_class = cscope.scope_class + node.needs_outer_scope = True + return + + # entry.cname can contain periods (eg. a derived C method of a class). + # We want to use the cname as part of a C struct name, so we replace + # periods with double underscores. 
+ as_name = '%s_%s' % ( + target_module_scope.next_id(Naming.closure_class_prefix), + node.entry.cname.replace('.','__')) + as_name = EncodedString(as_name) + + entry = target_module_scope.declare_c_class( + name=as_name, pos=node.pos, defining=True, + implementing=True) + entry.type.is_final_type = True + + func_scope.scope_class = entry + class_scope = entry.type.scope + class_scope.is_internal = True + class_scope.is_closure_class_scope = True + if node.is_async_def or node.is_generator: + # Generators need their closure intact during cleanup as they resume to handle GeneratorExit + class_scope.directives['no_gc_clear'] = True + if Options.closure_freelist_size: + class_scope.directives['freelist'] = Options.closure_freelist_size + + if from_closure: + assert cscope.is_closure_scope + class_scope.declare_var(pos=node.pos, + name=Naming.outer_scope_cname, + cname=Naming.outer_scope_cname, + type=cscope.scope_class.type, + is_cdef=True) + node.needs_outer_scope = True + for name, entry in in_closure: + closure_entry = class_scope.declare_var( + pos=entry.pos, + name=entry.name if not entry.in_subscope else None, + cname=entry.cname, + type=entry.type, + is_cdef=True) + if entry.is_declared_generic: + closure_entry.is_declared_generic = 1 + node.needs_closure = True + # Do it here because other classes are already checked + target_module_scope.check_c_class(func_scope.scope_class) + + def visit_LambdaNode(self, node): + if not isinstance(node.def_node, Nodes.DefNode): + # fused function, an error has been previously issued + return node + + was_in_lambda = self.in_lambda + self.in_lambda = True + self.create_class_from_scope(node.def_node, self.module_scope, node) + self.visitchildren(node) + self.in_lambda = was_in_lambda + return node + + def visit_FuncDefNode(self, node): + if self.in_lambda: + self.visitchildren(node) + return node + if node.needs_closure or self.path: + self.create_class_from_scope(node, self.module_scope) + self.path.append(node) + 
class InjectGilHandling(VisitorTransform, SkipDeclarations):
    """
    Allow certain Python operations inside of nogil blocks by implicitly acquiring the GIL.

    Must run before the AnalyseDeclarationsTransform to make sure the GILStatNodes get
    set up, parallel sections know that the GIL is acquired inside of them, etc.
    """
    # Tracks whether the node currently being visited sits in a nogil section.
    nogil = False

    # special node handling

    def _inject_gil_in_nogil(self, node):
        """Allow the (Python statement) node in nogil sections by wrapping it in a 'with gil' block."""
        if self.nogil:
            node = Nodes.GILStatNode(node.pos, state='gil', body=node)
        return node

    # Statement types that get the implicit 'with gil' treatment.
    visit_RaiseStatNode = _inject_gil_in_nogil
    visit_PrintStatNode = _inject_gil_in_nogil  # sadly, not the function

    # further candidates:
    # def visit_ReraiseStatNode(self, node):

    # nogil tracking

    def visit_GILStatNode(self, node):
        # Save/restore the flag so nested 'with (no)gil' blocks unwind correctly.
        was_nogil = self.nogil
        self.nogil = (node.state == 'nogil')
        self.visitchildren(node)
        self.nogil = was_nogil
        return node

    def visit_CFuncDefNode(self, node):
        was_nogil = self.nogil
        # A 'nogil' C function body is GIL-free unless it is also 'with gil'.
        if isinstance(node.declarator, Nodes.CFuncDeclaratorNode):
            self.nogil = node.declarator.nogil and not node.declarator.with_gil
        self.visitchildren(node)
        self.nogil = was_nogil
        return node

    def visit_ParallelRangeNode(self, node):
        # prange() bodies carry their own nogil state.
        was_nogil = self.nogil
        self.nogil = node.nogil
        self.visitchildren(node)
        self.nogil = was_nogil
        return node

    def visit_ExprNode(self, node):
        # No special GIL handling inside of expressions for now.
        return node

    visit_Node = VisitorTransform.recurse_to_children
Therefore restore to previous state + self.nogil_declarator_only = nogil_declarator_only + + self.env_stack.pop() + return node + + def visit_GILStatNode(self, node): + if node.condition is not None: + error(node.condition.pos, + "Non-constant condition in a " + "`with %s()` statement" % node.state) + return node + + if self.nogil and node.nogil_check: + node.nogil_check() + + was_nogil = self.nogil + is_nogil = (node.state == 'nogil') + + if was_nogil == is_nogil and not self.nogil_declarator_only: + if not was_nogil: + error(node.pos, "Trying to acquire the GIL while it is " + "already held.") + else: + error(node.pos, "Trying to release the GIL while it was " + "previously released.") + if self.nogil_declarator_only: + node.scope_gil_state_known = False + + if isinstance(node.finally_clause, Nodes.StatListNode): + # The finally clause of the GILStatNode is a GILExitNode, + # which is wrapped in a StatListNode. Just unpack that. + node.finally_clause, = node.finally_clause.stats + + nogil_declarator_only = self.nogil_declarator_only + self.nogil_declarator_only = False + current_gilstat_node_knows_gil_state = self.current_gilstat_node_knows_gil_state + self.current_gilstat_node_knows_gil_state = node.scope_gil_state_known + self._visit_scoped_children(node, is_nogil) + self.nogil_declarator_only = nogil_declarator_only + self.current_gilstat_node_knows_gil_state = current_gilstat_node_knows_gil_state + return node + + def visit_ParallelRangeNode(self, node): + if node.nogil or self.nogil_declarator_only: + node_was_nogil, node.nogil = node.nogil, False + node = Nodes.GILStatNode(node.pos, state='nogil', body=node) + if not node_was_nogil and self.nogil_declarator_only: + # We're in a "nogil" function, but that doesn't prove we + # didn't have the gil + node.scope_gil_state_known = False + return self.visit_GILStatNode(node) + + if not self.nogil: + error(node.pos, "prange() can only be used without the GIL") + # Forget about any GIL-related errors that may occur 
in the body + return None + + node.nogil_check(self.env_stack[-1]) + self.visitchildren(node) + return node + + def visit_ParallelWithBlockNode(self, node): + if not self.nogil: + error(node.pos, "The parallel section may only be used without " + "the GIL") + return None + if self.nogil_declarator_only: + # We're in a "nogil" function but that doesn't prove we didn't + # have the gil, so release it + node = Nodes.GILStatNode(node.pos, state='nogil', body=node) + node.scope_gil_state_known = False + return self.visit_GILStatNode(node) + + if node.nogil_check: + # It does not currently implement this, but test for it anyway to + # avoid potential future surprises + node.nogil_check(self.env_stack[-1]) + + self.visitchildren(node) + return node + + def visit_TryFinallyStatNode(self, node): + """ + Take care of try/finally statements in nogil code sections. + """ + if not self.nogil or isinstance(node, Nodes.GILStatNode): + return self.visit_Node(node) + + node.nogil_check = None + node.is_try_finally_in_nogil = True + self.visitchildren(node) + return node + + def visit_GILExitNode(self, node): + if not self.current_gilstat_node_knows_gil_state: + node.scope_gil_state_known = False + self.visitchildren(node) + return node + + def visit_Node(self, node): + if self.env_stack and self.nogil and node.nogil_check: + node.nogil_check(self.env_stack[-1]) + if node.outer_attrs: + self._visit_scoped_children(node, self.nogil) + else: + self.visitchildren(node) + if self.nogil: + node.in_nogil_context = True + return node + + +class CoerceCppTemps(EnvTransform, SkipDeclarations): + """ + For temporary expression that are implemented using std::optional it's necessary the temps are + assigned using `__pyx_t_x = value;` but accessed using `something = (*__pyx_t_x)`. 
This transform + inserts a coercion node to take care of this, and runs absolutely last (once nothing else can be + inserted into the tree) + + TODO: a possible alternative would be to split ExprNode.result() into ExprNode.rhs_rhs() and ExprNode.lhs_rhs()??? + """ + def visit_ModuleNode(self, node): + if self.current_env().cpp: + # skipping this makes it essentially free for C files + self.visitchildren(node) + return node + + def visit_ExprNode(self, node): + self.visitchildren(node) + if (self.current_env().directives['cpp_locals'] and + node.is_temp and node.type.is_cpp_class and + # Fake references are not replaced with "std::optional()". + not node.type.is_fake_reference): + node = ExprNodes.CppOptionalTempCoercion(node) + + return node + + +class TransformBuiltinMethods(EnvTransform): + """ + Replace Cython's own cython.* builtins by the corresponding tree nodes. + """ + + def visit_SingleAssignmentNode(self, node): + if node.declaration_only: + return None + else: + self.visitchildren(node) + return node + + def visit_AttributeNode(self, node): + self.visitchildren(node) + return self.visit_cython_attribute(node) + + def visit_NameNode(self, node): + return self.visit_cython_attribute(node) + + def visit_cython_attribute(self, node): + attribute = node.as_cython_attribute() + if attribute: + if attribute == u'__version__': + from .. 
import __version__ as version + node = ExprNodes.StringNode(node.pos, value=EncodedString(version)) + elif attribute == u'NULL': + node = ExprNodes.NullNode(node.pos) + elif attribute in (u'set', u'frozenset', u'staticmethod'): + node = ExprNodes.NameNode(node.pos, name=EncodedString(attribute), + entry=self.current_env().builtin_scope().lookup_here(attribute)) + elif PyrexTypes.parse_basic_type(attribute): + pass + elif self.context.cython_scope.lookup_qualified_name(attribute): + pass + else: + error(node.pos, u"'%s' not a valid cython attribute or is being used incorrectly" % attribute) + return node + + def visit_ExecStatNode(self, node): + lenv = self.current_env() + self.visitchildren(node) + if len(node.args) == 1: + node.args.append(ExprNodes.GlobalsExprNode(node.pos)) + if not lenv.is_module_scope: + node.args.append( + ExprNodes.LocalsExprNode( + node.pos, self.current_scope_node(), lenv)) + return node + + def _inject_locals(self, node, func_name): + # locals()/dir()/vars() builtins + lenv = self.current_env() + entry = lenv.lookup_here(func_name) + if entry: + # not the builtin + return node + pos = node.pos + if func_name in ('locals', 'vars'): + if func_name == 'locals' and len(node.args) > 0: + error(self.pos, "Builtin 'locals()' called with wrong number of args, expected 0, got %d" + % len(node.args)) + return node + elif func_name == 'vars': + if len(node.args) > 1: + error(self.pos, "Builtin 'vars()' called with wrong number of args, expected 0-1, got %d" + % len(node.args)) + if len(node.args) > 0: + return node # nothing to do + return ExprNodes.LocalsExprNode(pos, self.current_scope_node(), lenv) + else: # dir() + if len(node.args) > 1: + error(self.pos, "Builtin 'dir()' called with wrong number of args, expected 0-1, got %d" + % len(node.args)) + if len(node.args) > 0: + # optimised in Builtin.py + return node + if lenv.is_py_class_scope or lenv.is_module_scope: + if lenv.is_py_class_scope: + pyclass = self.current_scope_node() + locals_dict = 
ExprNodes.CloneNode(pyclass.dict) + else: + locals_dict = ExprNodes.GlobalsExprNode(pos) + return ExprNodes.SortedDictKeysNode(locals_dict) + local_names = sorted(var.name for var in lenv.entries.values() if var.name) + items = [ExprNodes.IdentifierStringNode(pos, value=var) + for var in local_names] + return ExprNodes.ListNode(pos, args=items) + + def visit_PrimaryCmpNode(self, node): + # special case: for in/not-in test, we do not need to sort locals() + self.visitchildren(node) + if node.operator in 'not_in': # in/not_in + if isinstance(node.operand2, ExprNodes.SortedDictKeysNode): + arg = node.operand2.arg + if isinstance(arg, ExprNodes.NoneCheckNode): + arg = arg.arg + node.operand2 = arg + return node + + def visit_CascadedCmpNode(self, node): + return self.visit_PrimaryCmpNode(node) + + def _inject_eval(self, node, func_name): + lenv = self.current_env() + entry = lenv.lookup(func_name) + if len(node.args) != 1 or (entry and not entry.is_builtin): + return node + # Inject globals and locals + node.args.append(ExprNodes.GlobalsExprNode(node.pos)) + if not lenv.is_module_scope: + node.args.append( + ExprNodes.LocalsExprNode( + node.pos, self.current_scope_node(), lenv)) + return node + + def _inject_super(self, node, func_name): + lenv = self.current_env() + entry = lenv.lookup_here(func_name) + if entry or node.args: + return node + # Inject no-args super + def_node = self.current_scope_node() + if not isinstance(def_node, Nodes.DefNode) or not def_node.args or len(self.env_stack) < 2: + return node + class_node, class_scope = self.env_stack[-2] + if class_scope.is_py_class_scope: + def_node.requires_classobj = True + class_node.class_cell.is_active = True + node.args = [ + ExprNodes.ClassCellNode( + node.pos, is_generator=def_node.is_generator), + ExprNodes.NameNode(node.pos, name=def_node.args[0].name) + ] + elif class_scope.is_c_class_scope: + node.args = [ + ExprNodes.NameNode( + node.pos, name=class_node.scope.name, + entry=class_node.entry), + 
ExprNodes.NameNode(node.pos, name=def_node.args[0].name) + ] + return node + + def visit_SimpleCallNode(self, node): + # cython.foo + function = node.function.as_cython_attribute() + if function: + if function in InterpretCompilerDirectives.unop_method_nodes: + if len(node.args) != 1: + error(node.function.pos, u"%s() takes exactly one argument" % function) + else: + node = InterpretCompilerDirectives.unop_method_nodes[function]( + node.function.pos, operand=node.args[0]) + elif function in InterpretCompilerDirectives.binop_method_nodes: + if len(node.args) != 2: + error(node.function.pos, u"%s() takes exactly two arguments" % function) + else: + node = InterpretCompilerDirectives.binop_method_nodes[function]( + node.function.pos, operand1=node.args[0], operand2=node.args[1]) + elif function == u'cast': + if len(node.args) != 2: + error(node.function.pos, + u"cast() takes exactly two arguments and an optional typecheck keyword") + else: + type = node.args[0].analyse_as_type(self.current_env()) + if type: + node = ExprNodes.TypecastNode( + node.function.pos, type=type, operand=node.args[1], typecheck=False) + else: + error(node.args[0].pos, "Not a type") + elif function == u'sizeof': + if len(node.args) != 1: + error(node.function.pos, u"sizeof() takes exactly one argument") + else: + type = node.args[0].analyse_as_type(self.current_env()) + if type: + node = ExprNodes.SizeofTypeNode(node.function.pos, arg_type=type) + else: + node = ExprNodes.SizeofVarNode(node.function.pos, operand=node.args[0]) + elif function == 'cmod': + if len(node.args) != 2: + error(node.function.pos, u"cmod() takes exactly two arguments") + else: + node = ExprNodes.binop_node(node.function.pos, '%', node.args[0], node.args[1]) + node.cdivision = True + elif function == 'cdiv': + if len(node.args) != 2: + error(node.function.pos, u"cdiv() takes exactly two arguments") + else: + node = ExprNodes.binop_node(node.function.pos, '/', node.args[0], node.args[1]) + node.cdivision = True + elif 
function == u'set': + node.function = ExprNodes.NameNode(node.pos, name=EncodedString('set')) + elif function == u'staticmethod': + node.function = ExprNodes.NameNode(node.pos, name=EncodedString('staticmethod')) + elif self.context.cython_scope.lookup_qualified_name(function): + pass + else: + error(node.function.pos, + u"'%s' not a valid cython language construct" % function) + + self.visitchildren(node) + + if isinstance(node, ExprNodes.SimpleCallNode) and node.function.is_name: + func_name = node.function.name + if func_name in ('dir', 'locals', 'vars'): + return self._inject_locals(node, func_name) + if func_name == 'eval': + return self._inject_eval(node, func_name) + if func_name == 'super': + return self._inject_super(node, func_name) + return node + + def visit_GeneralCallNode(self, node): + function = node.function.as_cython_attribute() + if function == u'cast': + # NOTE: assuming simple tuple/dict nodes for positional_args and keyword_args + args = node.positional_args.args + kwargs = node.keyword_args.compile_time_value(None) + if (len(args) != 2 or len(kwargs) > 1 or + (len(kwargs) == 1 and 'typecheck' not in kwargs)): + error(node.function.pos, + u"cast() takes exactly two arguments and an optional typecheck keyword") + else: + type = args[0].analyse_as_type(self.current_env()) + if type: + typecheck = kwargs.get('typecheck', False) + node = ExprNodes.TypecastNode( + node.function.pos, type=type, operand=args[1], typecheck=typecheck) + else: + error(args[0].pos, "Not a type") + + self.visitchildren(node) + return node + + +class ReplaceFusedTypeChecks(VisitorTransform): + """ + This is not a transform in the pipeline. It is invoked on the specific + versions of a cdef function with fused argument types. It filters out any + type branches that don't match. e.g. + + if fused_t is mytype: + ... + elif fused_t in other_fused_type: + ... 
+ """ + def __init__(self, local_scope): + super(ReplaceFusedTypeChecks, self).__init__() + self.local_scope = local_scope + # defer the import until now to avoid circular import time dependencies + from .Optimize import ConstantFolding + self.transform = ConstantFolding(reevaluate=True) + + def visit_IfStatNode(self, node): + """ + Filters out any if clauses with false compile time type check + expression. + """ + self.visitchildren(node) + return self.transform(node) + + def visit_GILStatNode(self, node): + """ + Fold constant condition of GILStatNode. + """ + self.visitchildren(node) + return self.transform(node) + + def visit_PrimaryCmpNode(self, node): + with Errors.local_errors(ignore=True): + type1 = node.operand1.analyse_as_type(self.local_scope) + type2 = node.operand2.analyse_as_type(self.local_scope) + + if type1 and type2: + false_node = ExprNodes.BoolNode(node.pos, value=False) + true_node = ExprNodes.BoolNode(node.pos, value=True) + + type1 = self.specialize_type(type1, node.operand1.pos) + op = node.operator + + if op in ('is', 'is_not', '==', '!='): + type2 = self.specialize_type(type2, node.operand2.pos) + + is_same = type1.same_as(type2) + eq = op in ('is', '==') + + if (is_same and eq) or (not is_same and not eq): + return true_node + + elif op in ('in', 'not_in'): + # We have to do an instance check directly, as operand2 + # needs to be a fused type and not a type with a subtype + # that is fused. 
First unpack the typedef + if isinstance(type2, PyrexTypes.CTypedefType): + type2 = type2.typedef_base_type + + if type1.is_fused: + error(node.operand1.pos, "Type is fused") + elif not type2.is_fused: + error(node.operand2.pos, + "Can only use 'in' or 'not in' on a fused type") + else: + types = PyrexTypes.get_specialized_types(type2) + + for specialized_type in types: + if type1.same_as(specialized_type): + if op == 'in': + return true_node + else: + return false_node + + if op == 'not_in': + return true_node + + return false_node + + return node + + def specialize_type(self, type, pos): + try: + return type.specialize(self.local_scope.fused_to_specific) + except KeyError: + error(pos, "Type is not specific") + return type + + def visit_Node(self, node): + self.visitchildren(node) + return node + + +class DebugTransform(CythonTransform): + """ + Write debug information for this Cython module. + """ + + def __init__(self, context, options, result): + super(DebugTransform, self).__init__(context) + self.visited = set() + # our treebuilder and debug output writer + # (see Cython.Debugger.debug_output.CythonDebugWriter) + self.tb = self.context.gdb_debug_outputwriter + #self.c_output_file = options.output_file + self.c_output_file = result.c_file + + # Closure support, basically treat nested functions as if the AST were + # never nested + self.nested_funcdefs = [] + + # tells visit_NameNode whether it should register step-into functions + self.register_stepinto = False + + def visit_ModuleNode(self, node): + self.tb.module_name = node.full_module_name + attrs = dict( + module_name=node.full_module_name, + filename=node.pos[0].filename, + c_filename=self.c_output_file) + + self.tb.start('Module', attrs) + + # serialize functions + self.tb.start('Functions') + # First, serialize functions normally... + self.visitchildren(node) + + # ... 
then, serialize nested functions + for nested_funcdef in self.nested_funcdefs: + self.visit_FuncDefNode(nested_funcdef) + + self.register_stepinto = True + self.serialize_modulenode_as_function(node) + self.register_stepinto = False + self.tb.end('Functions') + + # 2.3 compatibility. Serialize global variables + self.tb.start('Globals') + entries = {} + + for k, v in node.scope.entries.items(): + if (v.qualified_name not in self.visited and not + v.name.startswith('__pyx_') and not + v.type.is_cfunction and not + v.type.is_extension_type): + entries[k]= v + + self.serialize_local_variables(entries) + self.tb.end('Globals') + # self.tb.end('Module') # end Module after the line number mapping in + # Cython.Compiler.ModuleNode.ModuleNode._serialize_lineno_map + return node + + def visit_FuncDefNode(self, node): + self.visited.add(node.local_scope.qualified_name) + + if getattr(node, 'is_wrapper', False): + return node + + if self.register_stepinto: + self.nested_funcdefs.append(node) + return node + + # node.entry.visibility = 'extern' + if node.py_func is None: + pf_cname = '' + else: + pf_cname = node.py_func.entry.func_cname + + # For functions defined using def, cname will be pyfunc_cname=__pyx_pf_* + # For functions defined using cpdef or cdef, cname will be func_cname=__pyx_f_* + # In all cases, cname will be the name of the function containing the actual code + cname = node.entry.pyfunc_cname or node.entry.func_cname + + attrs = dict( + name=node.entry.name or getattr(node, 'name', ''), + cname=cname, + pf_cname=pf_cname, + qualified_name=node.local_scope.qualified_name, + lineno=str(node.pos[1])) + + self.tb.start('Function', attrs=attrs) + + self.tb.start('Locals') + self.serialize_local_variables(node.local_scope.entries) + self.tb.end('Locals') + + self.tb.start('Arguments') + for arg in node.local_scope.arg_entries: + self.tb.start(arg.name) + self.tb.end(arg.name) + self.tb.end('Arguments') + + self.tb.start('StepIntoFunctions') + self.register_stepinto = 
True + self.visitchildren(node) + self.register_stepinto = False + self.tb.end('StepIntoFunctions') + self.tb.end('Function') + + return node + + def visit_NameNode(self, node): + if (self.register_stepinto and + node.type is not None and + node.type.is_cfunction and + getattr(node, 'is_called', False) and + node.entry.func_cname is not None): + # don't check node.entry.in_cinclude, as 'cdef extern: ...' + # declared functions are not 'in_cinclude'. + # This means we will list called 'cdef' functions as + # "step into functions", but this is not an issue as they will be + # recognized as Cython functions anyway. + attrs = dict(name=node.entry.func_cname) + self.tb.start('StepIntoFunction', attrs=attrs) + self.tb.end('StepIntoFunction') + + self.visitchildren(node) + return node + + def serialize_modulenode_as_function(self, node): + """ + Serialize the module-level code as a function so the debugger will know + it's a "relevant frame" and it will know where to set the breakpoint + for 'break modulename'. + """ + self._serialize_modulenode_as_function(node, dict( + name=node.full_module_name.rpartition('.')[-1], + cname=node.module_init_func_cname(), + pf_cname='', + # Ignore the qualified_name, breakpoints should be set using + # `cy break modulename:lineno` for module-level breakpoints. 
+ qualified_name='', + lineno='1', + is_initmodule_function="True", + )) + + def _serialize_modulenode_as_function(self, node, attrs): + self.tb.start('Function', attrs=attrs) + + self.tb.start('Locals') + self.serialize_local_variables(node.scope.entries) + self.tb.end('Locals') + + self.tb.start('Arguments') + self.tb.end('Arguments') + + self.tb.start('StepIntoFunctions') + self.register_stepinto = True + self.visitchildren(node) + self.register_stepinto = False + self.tb.end('StepIntoFunctions') + + self.tb.end('Function') + + def serialize_local_variables(self, entries): + for entry in entries.values(): + if not entry.cname: + # not a local variable + continue + if entry.type.is_pyobject: + vartype = 'PythonObject' + else: + vartype = 'CObject' + + if entry.from_closure: + # We're dealing with a closure where a variable from an outer + # scope is accessed, get it from the scope object. + cname = '%s->%s' % (Naming.cur_scope_cname, + entry.outer_entry.cname) + + qname = '%s.%s.%s' % (entry.scope.outer_scope.qualified_name, + entry.scope.name, + entry.name) + elif entry.in_closure: + cname = '%s->%s' % (Naming.cur_scope_cname, + entry.cname) + qname = entry.qualified_name + else: + cname = entry.cname + qname = entry.qualified_name + + if not entry.pos: + # this happens for variables that are not in the user's code, + # e.g. for the global __builtins__, __doc__, etc. We can just + # set the lineno to 0 for those. 
+ lineno = '0' + else: + lineno = str(entry.pos[1]) + + attrs = dict( + name=entry.name, + cname=cname, + qualified_name=qname, + type=vartype, + lineno=lineno) + + self.tb.start('LocalVar', attrs) + self.tb.end('LocalVar') diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/PyrexTypes.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/PyrexTypes.py new file mode 100644 index 0000000000000000000000000000000000000000..8c57fc2e76ae16c7f2e174eed9f1e2930e3b32ee --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/PyrexTypes.py @@ -0,0 +1,5535 @@ +# +# Cython/Python language types +# + +from __future__ import absolute_import + +import copy +import hashlib +import re + +try: + reduce +except NameError: + from functools import reduce +from functools import partial +from itertools import product + +from Cython.Utils import cached_function +from .Code import UtilityCode, LazyUtilityCode, TempitaUtilityCode +from . import StringEncoding +from . import Naming + +from .Errors import error, CannotSpecialize, performance_hint + + +class BaseType(object): + # + # Base class for all Cython types including pseudo-types. 
+ + # List of attribute names of any subtypes + subtypes = [] + _empty_declaration = None + _specialization_name = None + default_format_spec = None + + def can_coerce_to_pyobject(self, env): + return False + + def can_coerce_from_pyobject(self, env): + return False + + def can_coerce_to_pystring(self, env, format_spec=None): + return False + + def convert_to_pystring(self, cvalue, code, format_spec=None): + raise NotImplementedError("C types that support string formatting must override this method") + + def cast_code(self, expr_code): + return "((%s)%s)" % (self.empty_declaration_code(), expr_code) + + def empty_declaration_code(self, pyrex=False): + if pyrex: + return self.declaration_code('', pyrex=True) + if self._empty_declaration is None: + self._empty_declaration = self.declaration_code('') + return self._empty_declaration + + def specialization_name(self): + if self._specialization_name is None: + # This is not entirely robust. + common_subs = (self.empty_declaration_code() + # covers both "unsigned " and "signed " + .replace("signed ", "signed_") + .replace("long long", "long_long") + .replace(" ", "__")) + self._specialization_name = re.sub( + '[^a-zA-Z0-9_]', lambda x: '_%x_' % ord(x.group(0)), common_subs) + return self._specialization_name + + def base_declaration_code(self, base_code, entity_code): + if entity_code: + return "%s %s" % (base_code, entity_code) + else: + return base_code + + def __deepcopy__(self, memo): + """ + Types never need to be copied, if we do copy, Unfortunate Things + Will Happen! 
+ """ + return self + + def get_fused_types(self, result=None, seen=None, subtypes=None, include_function_return_type=False): + subtypes = subtypes or self.subtypes + if not subtypes: + return None + + if result is None: + result = [] + seen = set() + + for attr in subtypes: + list_or_subtype = getattr(self, attr) + if list_or_subtype: + if isinstance(list_or_subtype, BaseType): + list_or_subtype.get_fused_types(result, seen, include_function_return_type=include_function_return_type) + else: + for subtype in list_or_subtype: + subtype.get_fused_types(result, seen, include_function_return_type=include_function_return_type) + + return result + + def specialize_fused(self, env): + if env.fused_to_specific: + return self.specialize(env.fused_to_specific) + + return self + + @property + def is_fused(self): + """ + Whether this type or any of its subtypes is a fused type + """ + # Add this indirection for the is_fused property to allow overriding + # get_fused_types in subclasses. + return self.get_fused_types() + + def deduce_template_params(self, actual): + """ + Deduce any template params in this (argument) type given the actual + argument type. + + https://en.cppreference.com/w/cpp/language/function_template#Template_argument_deduction + """ + return {} + + def __lt__(self, other): + """ + For sorting. The sorting order should correspond to the preference of + conversion from Python types. + + Override to provide something sensible. This is only implemented so that + python 3 doesn't trip + """ + return id(type(self)) < id(type(other)) + + def py_type_name(self): + """ + Return the name of the Python type that can coerce to this type. + """ + + def typeof_name(self): + """ + Return the string with which fused python functions can be indexed. 
+ """ + if self.is_builtin_type or self.py_type_name() == 'object': + index_name = self.py_type_name() + else: + index_name = str(self) + + return index_name + + def check_for_null_code(self, cname): + """ + Return the code for a NULL-check in case an UnboundLocalError should + be raised if an entry of this type is referenced before assignment. + Returns None if no check should be performed. + """ + return None + + def invalid_value(self): + """ + Returns the most invalid value an object of this type can assume as a + C expression string. Returns None if no such value exists. + """ + + +class PyrexType(BaseType): + # + # Base class for all Cython types + # + # is_pyobject boolean Is a Python object type + # is_extension_type boolean Is a Python extension type + # is_final_type boolean Is a final extension type + # is_numeric boolean Is a C numeric type + # is_int boolean Is a C integer type + # is_float boolean Is a C floating point type + # is_complex boolean Is a C complex type + # is_void boolean Is the C void type + # is_array boolean Is a C array type + # is_ptr boolean Is a C pointer type + # is_null_ptr boolean Is the type of NULL + # is_reference boolean Is a C reference type + # is_rvalue_reference boolean Is a C++ rvalue reference type + # is_const boolean Is a C const type + # is_volatile boolean Is a C volatile type + # is_cv_qualified boolean Is a C const or volatile type + # is_cfunction boolean Is a C function type + # is_struct_or_union boolean Is a C struct or union type + # is_struct boolean Is a C struct type + # is_cpp_class boolean Is a C++ class + # is_optional_cpp_class boolean Is a C++ class with variable lifetime handled with std::optional + # is_enum boolean Is a C enum type + # is_cpp_enum boolean Is a C++ scoped enum type + # is_typedef boolean Is a typedef type + # is_string boolean Is a C char * type + # is_pyunicode_ptr boolean Is a C PyUNICODE * type + # is_cpp_string boolean Is a C++ std::string type + # python_type_constructor_name 
string or None non-None if it is a Python type constructor that can be indexed/"templated" + # is_unicode_char boolean Is either Py_UCS4 or Py_UNICODE + # is_returncode boolean Is used only to signal exceptions + # is_error boolean Is the dummy error type + # is_buffer boolean Is buffer access type + # is_pythran_expr boolean Is Pythran expr + # is_numpy_buffer boolean Is Numpy array buffer + # has_attributes boolean Has C dot-selectable attributes + # needs_cpp_construction boolean Needs C++ constructor and destructor when used in a cdef class + # needs_refcounting boolean Needs code to be generated similar to incref/gotref/decref. + # Largely used internally. + # refcounting_needs_gil boolean Reference counting needs GIL to be acquired. + # equivalent_type type A C or Python type that is equivalent to this Python or C type. + # default_value string Initial value that can be assigned before first user assignment. + # declaration_value string The value statically assigned on declaration (if any). + # entry Entry The Entry for this type + # + # declaration_code(entity_code, + # for_display = 0, dll_linkage = None, pyrex = 0) + # Returns a code fragment for the declaration of an entity + # of this type, given a code fragment for the entity. + # * If for_display, this is for reading by a human in an error + # message; otherwise it must be valid C code. + # * If dll_linkage is not None, it must be 'DL_EXPORT' or + # 'DL_IMPORT', and will be added to the base type part of + # the declaration. + # * If pyrex = 1, this is for use in a 'cdef extern' + # statement of a Cython include file. + # + # assignable_from(src_type) + # Tests whether a variable of this type can be + # assigned a value of type src_type. + # + # same_as(other_type) + # Tests whether this type represents the same type + # as other_type. + # + # as_argument_type(): + # Coerces array and C function types into pointer type for use as + # a formal argument type. 
+ # + + is_pyobject = 0 + is_unspecified = 0 + is_extension_type = 0 + is_final_type = 0 + is_builtin_type = 0 + is_cython_builtin_type = 0 + is_numeric = 0 + is_int = 0 + is_float = 0 + is_complex = 0 + is_void = 0 + is_array = 0 + is_ptr = 0 + is_null_ptr = 0 + is_reference = 0 + is_fake_reference = 0 + is_rvalue_reference = 0 + is_const = 0 + is_volatile = 0 + is_cv_qualified = 0 + is_cfunction = 0 + is_struct_or_union = 0 + is_cpp_class = 0 + is_optional_cpp_class = 0 + python_type_constructor_name = None + is_cpp_string = 0 + is_struct = 0 + is_enum = 0 + is_cpp_enum = False + is_typedef = 0 + is_string = 0 + is_pyunicode_ptr = 0 + is_unicode_char = 0 + is_returncode = 0 + is_error = 0 + is_buffer = 0 + is_ctuple = 0 + is_memoryviewslice = 0 + is_pythran_expr = 0 + is_numpy_buffer = 0 + has_attributes = 0 + needs_cpp_construction = 0 + needs_refcounting = 0 + refcounting_needs_gil = True + equivalent_type = None + default_value = "" + declaration_value = "" + + def resolve(self): + # If a typedef, returns the base type. + return self + + def specialize(self, values): + # Returns the concrete type if this is a fused type, or otherwise the type itself. + # May raise Errors.CannotSpecialize on failure + return self + + def literal_code(self, value): + # Returns a C code fragment representing a literal + # value of this type. 
+ return str(value) + + def __str__(self): + return self.declaration_code("", for_display = 1).strip() + + def same_as(self, other_type, **kwds): + return self.same_as_resolved_type(other_type.resolve(), **kwds) + + def same_as_resolved_type(self, other_type): + return self == other_type or other_type is error_type + + def subtype_of(self, other_type): + return self.subtype_of_resolved_type(other_type.resolve()) + + def subtype_of_resolved_type(self, other_type): + return self.same_as(other_type) + + def assignable_from(self, src_type): + return self.assignable_from_resolved_type(src_type.resolve()) + + def assignable_from_resolved_type(self, src_type): + return self.same_as(src_type) + + def assignment_failure_extra_info(self, src_type, src_name): + """Override if you can provide useful extra information about why an assignment didn't work. + + src_name may be None if unavailable""" + return "" + + def as_argument_type(self): + return self + + def is_complete(self): + # A type is incomplete if it is an unsized array, + # a struct whose attributes are not defined, etc. + return 1 + + def is_simple_buffer_dtype(self): + return False + + def can_be_optional(self): + """Returns True if type can be used with typing.Optional[].""" + return False + + def struct_nesting_depth(self): + # Returns the number levels of nested structs. This is + # used for constructing a stack for walking the run-time + # type information of the struct. 
+ return 1 + + def global_init_code(self, entry, code): + # abstract + pass + + def needs_nonecheck(self): + return 0 + + def _assign_from_py_code(self, source_code, result_code, error_pos, code, + from_py_function=None, error_condition=None, extra_args=None, + special_none_cvalue=None): + args = ', ' + ', '.join('%s' % arg for arg in extra_args) if extra_args else '' + convert_call = "%s(%s%s)" % ( + from_py_function or self.from_py_function, + source_code, + args, + ) + if self.is_enum: + convert_call = typecast(self, c_long_type, convert_call) + if special_none_cvalue: + # NOTE: requires 'source_code' to be simple! + convert_call = "(__Pyx_Py_IsNone(%s) ? (%s) : (%s))" % ( + source_code, special_none_cvalue, convert_call) + return '%s = %s; %s' % ( + result_code, + convert_call, + code.error_goto_if(error_condition or self.error_condition(result_code), error_pos)) + + def _generate_dummy_refcounting(self, code, *ignored_args, **ignored_kwds): + if self.needs_refcounting: + raise NotImplementedError("Ref-counting operation not yet implemented for type %s" % + self) + + def _generate_dummy_refcounting_assignment(self, code, cname, rhs_cname, *ignored_args, **ignored_kwds): + if self.needs_refcounting: + raise NotImplementedError("Ref-counting operation not yet implemented for type %s" % + self) + code.putln("%s = %s" % (cname, rhs_cname)) + + generate_incref = generate_xincref = generate_decref = generate_xdecref \ + = generate_decref_clear = generate_xdecref_clear \ + = generate_gotref = generate_xgotref = generate_giveref = generate_xgiveref \ + = _generate_dummy_refcounting + + generate_decref_set = generate_xdecref_set = _generate_dummy_refcounting_assignment + + def nullcheck_string(self, code, cname): + if self.needs_refcounting: + raise NotImplementedError("Ref-counting operation not yet implemented for type %s" % + self) + code.putln("1") + + def cpp_optional_declaration_code(self, entity_code, dll_linkage=None): + # declares an std::optional c++ variable 
+ raise NotImplementedError( + "cpp_optional_declaration_code only implemented for c++ classes and not type %s" % self) + + +def public_decl(base_code, dll_linkage): + if dll_linkage: + return "%s(%s)" % (dll_linkage, base_code.replace(',', ' __PYX_COMMA ')) + else: + return base_code + + +def create_typedef_type(name, base_type, cname, is_external=0, namespace=None): + if is_external: + if base_type.is_complex or base_type.is_fused: + raise ValueError("%s external typedefs not supported" % ( + "Fused" if base_type.is_fused else "Complex")) + if base_type.is_complex or base_type.is_fused: + return base_type + return CTypedefType(name, base_type, cname, is_external, namespace) + + +class CTypedefType(BaseType): + # + # Pseudo-type defined with a ctypedef statement in a + # 'cdef extern from' block. + # Delegates most attribute lookups to the base type. + # (Anything not defined here or in the BaseType is delegated.) + # + # qualified_name string + # typedef_name string + # typedef_cname string + # typedef_base_type PyrexType + # typedef_is_external bool + + is_typedef = 1 + typedef_is_external = 0 + + to_py_utility_code = None + from_py_utility_code = None + + subtypes = ['typedef_base_type'] + + def __init__(self, name, base_type, cname, is_external=0, namespace=None): + assert not base_type.is_complex + self.typedef_name = name + self.typedef_cname = cname + self.typedef_base_type = base_type + self.typedef_is_external = is_external + self.typedef_namespace = namespace + + def invalid_value(self): + return self.typedef_base_type.invalid_value() + + def resolve(self): + return self.typedef_base_type.resolve() + + def declaration_code(self, entity_code, + for_display = 0, dll_linkage = None, pyrex = 0): + if pyrex or for_display: + base_code = self.typedef_name + else: + base_code = public_decl(self.typedef_cname, dll_linkage) + if self.typedef_namespace is not None and not pyrex: + base_code = "%s::%s" % (self.typedef_namespace.empty_declaration_code(), base_code) 
+ return self.base_declaration_code(base_code, entity_code) + + def as_argument_type(self): + return self + + def cast_code(self, expr_code): + # If self is really an array (rather than pointer), we can't cast. + # For example, the gmp mpz_t. + if self.typedef_base_type.is_array: + base_type = self.typedef_base_type.base_type + return CPtrType(base_type).cast_code(expr_code) + else: + return BaseType.cast_code(self, expr_code) + + def specialize(self, values): + base_type = self.typedef_base_type.specialize(values) + namespace = self.typedef_namespace.specialize(values) if self.typedef_namespace else None + if base_type is self.typedef_base_type and namespace is self.typedef_namespace: + return self + else: + return create_typedef_type(self.typedef_name, base_type, self.typedef_cname, + 0, namespace) + + def __repr__(self): + return "" % self.typedef_cname + + def __str__(self): + return self.typedef_name + + def _create_utility_code(self, template_utility_code, + template_function_name): + type_name = type_identifier(self.typedef_cname) + utility_code = template_utility_code.specialize( + type = self.typedef_cname, + TypeName = type_name) + function_name = template_function_name % type_name + return utility_code, function_name + + def create_to_py_utility_code(self, env): + if self.typedef_is_external: + if not self.to_py_utility_code: + base_type = self.typedef_base_type + if type(base_type) is CIntType: + self.to_py_function = "__Pyx_PyInt_From_" + self.specialization_name() + env.use_utility_code(TempitaUtilityCode.load_cached( + "CIntToPy", "TypeConversion.c", + context={"TYPE": self.empty_declaration_code(), + "TO_PY_FUNCTION": self.to_py_function})) + return True + elif base_type.is_float: + pass # XXX implement! + elif base_type.is_complex: + pass # XXX implement! 
+ pass + elif base_type.is_cpp_string: + cname = "__pyx_convert_PyObject_string_to_py_%s" % type_identifier(self) + context = { + 'cname': cname, + 'type': self.typedef_cname, + } + from .UtilityCode import CythonUtilityCode + env.use_utility_code(CythonUtilityCode.load( + "string.to_py", "CppConvert.pyx", context=context)) + self.to_py_function = cname + return True + if self.to_py_utility_code: + env.use_utility_code(self.to_py_utility_code) + return True + # delegation + return self.typedef_base_type.create_to_py_utility_code(env) + + def create_from_py_utility_code(self, env): + if self.typedef_is_external: + if not self.from_py_utility_code: + base_type = self.typedef_base_type + if type(base_type) is CIntType: + self.from_py_function = "__Pyx_PyInt_As_" + self.specialization_name() + env.use_utility_code(TempitaUtilityCode.load_cached( + "CIntFromPy", "TypeConversion.c", + context={ + "TYPE": self.empty_declaration_code(), + "FROM_PY_FUNCTION": self.from_py_function, + "IS_ENUM": base_type.is_enum, + })) + return True + elif base_type.is_float: + pass # XXX implement! + elif base_type.is_complex: + pass # XXX implement! 
+ elif base_type.is_cpp_string: + cname = '__pyx_convert_string_from_py_%s' % type_identifier(self) + context = { + 'cname': cname, + 'type': self.typedef_cname, + } + from .UtilityCode import CythonUtilityCode + env.use_utility_code(CythonUtilityCode.load( + "string.from_py", "CppConvert.pyx", context=context)) + self.from_py_function = cname + return True + if self.from_py_utility_code: + env.use_utility_code(self.from_py_utility_code) + return True + # delegation + return self.typedef_base_type.create_from_py_utility_code(env) + + def to_py_call_code(self, source_code, result_code, result_type, to_py_function=None): + if to_py_function is None: + to_py_function = self.to_py_function + return self.typedef_base_type.to_py_call_code( + source_code, result_code, result_type, to_py_function) + + def from_py_call_code(self, source_code, result_code, error_pos, code, + from_py_function=None, error_condition=None, + special_none_cvalue=None): + return self.typedef_base_type.from_py_call_code( + source_code, result_code, error_pos, code, + from_py_function or self.from_py_function, + error_condition or self.error_condition(result_code), + special_none_cvalue=special_none_cvalue, + ) + + def overflow_check_binop(self, binop, env, const_rhs=False): + env.use_utility_code(UtilityCode.load("Common", "Overflow.c")) + type = self.empty_declaration_code() + name = self.specialization_name() + if binop == "lshift": + env.use_utility_code(TempitaUtilityCode.load_cached( + "LeftShift", "Overflow.c", + context={'TYPE': type, 'NAME': name, 'SIGNED': self.signed})) + else: + if const_rhs: + binop += "_const" + _load_overflow_base(env) + env.use_utility_code(TempitaUtilityCode.load_cached( + "SizeCheck", "Overflow.c", + context={'TYPE': type, 'NAME': name})) + env.use_utility_code(TempitaUtilityCode.load_cached( + "Binop", "Overflow.c", + context={'TYPE': type, 'NAME': name, 'BINOP': binop})) + return "__Pyx_%s_%s_checking_overflow" % (binop, name) + + def error_condition(self, 
result_code): + if self.typedef_is_external: + if self.exception_value: + condition = "(%s == %s)" % ( + result_code, self.cast_code(self.exception_value)) + if self.exception_check: + condition += " && PyErr_Occurred()" + return condition + # delegation + return self.typedef_base_type.error_condition(result_code) + + def __getattr__(self, name): + return getattr(self.typedef_base_type, name) + + def py_type_name(self): + return self.typedef_base_type.py_type_name() + + def can_coerce_to_pyobject(self, env): + return self.typedef_base_type.can_coerce_to_pyobject(env) + + def can_coerce_from_pyobject(self, env): + return self.typedef_base_type.can_coerce_from_pyobject(env) + + +class MemoryViewSliceType(PyrexType): + + is_memoryviewslice = 1 + default_value = "{ 0, 0, { 0 }, { 0 }, { 0 } }" + + has_attributes = 1 + needs_refcounting = 1 # Ideally this would be true and reference counting for + # memoryview and pyobject code could be generated in the same way. + # However, memoryviews are sufficiently specialized that this doesn't + # seem practical. Implement a limited version of it for now + refcounting_needs_gil = False # __PYX_XCLEAR_MEMVIEW acquires GIL internally. + scope = None + + # These are special cased in Defnode + from_py_function = None + to_py_function = None + + exception_value = None + exception_check = True + + subtypes = ['dtype'] + + def __init__(self, base_dtype, axes): + """ + MemoryViewSliceType(base, axes) + + Base is the C base type; axes is a list of (access, packing) strings, + where access is one of 'full', 'direct' or 'ptr' and packing is one of + 'contig', 'strided' or 'follow'. There is one (access, packing) tuple + for each dimension. + + the access specifiers determine whether the array data contains + pointers that need to be dereferenced along that axis when + retrieving/setting: + + 'direct' -- No pointers stored in this dimension. + 'ptr' -- Pointer stored in this dimension. 
+ 'full' -- Check along this dimension, don't assume either. + + the packing specifiers specify how the array elements are laid-out + in memory. + + 'contig' -- The data is contiguous in memory along this dimension. + At most one dimension may be specified as 'contig'. + 'strided' -- The data isn't contiguous along this dimension. + 'follow' -- Used for C/Fortran contiguous arrays, a 'follow' dimension + has its stride automatically computed from extents of the other + dimensions to ensure C or Fortran memory layout. + + C-contiguous memory has 'direct' as the access spec, 'contig' as the + *last* axis' packing spec and 'follow' for all other packing specs. + + Fortran-contiguous memory has 'direct' as the access spec, 'contig' as + the *first* axis' packing spec and 'follow' for all other packing + specs. + """ + from . import Buffer, MemoryView + + self.dtype = base_dtype + self.axes = axes + self.ndim = len(axes) + self.flags = MemoryView.get_buf_flags(self.axes) + + self.is_c_contig, self.is_f_contig = MemoryView.is_cf_contig(self.axes) + assert not (self.is_c_contig and self.is_f_contig) + + self.mode = MemoryView.get_mode(axes) + self.writable_needed = False + + if not self.dtype.is_fused: + self.dtype_name = Buffer.mangle_dtype_name(self.dtype) + + def __hash__(self): + return hash(self.__class__) ^ hash(self.dtype) ^ hash(tuple(self.axes)) + + def __eq__(self, other): + if isinstance(other, BaseType): + return self.same_as_resolved_type(other) + else: + return False + + def __ne__(self, other): + # TODO drop when Python2 is dropped + return not (self == other) + + def same_as_resolved_type(self, other_type): + return ((other_type.is_memoryviewslice and + #self.writable_needed == other_type.writable_needed and # FIXME: should be only uni-directional + self.dtype.same_as(other_type.dtype) and + self.axes == other_type.axes) or + other_type is error_type) + + def needs_nonecheck(self): + return True + + def is_complete(self): + # incomplete since the 
underlying struct doesn't have a cython.memoryview object. + return 0 + + def can_be_optional(self): + """Returns True if type can be used with typing.Optional[].""" + return True + + def declaration_code(self, entity_code, + for_display = 0, dll_linkage = None, pyrex = 0): + # XXX: we put these guards in for now... + assert not dll_linkage + from . import MemoryView + base_code = StringEncoding.EncodedString( + str(self) if pyrex or for_display else MemoryView.memviewslice_cname) + return self.base_declaration_code( + base_code, + entity_code) + + def attributes_known(self): + if self.scope is None: + from . import Symtab + + self.scope = scope = Symtab.CClassScope( + 'mvs_class_'+self.specialization_suffix(), + None, + visibility='extern', + parent_type=self) + + scope.directives = {} + + scope.declare_var('_data', c_char_ptr_type, None, + cname='data', is_cdef=1) + + return True + + def declare_attribute(self, attribute, env, pos): + from . import MemoryView, Options + + scope = self.scope + + if attribute == 'shape': + scope.declare_var('shape', + c_array_type(c_py_ssize_t_type, + Options.buffer_max_dims), + pos, + cname='shape', + is_cdef=1) + + elif attribute == 'strides': + scope.declare_var('strides', + c_array_type(c_py_ssize_t_type, + Options.buffer_max_dims), + pos, + cname='strides', + is_cdef=1) + + elif attribute == 'suboffsets': + scope.declare_var('suboffsets', + c_array_type(c_py_ssize_t_type, + Options.buffer_max_dims), + pos, + cname='suboffsets', + is_cdef=1) + + elif attribute in ("copy", "copy_fortran"): + ndim = len(self.axes) + + follow_dim = [('direct', 'follow')] + contig_dim = [('direct', 'contig')] + to_axes_c = follow_dim * (ndim - 1) + contig_dim + to_axes_f = contig_dim + follow_dim * (ndim -1) + + dtype = self.dtype + if dtype.is_cv_qualified: + dtype = dtype.cv_base_type + + to_memview_c = MemoryViewSliceType(dtype, to_axes_c) + to_memview_f = MemoryViewSliceType(dtype, to_axes_f) + + for to_memview, cython_name in [(to_memview_c, 
"copy"), + (to_memview_f, "copy_fortran")]: + copy_func_type = CFuncType( + to_memview, + [CFuncTypeArg("memviewslice", self, None)]) + copy_cname = MemoryView.copy_c_or_fortran_cname(to_memview) + + entry = scope.declare_cfunction( + cython_name, + copy_func_type, pos=pos, defining=1, + cname=copy_cname) + + utility = MemoryView.get_copy_new_utility(pos, self, to_memview) + env.use_utility_code(utility) + + MemoryView.use_cython_array_utility_code(env) + + elif attribute in ("is_c_contig", "is_f_contig"): + # is_c_contig and is_f_contig functions + for (c_or_f, cython_name) in (('C', 'is_c_contig'), ('F', 'is_f_contig')): + + is_contig_name = MemoryView.get_is_contig_func_name(c_or_f, self.ndim) + + cfunctype = CFuncType( + return_type=c_bint_type, + args=[CFuncTypeArg("memviewslice", self, None)], + exception_value="-1", + ) + + entry = scope.declare_cfunction(cython_name, + cfunctype, + pos=pos, + defining=1, + cname=is_contig_name) + + entry.utility_code_definition = MemoryView.get_is_contig_utility(c_or_f, self.ndim) + + return True + + def get_entry(self, node, cname=None, type=None): + from . import MemoryView, Symtab + + if cname is None: + assert node.is_simple() or node.is_temp or node.is_elemental + cname = node.result() + + if type is None: + type = node.type + + entry = Symtab.Entry(cname, cname, type, node.pos) + return MemoryView.MemoryViewSliceBufferEntry(entry) + + def conforms_to(self, dst, broadcast=False, copying=False): + """ + Returns True if src conforms to dst, False otherwise. + + If conformable, the types are the same, the ndims are equal, and each axis spec is conformable. + + Any packing/access spec is conformable to itself. + + 'direct' and 'ptr' are conformable to 'full'. + 'contig' and 'follow' are conformable to 'strided'. + Any other combo is not conformable. + """ + from . 
import MemoryView + + src = self + + #if not copying and self.writable_needed and not dst.writable_needed: + # return False + + src_dtype, dst_dtype = src.dtype, dst.dtype + # We can add but not remove const/volatile modifiers + # (except if we are copying by value, then anything is fine) + if not copying: + if src_dtype.is_const and not dst_dtype.is_const: + return False + if src_dtype.is_volatile and not dst_dtype.is_volatile: + return False + # const/volatile checks are done, remove those qualifiers + if src_dtype.is_cv_qualified: + src_dtype = src_dtype.cv_base_type + if dst_dtype.is_cv_qualified: + dst_dtype = dst_dtype.cv_base_type + + if not src_dtype.same_as(dst_dtype): + return False + + if src.ndim != dst.ndim: + if broadcast: + src, dst = MemoryView.broadcast_types(src, dst) + else: + return False + + for src_spec, dst_spec in zip(src.axes, dst.axes): + src_access, src_packing = src_spec + dst_access, dst_packing = dst_spec + if src_access != dst_access and dst_access != 'full': + return False + if src_packing != dst_packing and dst_packing != 'strided' and not copying: + return False + + return True + + def valid_dtype(self, dtype, i=0): + """ + Return whether type dtype can be used as the base type of a + memoryview slice. 
+ + We support structs, numeric types and objects + """ + if dtype.is_complex and dtype.real_type.is_int: + return False + + if dtype.is_struct and dtype.kind == 'struct': + for member in dtype.scope.var_entries: + if not self.valid_dtype(member.type): + return False + + return True + + return ( + dtype.is_error or + # Pointers are not valid (yet) + # (dtype.is_ptr and valid_memslice_dtype(dtype.base_type)) or + (dtype.is_array and i < 8 and self.valid_dtype(dtype.base_type, i + 1)) or + dtype.is_numeric or + dtype.is_pyobject or + dtype.is_fused or # accept this as it will be replaced by specializations later + (dtype.is_typedef and self.valid_dtype(dtype.typedef_base_type)) + ) + + def validate_memslice_dtype(self, pos): + if not self.valid_dtype(self.dtype): + error(pos, "Invalid base type for memoryview slice: %s" % self.dtype) + + def assert_direct_dims(self, pos): + for access, packing in self.axes: + if access != 'direct': + error(pos, "All dimensions must be direct") + return False + return True + + def transpose(self, pos): + if not self.assert_direct_dims(pos): + return error_type + return MemoryViewSliceType(self.dtype, self.axes[::-1]) + + def specialization_name(self): + return '%s_%s' % ( + super(MemoryViewSliceType,self).specialization_name(), + self.specialization_suffix()) + + def specialization_suffix(self): + return "%s_%s" % (self.axes_to_name(), self.dtype_name) + + def can_coerce_to_pyobject(self, env): + return True + + def can_coerce_from_pyobject(self, env): + return True + + def check_for_null_code(self, cname): + return cname + '.memview' + + def create_from_py_utility_code(self, env): + from . import MemoryView, Buffer + + # We don't have 'code', so use a LazyUtilityCode with a callback. 
+ def lazy_utility_callback(code): + context['dtype_typeinfo'] = Buffer.get_type_information_cname(code, self.dtype) + return TempitaUtilityCode.load( + "ObjectToMemviewSlice", "MemoryView_C.c", context=context) + + env.use_utility_code(MemoryView.memviewslice_init_code) + env.use_utility_code(LazyUtilityCode(lazy_utility_callback)) + + if self.is_c_contig: + c_or_f_flag = "__Pyx_IS_C_CONTIG" + elif self.is_f_contig: + c_or_f_flag = "__Pyx_IS_F_CONTIG" + else: + c_or_f_flag = "0" + + suffix = self.specialization_suffix() + funcname = "__Pyx_PyObject_to_MemoryviewSlice_" + suffix + + context = dict( + MemoryView.context, + buf_flag = self.flags, + ndim = self.ndim, + axes_specs = ', '.join(self.axes_to_code()), + dtype_typedecl = self.dtype.empty_declaration_code(), + struct_nesting_depth = self.dtype.struct_nesting_depth(), + c_or_f_flag = c_or_f_flag, + funcname = funcname, + ) + + self.from_py_function = funcname + return True + + def from_py_call_code(self, source_code, result_code, error_pos, code, + from_py_function=None, error_condition=None, + special_none_cvalue=None): + # NOTE: auto-detection of readonly buffers is disabled: + # writable = self.writable_needed or not self.dtype.is_const + writable = not self.dtype.is_const + return self._assign_from_py_code( + source_code, result_code, error_pos, code, from_py_function, error_condition, + extra_args=['PyBUF_WRITABLE' if writable else '0'], + special_none_cvalue=special_none_cvalue, + ) + + def create_to_py_utility_code(self, env): + self._dtype_to_py_func, self._dtype_from_py_func = self.dtype_object_conversion_funcs(env) + return True + + def to_py_call_code(self, source_code, result_code, result_type, to_py_function=None): + assert self._dtype_to_py_func + assert self._dtype_from_py_func + + to_py_func = "(PyObject *(*)(char *)) " + self._dtype_to_py_func + from_py_func = "(int (*)(char *, PyObject *)) " + self._dtype_from_py_func + + tup = (result_code, source_code, self.ndim, to_py_func, from_py_func, 
self.dtype.is_pyobject) + return "%s = __pyx_memoryview_fromslice(%s, %s, %s, %s, %d);" % tup + + def dtype_object_conversion_funcs(self, env): + get_function = "__pyx_memview_get_%s" % self.dtype_name + set_function = "__pyx_memview_set_%s" % self.dtype_name + + context = dict( + get_function = get_function, + set_function = set_function, + ) + + if self.dtype.is_pyobject: + utility_name = "MemviewObjectToObject" + else: + self.dtype.create_to_py_utility_code(env) + to_py_function = self.dtype.to_py_function + + from_py_function = None + if not self.dtype.is_const: + self.dtype.create_from_py_utility_code(env) + from_py_function = self.dtype.from_py_function + + if not (to_py_function or from_py_function): + return "NULL", "NULL" + if not to_py_function: + get_function = "NULL" + if not from_py_function: + set_function = "NULL" + + utility_name = "MemviewDtypeToObject" + error_condition = (self.dtype.error_condition('value') or + 'PyErr_Occurred()') + context.update( + to_py_function=to_py_function, + from_py_function=from_py_function, + dtype=self.dtype.empty_declaration_code(), + error_condition=error_condition, + ) + + utility = TempitaUtilityCode.load_cached( + utility_name, "MemoryView_C.c", context=context) + env.use_utility_code(utility) + return get_function, set_function + + def axes_to_code(self): + """Return a list of code constants for each axis""" + from . import MemoryView + d = MemoryView._spec_to_const + return ["(%s | %s)" % (d[a], d[p]) for a, p in self.axes] + + def axes_to_name(self): + """Return an abbreviated name for our axes""" + from . import MemoryView + d = MemoryView._spec_to_abbrev + return "".join(["%s%s" % (d[a], d[p]) for a, p in self.axes]) + + def error_condition(self, result_code): + return "!%s.memview" % result_code + + def __str__(self): + from . 
import MemoryView + + axes_code_list = [] + for idx, (access, packing) in enumerate(self.axes): + flag = MemoryView.get_memoryview_flag(access, packing) + if flag == "strided": + axes_code_list.append(":") + else: + if flag == 'contiguous': + have_follow = [p for a, p in self.axes[idx - 1:idx + 2] + if p == 'follow'] + if have_follow or self.ndim == 1: + flag = '1' + + axes_code_list.append("::" + flag) + + if self.dtype.is_pyobject: + dtype_name = self.dtype.name + else: + dtype_name = self.dtype + + return "%s[%s]" % (dtype_name, ", ".join(axes_code_list)) + + def specialize(self, values): + """This does not validate the base type!!""" + dtype = self.dtype.specialize(values) + if dtype is not self.dtype: + return MemoryViewSliceType(dtype, self.axes) + + return self + + def cast_code(self, expr_code): + return expr_code + + # When memoryviews are increfed currently seems heavily special-cased. + # Therefore, use our own function for now + def generate_incref(self, code, name, **kwds): + pass + + def generate_incref_memoryviewslice(self, code, slice_cname, have_gil): + # TODO ideally would be done separately + code.putln("__PYX_INC_MEMVIEW(&%s, %d);" % (slice_cname, int(have_gil))) + + # decref however did look to always apply for memoryview slices + # with "have_gil" set to True by default + def generate_xdecref(self, code, cname, nanny, have_gil): + code.putln("__PYX_XCLEAR_MEMVIEW(&%s, %d);" % (cname, int(have_gil))) + + def generate_decref(self, code, cname, nanny, have_gil): + # Fall back to xdecref since we don't care to have a separate decref version for this. 
+ self.generate_xdecref(code, cname, nanny, have_gil) + + def generate_xdecref_clear(self, code, cname, clear_before_decref, **kwds): + self.generate_xdecref(code, cname, **kwds) + code.putln("%s.memview = NULL; %s.data = NULL;" % (cname, cname)) + + def generate_decref_clear(self, code, cname, **kwds): + # memoryviews don't currently distinguish between xdecref and decref + self.generate_xdecref_clear(code, cname, **kwds) + + # memoryviews don't participate in giveref/gotref + generate_gotref = generate_xgotref = generate_xgiveref = generate_giveref = lambda *args: None + + + +class BufferType(BaseType): + # + # Delegates most attribute lookups to the base type. + # (Anything not defined here or in the BaseType is delegated.) + # + # dtype PyrexType + # ndim int + # mode str + # negative_indices bool + # cast bool + # is_buffer bool + # writable bool + + is_buffer = 1 + writable = True + + subtypes = ['dtype'] + + def __init__(self, base, dtype, ndim, mode, negative_indices, cast): + self.base = base + self.dtype = dtype + self.ndim = ndim + self.buffer_ptr_type = CPtrType(dtype) + self.mode = mode + self.negative_indices = negative_indices + self.cast = cast + self.is_numpy_buffer = self.base.name == "ndarray" + + def can_coerce_to_pyobject(self,env): + return True + + def can_coerce_from_pyobject(self,env): + return True + + def as_argument_type(self): + return self + + def specialize(self, values): + dtype = self.dtype.specialize(values) + if dtype is not self.dtype: + return BufferType(self.base, dtype, self.ndim, self.mode, + self.negative_indices, self.cast) + return self + + def get_entry(self, node): + from . 
import Buffer + assert node.is_name + return Buffer.BufferEntry(node.entry) + + def __getattr__(self, name): + return getattr(self.base, name) + + def __repr__(self): + return "" % self.base + + def __str__(self): + # avoid ', ', as fused functions split the signature string on ', ' + cast_str = '' + if self.cast: + cast_str = ',cast=True' + + return "%s[%s,ndim=%d%s]" % (self.base, self.dtype, self.ndim, + cast_str) + + def assignable_from(self, other_type): + if other_type.is_buffer: + return (self.same_as(other_type, compare_base=False) and + self.base.assignable_from(other_type.base)) + + return self.base.assignable_from(other_type) + + def same_as(self, other_type, compare_base=True): + if not other_type.is_buffer: + return other_type.same_as(self.base) + + return (self.dtype.same_as(other_type.dtype) and + self.ndim == other_type.ndim and + self.mode == other_type.mode and + self.cast == other_type.cast and + (not compare_base or self.base.same_as(other_type.base))) + + +class PyObjectType(PyrexType): + # + # Base class for all Python object types (reference-counted). 
+ # + # buffer_defaults dict or None Default options for buffer + + name = "object" + is_pyobject = 1 + default_value = "0" + declaration_value = "0" + buffer_defaults = None + is_external = False + is_subclassed = False + is_gc_simple = False + builtin_trashcan = False # builtin type using trashcan + needs_refcounting = True + + def __str__(self): + return "Python object" + + def __repr__(self): + return "" + + def can_coerce_to_pyobject(self, env): + return True + + def can_coerce_from_pyobject(self, env): + return True + + def can_be_optional(self): + """Returns True if type can be used with typing.Optional[].""" + return True + + def default_coerced_ctype(self): + """The default C type that this Python type coerces to, or None.""" + return None + + def assignable_from(self, src_type): + # except for pointers, conversion will be attempted + return not src_type.is_ptr or src_type.is_string or src_type.is_pyunicode_ptr + + def is_simple_buffer_dtype(self): + return True + + def declaration_code(self, entity_code, + for_display = 0, dll_linkage = None, pyrex = 0): + if pyrex or for_display: + base_code = "object" + else: + base_code = public_decl("PyObject", dll_linkage) + entity_code = "*%s" % entity_code + return self.base_declaration_code(base_code, entity_code) + + def as_pyobject(self, cname): + if (not self.is_complete()) or self.is_extension_type: + return "(PyObject *)" + cname + else: + return cname + + def py_type_name(self): + return "object" + + def __lt__(self, other): + """ + Make sure we sort highest, as instance checking on py_type_name + ('object') is always true + """ + return False + + def global_init_code(self, entry, code): + code.put_init_var_to_py_none(entry, nanny=False) + + def check_for_null_code(self, cname): + return cname + + def generate_incref(self, code, cname, nanny): + if nanny: + code.funcstate.needs_refnanny = True + code.putln("__Pyx_INCREF(%s);" % self.as_pyobject(cname)) + else: + code.putln("Py_INCREF(%s);" % 
self.as_pyobject(cname)) + + def generate_xincref(self, code, cname, nanny): + if nanny: + code.funcstate.needs_refnanny = True + code.putln("__Pyx_XINCREF(%s);" % self.as_pyobject(cname)) + else: + code.putln("Py_XINCREF(%s);" % self.as_pyobject(cname)) + + def generate_decref(self, code, cname, nanny, have_gil): + # have_gil is for the benefit of memoryviewslice - it's ignored here + assert have_gil + self._generate_decref(code, cname, nanny, null_check=False, clear=False) + + def generate_xdecref(self, code, cname, nanny, have_gil): + # in this (and other) PyObjectType functions, have_gil is being + # passed to provide a common interface with MemoryviewSlice. + # It's ignored here + self._generate_decref(code, cname, nanny, null_check=True, + clear=False) + + def generate_decref_clear(self, code, cname, clear_before_decref, nanny, have_gil): + self._generate_decref(code, cname, nanny, null_check=False, + clear=True, clear_before_decref=clear_before_decref) + + def generate_xdecref_clear(self, code, cname, clear_before_decref=False, nanny=True, have_gil=None): + self._generate_decref(code, cname, nanny, null_check=True, + clear=True, clear_before_decref=clear_before_decref) + + def generate_gotref(self, code, cname): + code.funcstate.needs_refnanny = True + code.putln("__Pyx_GOTREF(%s);" % self.as_pyobject(cname)) + + def generate_xgotref(self, code, cname): + code.funcstate.needs_refnanny = True + code.putln("__Pyx_XGOTREF(%s);" % self.as_pyobject(cname)) + + def generate_giveref(self, code, cname): + code.funcstate.needs_refnanny = True + code.putln("__Pyx_GIVEREF(%s);" % self.as_pyobject(cname)) + + def generate_xgiveref(self, code, cname): + code.funcstate.needs_refnanny = True + code.putln("__Pyx_XGIVEREF(%s);" % self.as_pyobject(cname)) + + def generate_decref_set(self, code, cname, rhs_cname): + code.funcstate.needs_refnanny = True + code.putln("__Pyx_DECREF_SET(%s, %s);" % (cname, rhs_cname)) + + def generate_xdecref_set(self, code, cname, rhs_cname): + 
code.funcstate.needs_refnanny = True + code.putln("__Pyx_XDECREF_SET(%s, %s);" % (cname, rhs_cname)) + + def _generate_decref(self, code, cname, nanny, null_check=False, + clear=False, clear_before_decref=False): + prefix = '__Pyx' if nanny else 'Py' + X = 'X' if null_check else '' + + if nanny: + code.funcstate.needs_refnanny = True + + if clear: + if clear_before_decref: + if not nanny: + X = '' # CPython doesn't have a Py_XCLEAR() + code.putln("%s_%sCLEAR(%s);" % (prefix, X, cname)) + else: + code.putln("%s_%sDECREF(%s); %s = 0;" % ( + prefix, X, self.as_pyobject(cname), cname)) + else: + code.putln("%s_%sDECREF(%s);" % ( + prefix, X, self.as_pyobject(cname))) + + def nullcheck_string(self, cname): + return cname + + +builtin_types_that_cannot_create_refcycles = frozenset({ + 'object', 'bool', 'int', 'long', 'float', 'complex', + 'bytearray', 'bytes', 'unicode', 'str', 'basestring', +}) + +builtin_types_with_trashcan = frozenset({ + 'dict', 'list', 'set', 'frozenset', 'tuple', 'type', +}) + + +class BuiltinObjectType(PyObjectType): + # objstruct_cname string Name of PyObject struct + + is_builtin_type = 1 + has_attributes = 1 + base_type = None + module_name = '__builtin__' + require_exact = 1 + + # fields that let it look like an extension type + vtabslot_cname = None + vtabstruct_cname = None + vtabptr_cname = None + typedef_flag = True + is_external = True + decl_type = 'PyObject' + + def __init__(self, name, cname, objstruct_cname=None): + self.name = name + self.cname = cname + self.typeptr_cname = "(&%s)" % cname + self.objstruct_cname = objstruct_cname + self.is_gc_simple = name in builtin_types_that_cannot_create_refcycles + self.builtin_trashcan = name in builtin_types_with_trashcan + if name == 'type': + # Special case the type type, as many C API calls (and other + # libraries) actually expect a PyTypeObject* for type arguments. 
+ self.decl_type = objstruct_cname + if name == 'Exception': + self.require_exact = 0 + + def set_scope(self, scope): + self.scope = scope + if scope: + scope.parent_type = self + + def __str__(self): + return "%s object" % self.name + + def __repr__(self): + return "<%s>"% self.cname + + def default_coerced_ctype(self): + if self.name in ('bytes', 'bytearray'): + return c_char_ptr_type + elif self.name == 'bool': + return c_bint_type + elif self.name == 'float': + return c_double_type + return None + + def assignable_from(self, src_type): + if isinstance(src_type, BuiltinObjectType): + if self.name == 'basestring': + return src_type.name in ('str', 'unicode', 'basestring') + else: + return src_type.name == self.name + elif src_type.is_extension_type: + # FIXME: This is an ugly special case that we currently + # keep supporting. It allows users to specify builtin + # types as external extension types, while keeping them + # compatible with the real builtin types. We already + # generate a warning for it. Big TODO: remove! + return (src_type.module_name == '__builtin__' and + src_type.name == self.name) + else: + return True + + def typeobj_is_available(self): + return True + + def attributes_known(self): + return True + + def subtype_of(self, type): + return type.is_pyobject and type.assignable_from(self) + + def type_check_function(self, exact=True): + type_name = self.name + if type_name == 'str': + type_check = 'PyString_Check' + elif type_name == 'basestring': + type_check = '__Pyx_PyBaseString_Check' + elif type_name == 'Exception': + type_check = '__Pyx_PyException_Check' + elif type_name == 'bytearray': + type_check = 'PyByteArray_Check' + elif type_name == 'frozenset': + type_check = 'PyFrozenSet_Check' + elif type_name == 'int': + # For backwards compatibility of (Py3) 'x: int' annotations in Py2, we also allow 'long' there. 
+ type_check = '__Pyx_Py3Int_Check' + elif type_name == "memoryview": + # capitalize doesn't catch the 'V' + type_check = "PyMemoryView_Check" + else: + type_check = 'Py%s_Check' % type_name.capitalize() + if exact and type_name not in ('bool', 'slice', 'Exception', 'memoryview'): + type_check += 'Exact' + return type_check + + def isinstance_code(self, arg): + return '%s(%s)' % (self.type_check_function(exact=False), arg) + + def type_test_code(self, arg, notnone=False, exact=True): + type_check = self.type_check_function(exact=exact) + check = 'likely(%s(%s))' % (type_check, arg) + if not notnone: + check += '||((%s) == Py_None)' % arg + if self.name == 'basestring': + name = '(PY_MAJOR_VERSION < 3 ? "basestring" : "str")' + else: + name = '"%s"' % self.name + return check + ' || __Pyx_RaiseUnexpectedTypeError(%s, %s)' % (name, arg) + + def declaration_code(self, entity_code, + for_display = 0, dll_linkage = None, pyrex = 0): + if pyrex or for_display: + base_code = self.name + else: + base_code = public_decl(self.decl_type, dll_linkage) + entity_code = "*%s" % entity_code + return self.base_declaration_code(base_code, entity_code) + + def as_pyobject(self, cname): + if self.decl_type == 'PyObject': + return cname + else: + return "(PyObject *)" + cname + + def cast_code(self, expr_code, to_object_struct = False): + return "((%s*)%s)" % ( + to_object_struct and self.objstruct_cname or self.decl_type, # self.objstruct_cname may be None + expr_code) + + def py_type_name(self): + return self.name + + + +class PyExtensionType(PyObjectType): + # + # A Python extension type. 
+ # + # name string + # scope CClassScope Attribute namespace + # typedef_flag boolean + # base_type PyExtensionType or None + # module_name string or None Qualified name of defining module + # objstruct_cname string Name of PyObject struct + # objtypedef_cname string Name of PyObject struct typedef + # typeobj_cname string or None C code fragment referring to type object + # typeptr_cname string or None Name of pointer to external type object + # vtabslot_cname string Name of C method table member + # vtabstruct_cname string Name of C method table struct + # vtabptr_cname string Name of pointer to C method table + # vtable_cname string Name of C method table definition + # early_init boolean Whether to initialize early (as opposed to during module execution). + # defered_declarations [thunk] Used to declare class hierarchies in order + # is_external boolean Defined in a extern block + # check_size 'warn', 'error', 'ignore' What to do if tp_basicsize does not match + # dataclass_fields OrderedDict nor None Used for inheriting from dataclasses + # multiple_bases boolean Does this class have multiple bases + # has_sequence_flag boolean Set Py_TPFLAGS_SEQUENCE + + is_extension_type = 1 + has_attributes = 1 + early_init = 1 + + objtypedef_cname = None + dataclass_fields = None + multiple_bases = False + has_sequence_flag = False + + def __init__(self, name, typedef_flag, base_type, is_external=0, check_size=None): + self.name = name + self.scope = None + self.typedef_flag = typedef_flag + if base_type is not None: + base_type.is_subclassed = True + self.base_type = base_type + self.module_name = None + self.objstruct_cname = None + self.typeobj_cname = None + self.typeptr_cname = None + self.vtabslot_cname = None + self.vtabstruct_cname = None + self.vtabptr_cname = None + self.vtable_cname = None + self.is_external = is_external + self.check_size = check_size or 'warn' + self.defered_declarations = [] + + def set_scope(self, scope): + self.scope = scope + if scope: + 
scope.parent_type = self + + def needs_nonecheck(self): + return True + + def subtype_of_resolved_type(self, other_type): + if other_type.is_extension_type or other_type.is_builtin_type: + return self is other_type or ( + self.base_type and self.base_type.subtype_of(other_type)) + else: + return other_type is py_object_type + + def typeobj_is_available(self): + # Do we have a pointer to the type object? + return self.typeptr_cname + + def typeobj_is_imported(self): + # If we don't know the C name of the type object but we do + # know which module it's defined in, it will be imported. + return self.typeobj_cname is None and self.module_name is not None + + def assignable_from(self, src_type): + if self == src_type: + return True + if isinstance(src_type, PyExtensionType): + if src_type.base_type is not None: + return self.assignable_from(src_type.base_type) + if isinstance(src_type, BuiltinObjectType): + # FIXME: This is an ugly special case that we currently + # keep supporting. It allows users to specify builtin + # types as external extension types, while keeping them + # compatible with the real builtin types. We already + # generate a warning for it. Big TODO: remove! 
+ return (self.module_name == '__builtin__' and + self.name == src_type.name) + return False + + def declaration_code(self, entity_code, + for_display = 0, dll_linkage = None, pyrex = 0, deref = 0): + if pyrex or for_display: + base_code = self.name + else: + if self.typedef_flag: + objstruct = self.objstruct_cname + else: + objstruct = "struct %s" % self.objstruct_cname + base_code = public_decl(objstruct, dll_linkage) + if deref: + assert not entity_code + else: + entity_code = "*%s" % entity_code + return self.base_declaration_code(base_code, entity_code) + + def type_test_code(self, py_arg, notnone=False): + + none_check = "((%s) == Py_None)" % py_arg + type_check = "likely(__Pyx_TypeTest(%s, %s))" % ( + py_arg, self.typeptr_cname) + if notnone: + return type_check + else: + return "likely(%s || %s)" % (none_check, type_check) + + def attributes_known(self): + return self.scope is not None + + def __str__(self): + return self.name + + def __repr__(self): + return "" % (self.scope.class_name, + ("", " typedef")[self.typedef_flag]) + + def py_type_name(self): + if not self.module_name: + return self.name + + return "__import__(%r, None, None, ['']).%s" % (self.module_name, + self.name) + +class CType(PyrexType): + # + # Base class for all C types (non-reference-counted). 
+ # + # to_py_function string C function for converting to Python object + # from_py_function string C function for constructing from Python object + # + + to_py_function = None + to_py_utility_code = None + from_py_function = None + from_py_utility_code = None + exception_value = None + exception_check = 1 + + def create_to_py_utility_code(self, env): + if self.to_py_function is not None: + if self.to_py_utility_code is not None: + env.use_utility_code(self.to_py_utility_code) + return True + return False + + def create_from_py_utility_code(self, env): + if self.from_py_function is not None: + if self.from_py_utility_code is not None: + env.use_utility_code(self.from_py_utility_code) + return True + return False + + def can_coerce_to_pyobject(self, env): + return self.create_to_py_utility_code(env) + + def can_coerce_from_pyobject(self, env): + return self.create_from_py_utility_code(env) + + def error_condition(self, result_code): + conds = [] + if self.is_string or self.is_pyunicode_ptr: + conds.append("(!%s)" % result_code) + elif self.exception_value is not None: + conds.append("(%s == (%s)%s)" % (result_code, self.sign_and_name(), self.exception_value)) + if self.exception_check: + conds.append("PyErr_Occurred()") + if len(conds) > 0: + return " && ".join(conds) + else: + return 0 + + def to_py_call_code(self, source_code, result_code, result_type, to_py_function=None): + func = self.to_py_function if to_py_function is None else to_py_function + assert func + if self.is_string or self.is_cpp_string: + if result_type.is_builtin_type: + result_type_name = result_type.name + if result_type_name in ('bytes', 'str', 'unicode'): + func = func.replace("Object", result_type_name.title(), 1) + elif result_type_name == 'bytearray': + func = func.replace("Object", "ByteArray", 1) + return '%s = %s(%s)' % ( + result_code, + func, + source_code or 'NULL') + + def from_py_call_code(self, source_code, result_code, error_pos, code, + from_py_function=None, 
error_condition=None, + special_none_cvalue=None): + return self._assign_from_py_code( + source_code, result_code, error_pos, code, from_py_function, error_condition, + special_none_cvalue=special_none_cvalue) + + + +class PythranExpr(CType): + # Pythran object of a given type + + to_py_function = "__Pyx_pythran_to_python" + is_pythran_expr = True + writable = True + has_attributes = 1 + + def __init__(self, pythran_type, org_buffer=None): + self.org_buffer = org_buffer + self.pythran_type = pythran_type + self.name = self.pythran_type + self.cname = self.pythran_type + self.from_py_function = "from_python<%s>" % (self.pythran_type) + self.scope = None + + def declaration_code(self, entity_code, for_display=0, dll_linkage=None, pyrex=0): + assert not pyrex + return "%s %s" % (self.cname, entity_code) + + def attributes_known(self): + if self.scope is None: + from . import Symtab + # FIXME: fake C scope, might be better represented by a struct or C++ class scope + self.scope = scope = Symtab.CClassScope( + '', None, visibility="extern", parent_type=self + ) + scope.directives = {} + + scope.declare_var("ndim", c_long_type, pos=None, cname="value", is_cdef=True) + scope.declare_cproperty( + "shape", c_ptr_type(c_long_type), "__Pyx_PythranShapeAccessor", + doc="Pythran array shape", + visibility="extern", + nogil=True, + ) + + return True + + def __eq__(self, other): + return isinstance(other, PythranExpr) and self.pythran_type == other.pythran_type + + def __ne__(self, other): + return not (isinstance(other, PythranExpr) and self.pythran_type == other.pythran_type) + + def __hash__(self): + return hash(self.pythran_type) + + +class CConstOrVolatileType(BaseType): + "A C const or volatile type" + + subtypes = ['cv_base_type'] + + is_cv_qualified = 1 + + def __init__(self, base_type, is_const=0, is_volatile=0): + self.cv_base_type = base_type + self.is_const = is_const + self.is_volatile = is_volatile + if base_type.has_attributes and base_type.scope is not None: + 
from .Symtab import CConstOrVolatileScope + self.scope = CConstOrVolatileScope(base_type.scope, is_const, is_volatile) + + def cv_string(self): + cvstring = "" + if self.is_const: + cvstring = "const " + cvstring + if self.is_volatile: + cvstring = "volatile " + cvstring + return cvstring + + def __repr__(self): + return "" % (self.cv_string(), self.cv_base_type) + + def __str__(self): + return self.declaration_code("", for_display=1) + + def declaration_code(self, entity_code, + for_display = 0, dll_linkage = None, pyrex = 0): + cv = self.cv_string() + if for_display or pyrex: + return cv + self.cv_base_type.declaration_code(entity_code, for_display, dll_linkage, pyrex) + else: + return self.cv_base_type.declaration_code(cv + entity_code, for_display, dll_linkage, pyrex) + + def specialize(self, values): + base_type = self.cv_base_type.specialize(values) + if base_type == self.cv_base_type: + return self + return CConstOrVolatileType(base_type, + self.is_const, self.is_volatile) + + def deduce_template_params(self, actual): + return self.cv_base_type.deduce_template_params(actual) + + def can_coerce_to_pyobject(self, env): + return self.cv_base_type.can_coerce_to_pyobject(env) + + def can_coerce_from_pyobject(self, env): + return self.cv_base_type.can_coerce_from_pyobject(env) + + def create_to_py_utility_code(self, env): + if self.cv_base_type.create_to_py_utility_code(env): + self.to_py_function = self.cv_base_type.to_py_function + return True + + def same_as_resolved_type(self, other_type): + if other_type.is_cv_qualified: + return self.cv_base_type.same_as_resolved_type(other_type.cv_base_type) + # Accept cv LHS <- non-cv RHS. + return self.cv_base_type.same_as_resolved_type(other_type) + + def __getattr__(self, name): + return getattr(self.cv_base_type, name) + + +def CConstType(base_type): + return CConstOrVolatileType(base_type, is_const=1) + + +class FusedType(CType): + """ + Represents a Fused Type. 
All it needs to do is keep track of the types + it aggregates, as it will be replaced with its specific version wherever + needed. + + See http://wiki.cython.org/enhancements/fusedtypes + + types [PyrexType] is the list of types to be fused + name str the name of the ctypedef + """ + + is_fused = 1 + exception_check = 0 + + def __init__(self, types, name=None): + # Use list rather than set to preserve order (list should be short). + flattened_types = [] + for t in types: + if t.is_fused: + # recursively merge in subtypes + if isinstance(t, FusedType): + t_types = t.types + else: + # handle types that aren't a fused type themselves but contain fused types + # for example a C++ template where the template type is fused. + t_fused_types = t.get_fused_types() + t_types = [] + for substitution in product( + *[fused_type.types for fused_type in t_fused_types] + ): + t_types.append( + t.specialize( + { + fused_type: sub + for fused_type, sub in zip( + t_fused_types, substitution + ) + } + ) + ) + for subtype in t_types: + if subtype not in flattened_types: + flattened_types.append(subtype) + elif t not in flattened_types: + flattened_types.append(t) + self.types = flattened_types + self.name = name + + def declaration_code(self, entity_code, for_display = 0, + dll_linkage = None, pyrex = 0): + if pyrex or for_display: + return self.name + + raise Exception("This may never happen, please report a bug") + + def __repr__(self): + return 'FusedType(name=%r)' % self.name + + def specialize(self, values): + if self in values: + return values[self] + else: + raise CannotSpecialize() + + def get_fused_types(self, result=None, seen=None, include_function_return_type=False): + if result is None: + return [self] + + if self not in seen: + result.append(self) + seen.add(self) + + +class CVoidType(CType): + # + # C "void" type + # + + is_void = 1 + to_py_function = "__Pyx_void_to_None" + + def __repr__(self): + return "" + + def declaration_code(self, entity_code, + for_display = 0, 
dll_linkage = None, pyrex = 0): + if pyrex or for_display: + base_code = "void" + else: + base_code = public_decl("void", dll_linkage) + return self.base_declaration_code(base_code, entity_code) + + def is_complete(self): + return 0 + +class InvisibleVoidType(CVoidType): + # + # For use with C++ constructors and destructors return types. + # Acts like void, but does not print out a declaration. + # + def declaration_code(self, entity_code, + for_display = 0, dll_linkage = None, pyrex = 0): + if pyrex or for_display: + base_code = "[void]" + else: + base_code = public_decl("", dll_linkage) + return self.base_declaration_code(base_code, entity_code) + + +class CNumericType(CType): + # + # Base class for all C numeric types. + # + # rank integer Relative size + # signed integer 0 = unsigned, 1 = unspecified, 2 = explicitly signed + # + + is_numeric = 1 + default_value = "0" + has_attributes = True + scope = None + + sign_words = ("unsigned ", "", "signed ") + + def __init__(self, rank, signed = 1): + self.rank = rank + if rank > 0 and signed == SIGNED: + # Signed is meaningless for anything but char, and complicates + # type promotion. + signed = 1 + self.signed = signed + + def sign_and_name(self): + s = self.sign_words[self.signed] + n = rank_to_type_name[self.rank] + return s + n + + def is_simple_buffer_dtype(self): + return True + + def __repr__(self): + return "" % self.sign_and_name() + + def declaration_code(self, entity_code, + for_display = 0, dll_linkage = None, pyrex = 0): + type_name = self.sign_and_name() + if pyrex or for_display: + base_code = type_name.replace('PY_LONG_LONG', 'long long') + else: + base_code = public_decl(type_name, dll_linkage) + base_code = StringEncoding.EncodedString(base_code) + return self.base_declaration_code(base_code, entity_code) + + def attributes_known(self): + if self.scope is None: + from . 
import Symtab + self.scope = scope = Symtab.CClassScope( + '', + None, + visibility="extern", + parent_type=self) + scope.directives = {} + scope.declare_cfunction( + "conjugate", + CFuncType(self, [CFuncTypeArg("self", self, None)], nogil=True), + pos=None, + defining=1, + cname=" ") + return True + + def __lt__(self, other): + """Sort based on rank, preferring signed over unsigned""" + if other.is_numeric: + return self.rank > other.rank and self.signed >= other.signed + + # Prefer numeric types over others + return True + + def py_type_name(self): + if self.rank <= 4: + return "int" + return "float" + + +class ForbidUseClass: + def __repr__(self): + raise RuntimeError() + def __str__(self): + raise RuntimeError() +ForbidUse = ForbidUseClass() + + +class CIntLike(object): + """Mixin for shared behaviour of C integers and enums. + """ + to_py_function = None + from_py_function = None + to_pyunicode_utility = None + default_format_spec = 'd' + + def can_coerce_to_pyobject(self, env): + return True + + def can_coerce_from_pyobject(self, env): + return True + + def create_to_py_utility_code(self, env): + if type(self).to_py_function is None: + self.to_py_function = "__Pyx_PyInt_From_" + self.specialization_name() + env.use_utility_code(TempitaUtilityCode.load_cached( + "CIntToPy", "TypeConversion.c", + context={"TYPE": self.empty_declaration_code(), + "TO_PY_FUNCTION": self.to_py_function})) + return True + + def create_from_py_utility_code(self, env): + if type(self).from_py_function is None: + self.from_py_function = "__Pyx_PyInt_As_" + self.specialization_name() + env.use_utility_code(TempitaUtilityCode.load_cached( + "CIntFromPy", "TypeConversion.c", + context={ + "TYPE": self.empty_declaration_code(), + "FROM_PY_FUNCTION": self.from_py_function, + "IS_ENUM": self.is_enum, + })) + return True + + @staticmethod + def _parse_format(format_spec): + padding = ' ' + if not format_spec: + return ('d', 0, padding) + format_type = format_spec[-1] + if format_type in 
('o', 'd', 'x', 'X'): + prefix = format_spec[:-1] + elif format_type.isdigit(): + format_type = 'd' + prefix = format_spec + else: + return (None, 0, padding) + if not prefix: + return (format_type, 0, padding) + if prefix[0] == '-': + prefix = prefix[1:] + if prefix and prefix[0] == '0': + padding = '0' + prefix = prefix.lstrip('0') + if prefix.isdigit(): + return (format_type, int(prefix), padding) + return (None, 0, padding) + + def can_coerce_to_pystring(self, env, format_spec=None): + format_type, width, padding = self._parse_format(format_spec) + return format_type is not None and width <= 2**30 + + def convert_to_pystring(self, cvalue, code, format_spec=None): + if self.to_pyunicode_utility is None: + utility_code_name = "__Pyx_PyUnicode_From_" + self.specialization_name() + to_pyunicode_utility = TempitaUtilityCode.load_cached( + "CIntToPyUnicode", "TypeConversion.c", + context={"TYPE": self.empty_declaration_code(), + "TO_PY_FUNCTION": utility_code_name}) + self.to_pyunicode_utility = (utility_code_name, to_pyunicode_utility) + else: + utility_code_name, to_pyunicode_utility = self.to_pyunicode_utility + code.globalstate.use_utility_code(to_pyunicode_utility) + format_type, width, padding_char = self._parse_format(format_spec) + return "%s(%s, %d, '%s', '%s')" % (utility_code_name, cvalue, width, padding_char, format_type) + + +class CIntType(CIntLike, CNumericType): + + is_int = 1 + typedef_flag = 0 + exception_value = -1 + + def get_to_py_type_conversion(self): + if self.rank < list(rank_to_type_name).index('int'): + # This assumes sizeof(short) < sizeof(int) + return "PyInt_FromLong" + else: + # Py{Int|Long}_From[Unsigned]Long[Long] + Prefix = "Int" + SignWord = "" + TypeName = "Long" + if not self.signed: + Prefix = "Long" + SignWord = "Unsigned" + if self.rank >= list(rank_to_type_name).index('PY_LONG_LONG'): + Prefix = "Long" + TypeName = "LongLong" + return "Py%s_From%s%s" % (Prefix, SignWord, TypeName) + + def assignable_from_resolved_type(self, 
src_type): + return src_type.is_int or src_type.is_enum or src_type is error_type + + def invalid_value(self): + if rank_to_type_name[int(self.rank)] == 'char': + return "'?'" + else: + # We do not really know the size of the type, so return + # a 32-bit literal and rely on casting to final type. It will + # be negative for signed ints, which is good. + return "0xbad0bad0" + + def overflow_check_binop(self, binop, env, const_rhs=False): + env.use_utility_code(UtilityCode.load("Common", "Overflow.c")) + type = self.empty_declaration_code() + name = self.specialization_name() + if binop == "lshift": + env.use_utility_code(TempitaUtilityCode.load_cached( + "LeftShift", "Overflow.c", + context={'TYPE': type, 'NAME': name, 'SIGNED': self.signed})) + else: + if const_rhs: + binop += "_const" + if type in ('int', 'long', 'long long'): + env.use_utility_code(TempitaUtilityCode.load_cached( + "BaseCaseSigned", "Overflow.c", + context={'INT': type, 'NAME': name})) + elif type in ('unsigned int', 'unsigned long', 'unsigned long long'): + env.use_utility_code(TempitaUtilityCode.load_cached( + "BaseCaseUnsigned", "Overflow.c", + context={'UINT': type, 'NAME': name})) + elif self.rank <= 1: + # sizeof(short) < sizeof(int) + return "__Pyx_%s_%s_no_overflow" % (binop, name) + else: + _load_overflow_base(env) + env.use_utility_code(TempitaUtilityCode.load_cached( + "SizeCheck", "Overflow.c", + context={'TYPE': type, 'NAME': name})) + env.use_utility_code(TempitaUtilityCode.load_cached( + "Binop", "Overflow.c", + context={'TYPE': type, 'NAME': name, 'BINOP': binop})) + return "__Pyx_%s_%s_checking_overflow" % (binop, name) + + +def _load_overflow_base(env): + env.use_utility_code(UtilityCode.load("Common", "Overflow.c")) + for type in ('int', 'long', 'long long'): + env.use_utility_code(TempitaUtilityCode.load_cached( + "BaseCaseSigned", "Overflow.c", + context={'INT': type, 'NAME': type.replace(' ', '_')})) + for type in ('unsigned int', 'unsigned long', 'unsigned long long'): + 
env.use_utility_code(TempitaUtilityCode.load_cached( + "BaseCaseUnsigned", "Overflow.c", + context={'UINT': type, 'NAME': type.replace(' ', '_')})) + + +class CAnonEnumType(CIntType): + + is_enum = 1 + + def sign_and_name(self): + return 'int' + + def specialization_name(self): + # ensure that the to/from Python functions don't conflict with + # "int" + return '__pyx_anon_enum' + + +class CReturnCodeType(CIntType): + + to_py_function = "__Pyx_Owned_Py_None" + + is_returncode = True + exception_check = False + default_format_spec = '' + + def specialization_name(self): + # I don't think we should end up creating PyInt_As_int/PyInt_From_int functions + # for this type, but it's better they're distinct in case it happens. + return super(CReturnCodeType, self).specialization_name() + "return_code" + + def can_coerce_to_pystring(self, env, format_spec=None): + return not format_spec + + def convert_to_pystring(self, cvalue, code, format_spec=None): + return "__Pyx_NewRef(%s)" % code.globalstate.get_py_string_const(StringEncoding.EncodedString("None")).cname + + +class CBIntType(CIntType): + + to_py_function = "__Pyx_PyBool_FromLong" + from_py_function = "__Pyx_PyObject_IsTrue" + exception_check = 1 # for C++ bool + default_format_spec = '' + + def can_coerce_to_pystring(self, env, format_spec=None): + return not format_spec or super(CBIntType, self).can_coerce_to_pystring(env, format_spec) + + def convert_to_pystring(self, cvalue, code, format_spec=None): + if format_spec: + return super(CBIntType, self).convert_to_pystring(cvalue, code, format_spec) + # NOTE: no caching here as the string constant cnames depend on the current module + utility_code_name = "__Pyx_PyUnicode_FromBInt_" + self.specialization_name() + to_pyunicode_utility = TempitaUtilityCode.load_cached( + "CBIntToPyUnicode", "TypeConversion.c", context={ + "TRUE_CONST": code.globalstate.get_py_string_const(StringEncoding.EncodedString("True")).cname, + "FALSE_CONST": 
code.globalstate.get_py_string_const(StringEncoding.EncodedString("False")).cname, + "TO_PY_FUNCTION": utility_code_name, + }) + code.globalstate.use_utility_code(to_pyunicode_utility) + return "%s(%s)" % (utility_code_name, cvalue) + + def declaration_code(self, entity_code, + for_display = 0, dll_linkage = None, pyrex = 0): + if for_display: + base_code = 'bool' + elif pyrex: + base_code = 'bint' + else: + base_code = public_decl('int', dll_linkage) + return self.base_declaration_code(base_code, entity_code) + + def specialization_name(self): + return "bint" + + def __repr__(self): + return "" + + def __str__(self): + return 'bint' + + def py_type_name(self): + return "bool" + + +class CPyUCS4IntType(CIntType): + # Py_UCS4 + + is_unicode_char = True + + # Py_UCS4 coerces from and to single character unicode strings (or + # at most two characters on 16bit Unicode builds), but we also + # allow Python integers as input. The value range for Py_UCS4 + # is 0..1114111, which is checked when converting from an integer + # value. + + to_py_function = "__Pyx_PyUnicode_FromOrdinal" + from_py_function = "__Pyx_PyObject_AsPy_UCS4" + + def can_coerce_to_pystring(self, env, format_spec=None): + return False # does the right thing anyway + + def create_from_py_utility_code(self, env): + env.use_utility_code(UtilityCode.load_cached("ObjectAsUCS4", "TypeConversion.c")) + return True + + def sign_and_name(self): + return "Py_UCS4" + + +class CPyUnicodeIntType(CIntType): + # Py_UNICODE + + is_unicode_char = True + + # Py_UNICODE coerces from and to single character unicode strings, + # but we also allow Python integers as input. The value range for + # Py_UNICODE is 0..1114111, which is checked when converting from + # an integer value. 
+ + to_py_function = "__Pyx_PyUnicode_FromOrdinal" + from_py_function = "__Pyx_PyObject_AsPy_UNICODE" + + def can_coerce_to_pystring(self, env, format_spec=None): + return False # does the right thing anyway + + def create_from_py_utility_code(self, env): + env.use_utility_code(UtilityCode.load_cached("ObjectAsPyUnicode", "TypeConversion.c")) + return True + + def sign_and_name(self): + return "Py_UNICODE" + + +class CPyHashTType(CIntType): + + to_py_function = "__Pyx_PyInt_FromHash_t" + from_py_function = "__Pyx_PyInt_AsHash_t" + + def sign_and_name(self): + return "Py_hash_t" + +class CPySSizeTType(CIntType): + + to_py_function = "PyInt_FromSsize_t" + from_py_function = "__Pyx_PyIndex_AsSsize_t" + + def sign_and_name(self): + return "Py_ssize_t" + +class CSSizeTType(CIntType): + + to_py_function = "PyInt_FromSsize_t" + from_py_function = "PyInt_AsSsize_t" + + def sign_and_name(self): + return "Py_ssize_t" + +class CSizeTType(CIntType): + + to_py_function = "__Pyx_PyInt_FromSize_t" + + def sign_and_name(self): + return "size_t" + +class CPtrdiffTType(CIntType): + + def sign_and_name(self): + return "ptrdiff_t" + + +class CFloatType(CNumericType): + + is_float = 1 + to_py_function = "PyFloat_FromDouble" + from_py_function = "__pyx_PyFloat_AsDouble" + + exception_value = -1 + + def __init__(self, rank, math_h_modifier = ''): + CNumericType.__init__(self, rank, 1) + self.math_h_modifier = math_h_modifier + if rank == RANK_FLOAT: + self.from_py_function = "__pyx_PyFloat_AsFloat" + + def assignable_from_resolved_type(self, src_type): + return (src_type.is_numeric and not src_type.is_complex) or src_type is error_type + + def invalid_value(self): + return Naming.PYX_NAN + +class CComplexType(CNumericType): + + is_complex = 1 + has_attributes = 1 + scope = None + + @property + def to_py_function(self): + return "__pyx_PyComplex_FromComplex%s" % self.implementation_suffix + + def __init__(self, real_type): + while real_type.is_typedef and not 
real_type.typedef_is_external: + real_type = real_type.typedef_base_type + self.funcsuffix = "_%s" % real_type.specialization_name() + if not real_type.is_float: + # neither C nor C++ supports non-floating complex numbers, + # so fall back the on Cython implementation. + self.implementation_suffix = "_Cy" + elif real_type.is_typedef and real_type.typedef_is_external: + # C can't handle typedefs in complex numbers, + # so in this case also fall back on the Cython implementation. + self.implementation_suffix = "_CyTypedef" + else: + self.implementation_suffix = "" + if real_type.is_float: + self.math_h_modifier = real_type.math_h_modifier + else: + self.math_h_modifier = "_UNUSED" + + self.real_type = real_type + CNumericType.__init__(self, real_type.rank + 0.5, real_type.signed) + self.binops = {} + self.from_parts = "%s_from_parts" % self.specialization_name() + self.default_value = "%s(0, 0)" % self.from_parts + + def __eq__(self, other): + if isinstance(self, CComplexType) and isinstance(other, CComplexType): + return self.real_type == other.real_type + else: + return False + + def __ne__(self, other): + if isinstance(self, CComplexType) and isinstance(other, CComplexType): + return self.real_type != other.real_type + else: + return True + + def __lt__(self, other): + if isinstance(self, CComplexType) and isinstance(other, CComplexType): + return self.real_type < other.real_type + else: + # this is arbitrary, but it makes sure we always have + # *some* kind of order + return False + + def __hash__(self): + return ~hash(self.real_type) + + def declaration_code(self, entity_code, + for_display = 0, dll_linkage = None, pyrex = 0): + if pyrex or for_display: + real_code = self.real_type.declaration_code("", for_display, dll_linkage, pyrex) + base_code = "%s complex" % real_code + else: + base_code = public_decl(self.sign_and_name(), dll_linkage) + return self.base_declaration_code(base_code, entity_code) + + def sign_and_name(self): + real_type_name = 
self.real_type.specialization_name() + real_type_name = real_type_name.replace('long__double','long_double') + real_type_name = real_type_name.replace('PY_LONG_LONG','long_long') + return Naming.type_prefix + real_type_name + "_complex" + + def assignable_from(self, src_type): + # Temporary hack/feature disabling, see #441 + if (not src_type.is_complex and src_type.is_numeric and src_type.is_typedef + and src_type.typedef_is_external): + return False + elif src_type.is_pyobject: + return True + else: + return super(CComplexType, self).assignable_from(src_type) + + def assignable_from_resolved_type(self, src_type): + return (src_type.is_complex and self.real_type.assignable_from_resolved_type(src_type.real_type) + or src_type.is_numeric and self.real_type.assignable_from_resolved_type(src_type) + or src_type is error_type) + + def attributes_known(self): + if self.scope is None: + from . import Symtab + self.scope = scope = Symtab.CClassScope( + '', + None, + visibility="extern", + parent_type=self) + scope.directives = {} + scope.declare_var("real", self.real_type, None, cname="real", is_cdef=True) + scope.declare_var("imag", self.real_type, None, cname="imag", is_cdef=True) + scope.declare_cfunction( + "conjugate", + CFuncType(self, [CFuncTypeArg("self", self, None)], nogil=True), + pos=None, + defining=1, + cname="__Pyx_c_conj%s" % self.funcsuffix) + + return True + + def _utility_code_context(self): + return { + 'type': self.empty_declaration_code(), + 'type_name': self.specialization_name(), + 'real_type': self.real_type.empty_declaration_code(), + 'func_suffix': self.funcsuffix, + 'm': self.math_h_modifier, + 'is_float': int(self.real_type.is_float), + 'is_extern_float_typedef': int( + self.real_type.is_float and self.real_type.is_typedef and self.real_type.typedef_is_external) + } + + def create_declaration_utility_code(self, env): + # This must always be run, because a single CComplexType instance can be shared + # across multiple compilations (the one 
created in the module scope) + if self.real_type.is_float: + env.use_utility_code(UtilityCode.load_cached('Header', 'Complex.c')) + utility_code_context = self._utility_code_context() + env.use_utility_code(UtilityCode.load_cached( + 'RealImag' + self.implementation_suffix, 'Complex.c')) + env.use_utility_code(TempitaUtilityCode.load_cached( + 'Declarations', 'Complex.c', utility_code_context)) + env.use_utility_code(TempitaUtilityCode.load_cached( + 'Arithmetic', 'Complex.c', utility_code_context)) + return True + + def can_coerce_to_pyobject(self, env): + return True + + def can_coerce_from_pyobject(self, env): + return True + + def create_to_py_utility_code(self, env): + env.use_utility_code(TempitaUtilityCode.load_cached( + 'ToPy', 'Complex.c', self._utility_code_context())) + return True + + def create_from_py_utility_code(self, env): + env.use_utility_code(TempitaUtilityCode.load_cached( + 'FromPy', 'Complex.c', self._utility_code_context())) + self.from_py_function = "__Pyx_PyComplex_As_" + self.specialization_name() + return True + + def lookup_op(self, nargs, op): + try: + return self.binops[nargs, op] + except KeyError: + pass + try: + op_name = complex_ops[nargs, op] + self.binops[nargs, op] = func_name = "__Pyx_c_%s%s" % (op_name, self.funcsuffix) + return func_name + except KeyError: + return None + + def unary_op(self, op): + return self.lookup_op(1, op) + + def binary_op(self, op): + return self.lookup_op(2, op) + + def py_type_name(self): + return "complex" + + def cast_code(self, expr_code): + return expr_code + + def real_code(self, expr_code): + return "__Pyx_CREAL%s(%s)" % (self.implementation_suffix, expr_code) + + def imag_code(self, expr_code): + return "__Pyx_CIMAG%s(%s)" % (self.implementation_suffix, expr_code) + +complex_ops = { + (1, '-'): 'neg', + (1, 'zero'): 'is_zero', + (2, '+'): 'sum', + (2, '-'): 'diff', + (2, '*'): 'prod', + (2, '/'): 'quot', + (2, '**'): 'pow', + (2, '=='): 'eq', +} + + +class SoftCComplexType(CComplexType): + 
""" + a**b in Python can return either a complex or a float + depending on the sign of a. This "soft complex" type is + stored as a C complex (and so is a little slower than a + direct C double) but it prints/coerces to a float if + the imaginary part is 0. Therefore it provides a C + representation of the Python behaviour. + """ + + to_py_function = "__pyx_Py_FromSoftComplex" + + def __init__(self): + super(SoftCComplexType, self).__init__(c_double_type) + + def declaration_code(self, entity_code, for_display=0, dll_linkage=None, pyrex=0): + base_result = super(SoftCComplexType, self).declaration_code( + entity_code, + for_display=for_display, + dll_linkage=dll_linkage, + pyrex=pyrex, + ) + if for_display: + return "soft %s" % base_result + else: + return base_result + + def create_to_py_utility_code(self, env): + env.use_utility_code(UtilityCode.load_cached('SoftComplexToPy', 'Complex.c')) + return True + + def __repr__(self): + result = super(SoftCComplexType, self).__repr__() + assert result[-1] == ">" + return "%s (soft)%s" % (result[:-1], result[-1]) + +class CPyTSSTType(CType): + # + # PEP-539 "Py_tss_t" type + # + + declaration_value = "Py_tss_NEEDS_INIT" + + def __repr__(self): + return "" + + def declaration_code(self, entity_code, + for_display=0, dll_linkage=None, pyrex=0): + if pyrex or for_display: + base_code = "Py_tss_t" + else: + base_code = public_decl("Py_tss_t", dll_linkage) + return self.base_declaration_code(base_code, entity_code) + + +class CPointerBaseType(CType): + # common base type for pointer/array types + # + # base_type CType Reference type + + subtypes = ['base_type'] + + def __init__(self, base_type): + self.base_type = base_type + if base_type.is_cv_qualified: + base_type = base_type.cv_base_type + for char_type in (c_char_type, c_uchar_type, c_schar_type): + if base_type.same_as(char_type): + self.is_string = 1 + break + else: + if base_type.same_as(c_py_unicode_type): + self.is_pyunicode_ptr = 1 + + if self.is_string and not 
base_type.is_error: + if base_type.signed == 2: + self.to_py_function = "__Pyx_PyObject_FromCString" + if self.is_ptr: + self.from_py_function = "__Pyx_PyObject_As%sSString" + elif base_type.signed: + self.to_py_function = "__Pyx_PyObject_FromString" + if self.is_ptr: + self.from_py_function = "__Pyx_PyObject_As%sString" + else: + self.to_py_function = "__Pyx_PyObject_FromCString" + if self.is_ptr: + self.from_py_function = "__Pyx_PyObject_As%sUString" + if self.is_ptr: + self.from_py_function %= '' if self.base_type.is_const else 'Writable' + self.exception_value = "NULL" + elif self.is_pyunicode_ptr and not base_type.is_error: + self.to_py_function = "__Pyx_PyUnicode_FromUnicode" + self.to_py_utility_code = UtilityCode.load_cached( + "pyunicode_from_unicode", "StringTools.c") + if self.is_ptr: + self.from_py_function = "__Pyx_PyUnicode_AsUnicode" + self.exception_value = "NULL" + + def py_type_name(self): + if self.is_string: + return "bytes" + elif self.is_pyunicode_ptr: + return "unicode" + else: + return super(CPointerBaseType, self).py_type_name() + + def literal_code(self, value): + if self.is_string: + assert isinstance(value, str) + return '"%s"' % StringEncoding.escape_byte_string(value) + return str(value) + + +class CArrayType(CPointerBaseType): + # base_type CType Element type + # size integer or None Number of elements + + is_array = 1 + to_tuple_function = None + + def __init__(self, base_type, size): + super(CArrayType, self).__init__(base_type) + self.size = size + + def __eq__(self, other): + if isinstance(other, CType) and other.is_array and self.size == other.size: + return self.base_type.same_as(other.base_type) + return False + + def __hash__(self): + return hash(self.base_type) + 28 # arbitrarily chosen offset + + def __repr__(self): + return "" % (self.size, repr(self.base_type)) + + def same_as_resolved_type(self, other_type): + return ((other_type.is_array and + self.base_type.same_as(other_type.base_type)) + or other_type is error_type) + 
+ def assignable_from_resolved_type(self, src_type): + # C arrays are assigned by value, either Python containers or C arrays/pointers + if src_type.is_pyobject: + return True + if src_type.is_ptr or src_type.is_array: + return self.base_type.assignable_from(src_type.base_type) + return False + + def element_ptr_type(self): + return c_ptr_type(self.base_type) + + def declaration_code(self, entity_code, + for_display = 0, dll_linkage = None, pyrex = 0): + if self.size is not None: + dimension_code = self.size + else: + dimension_code = "" + if entity_code.startswith("*"): + entity_code = "(%s)" % entity_code + return self.base_type.declaration_code( + "%s[%s]" % (entity_code, dimension_code), + for_display, dll_linkage, pyrex) + + def as_argument_type(self): + return c_ptr_type(self.base_type) + + def is_complete(self): + return self.size is not None + + def specialize(self, values): + base_type = self.base_type.specialize(values) + if base_type == self.base_type: + return self + else: + return CArrayType(base_type, self.size) + + def deduce_template_params(self, actual): + if isinstance(actual, CArrayType): + return self.base_type.deduce_template_params(actual.base_type) + else: + return {} + + def can_coerce_to_pyobject(self, env): + return self.base_type.can_coerce_to_pyobject(env) + + def can_coerce_from_pyobject(self, env): + return self.base_type.can_coerce_from_pyobject(env) + + def create_to_py_utility_code(self, env): + if self.to_py_function is not None: + return self.to_py_function + if not self.base_type.create_to_py_utility_code(env): + return False + + safe_typename = self.base_type.specialization_name() + to_py_function = "__Pyx_carray_to_py_%s" % safe_typename + to_tuple_function = "__Pyx_carray_to_tuple_%s" % safe_typename + + from .UtilityCode import CythonUtilityCode + context = { + 'cname': to_py_function, + 'to_tuple_cname': to_tuple_function, + 'base_type': self.base_type, + } + env.use_utility_code(CythonUtilityCode.load( + "carray.to_py", 
"CConvert.pyx", + outer_module_scope=env.global_scope(), # need access to types declared in module + context=context, compiler_directives=dict(env.global_scope().directives))) + self.to_tuple_function = to_tuple_function + self.to_py_function = to_py_function + return True + + def to_py_call_code(self, source_code, result_code, result_type, to_py_function=None): + func = self.to_py_function if to_py_function is None else to_py_function + if self.is_string or self.is_pyunicode_ptr: + return '%s = %s(%s)' % ( + result_code, + func, + source_code) + target_is_tuple = result_type.is_builtin_type and result_type.name == 'tuple' + return '%s = %s(%s, %s)' % ( + result_code, + self.to_tuple_function if target_is_tuple else func, + source_code, + self.size) + + def create_from_py_utility_code(self, env): + if self.from_py_function is not None: + return self.from_py_function + if not self.base_type.create_from_py_utility_code(env): + return False + + from_py_function = "__Pyx_carray_from_py_%s" % self.base_type.specialization_name() + + from .UtilityCode import CythonUtilityCode + context = { + 'cname': from_py_function, + 'base_type': self.base_type, + } + env.use_utility_code(CythonUtilityCode.load( + "carray.from_py", "CConvert.pyx", + outer_module_scope=env.global_scope(), # need access to types declared in module + context=context, compiler_directives=dict(env.global_scope().directives))) + self.from_py_function = from_py_function + return True + + def from_py_call_code(self, source_code, result_code, error_pos, code, + from_py_function=None, error_condition=None, + special_none_cvalue=None): + assert not error_condition, '%s: %s' % (error_pos, error_condition) + assert not special_none_cvalue, '%s: %s' % (error_pos, special_none_cvalue) # not currently supported + call_code = "%s(%s, %s, %s)" % ( + from_py_function or self.from_py_function, + source_code, result_code, self.size) + return code.error_goto_if_neg(call_code, error_pos) + + def error_condition(self, 
result_code): + # It isn't possible to use CArrays as return type so the error_condition + # is irrelevant. Returning a falsy value does avoid an error when getting + # from_py_call_code from a typedef. + return "" + + +class CPtrType(CPointerBaseType): + # base_type CType Reference type + + is_ptr = 1 + default_value = "0" + exception_value = "NULL" + + def __hash__(self): + return hash(self.base_type) + 27 # arbitrarily chosen offset + + def __eq__(self, other): + if isinstance(other, CType) and other.is_ptr: + return self.base_type.same_as(other.base_type) + return False + + def __ne__(self, other): + return not (self == other) + + def __repr__(self): + return "" % repr(self.base_type) + + def same_as_resolved_type(self, other_type): + return ((other_type.is_ptr and + self.base_type.same_as(other_type.base_type)) + or other_type is error_type) + + def is_simple_buffer_dtype(self): + return True + + def declaration_code(self, entity_code, + for_display = 0, dll_linkage = None, pyrex = 0): + #print "CPtrType.declaration_code: pointer to", self.base_type ### + return self.base_type.declaration_code( + "*%s" % entity_code, + for_display, dll_linkage, pyrex) + + def assignable_from_resolved_type(self, other_type): + if other_type is error_type: + return 1 + if other_type.is_null_ptr: + return 1 + if self.base_type.is_cv_qualified: + self = CPtrType(self.base_type.cv_base_type) + if self.base_type.is_cfunction: + if other_type.is_ptr: + other_type = other_type.base_type.resolve() + if other_type.is_cfunction: + return self.base_type.pointer_assignable_from_resolved_type(other_type) + else: + return 0 + if (self.base_type.is_cpp_class and other_type.is_ptr + and other_type.base_type.is_cpp_class and other_type.base_type.is_subclass(self.base_type)): + return 1 + if other_type.is_array or other_type.is_ptr: + return self.base_type.is_void or self.base_type.same_as(other_type.base_type) + return 0 + + def assignment_failure_extra_info(self, src_type, src_name): + if 
self.base_type.is_cfunction and src_type.is_ptr: + src_type = src_type.base_type.resolve() + if self.base_type.is_cfunction and src_type.is_cfunction: + copied_src_type = copy.copy(src_type) + # make the exception values the same as us + copied_src_type.exception_check = self.base_type.exception_check + copied_src_type.exception_value = self.base_type.exception_value + if self.base_type.pointer_assignable_from_resolved_type(copied_src_type): + # the only reason we can't assign is because of exception incompatibility + msg = "Exception values are incompatible." + if not self.base_type.exception_check and not self.base_type.exception_value: + if src_name is None: + src_name = "the value being assigned" + else: + src_name = "'{}'".format(src_name) + msg += " Suggest adding 'noexcept' to the type of {0}.".format(src_name) + return msg + return super(CPtrType, self).assignment_failure_extra_info(src_type, src_name) + + def specialize(self, values): + base_type = self.base_type.specialize(values) + if base_type == self.base_type: + return self + else: + return CPtrType(base_type) + + def deduce_template_params(self, actual): + if isinstance(actual, CPtrType): + return self.base_type.deduce_template_params(actual.base_type) + else: + return {} + + def invalid_value(self): + return "1" + + def find_cpp_operation_type(self, operator, operand_type=None): + if self.base_type.is_cpp_class: + return self.base_type.find_cpp_operation_type(operator, operand_type) + return None + + def get_fused_types(self, result=None, seen=None, include_function_return_type=False): + # For function pointers, include the return type - unlike for fused functions themselves, + # where the return type cannot be an independent fused type (i.e. is derived or non-fused). 
+ return super(CPointerBaseType, self).get_fused_types(result, seen, include_function_return_type=True) + + +class CNullPtrType(CPtrType): + + is_null_ptr = 1 + + +class CReferenceBaseType(BaseType): + + is_fake_reference = 0 + + # Common base type for C reference and C++ rvalue reference types. + + subtypes = ['ref_base_type'] + + def __init__(self, base_type): + self.ref_base_type = base_type + + def __repr__(self): + return "<%r %s>" % (self.__class__.__name__, self.ref_base_type) + + def specialize(self, values): + base_type = self.ref_base_type.specialize(values) + if base_type == self.ref_base_type: + return self + else: + return type(self)(base_type) + + def deduce_template_params(self, actual): + return self.ref_base_type.deduce_template_params(actual) + + def __getattr__(self, name): + return getattr(self.ref_base_type, name) + + +class CReferenceType(CReferenceBaseType): + + is_reference = 1 + + def __str__(self): + return "%s &" % self.ref_base_type + + def declaration_code(self, entity_code, + for_display = 0, dll_linkage = None, pyrex = 0): + #print "CReferenceType.declaration_code: pointer to", self.base_type ### + return self.ref_base_type.declaration_code( + "&%s" % entity_code, + for_display, dll_linkage, pyrex) + + +class CFakeReferenceType(CReferenceType): + + is_fake_reference = 1 + + def __str__(self): + return "%s [&]" % self.ref_base_type + + def declaration_code(self, entity_code, + for_display = 0, dll_linkage = None, pyrex = 0): + #print "CReferenceType.declaration_code: pointer to", self.base_type ### + return "__Pyx_FakeReference<%s> %s" % (self.ref_base_type.empty_declaration_code(), entity_code) + + +class CppRvalueReferenceType(CReferenceBaseType): + + is_rvalue_reference = 1 + + def __str__(self): + return "%s &&" % self.ref_base_type + + def declaration_code(self, entity_code, + for_display = 0, dll_linkage = None, pyrex = 0): + return self.ref_base_type.declaration_code( + "&&%s" % entity_code, + for_display, dll_linkage, pyrex) + 

class CFuncType(CType):
    """Type of a C function (return type, argument list and call semantics)."""
    #  return_type      CType
    #  args             [CFuncTypeArg]
    #  has_varargs      boolean
    #  exception_value  string
    #  exception_check  boolean    True if PyErr_Occurred check needed
    #  calling_convention  string  Function calling convention
    #  nogil            boolean    Can be called without gil
    #  with_gil         boolean    Acquire gil around function body
    #  templates        [string] or None
    #  cached_specialized_types [CFuncType]   cached specialized versions of the CFuncType if defined in a pxd
    #  from_fused       boolean    Indicates whether this is a specialized
    #                              C function
    #  is_strict_signature boolean  function refuses to accept coerced arguments
    #                               (used for optimisation overrides)
    #  is_const_method  boolean
    #  is_static_method boolean
    #  op_arg_struct    CPtrType   Pointer to optional argument struct

    is_cfunction = 1
    original_sig = None
    cached_specialized_types = None
    from_fused = False
    is_const_method = False
    op_arg_struct = None

    subtypes = ['return_type', 'args']

    def __init__(self, return_type, args, has_varargs = 0,
            exception_value = None, exception_check = 0, calling_convention = "",
            nogil = 0, with_gil = 0, is_overridable = 0, optional_arg_count = 0,
            is_const_method = False, is_static_method=False,
            templates = None, is_strict_signature = False):
        self.return_type = return_type
        self.args = args
        self.has_varargs = has_varargs
        self.optional_arg_count = optional_arg_count
        self.exception_value = exception_value
        self.exception_check = exception_check
        self.calling_convention = calling_convention
        self.nogil = nogil
        self.with_gil = with_gil
        self.is_overridable = is_overridable
        self.is_const_method = is_const_method
        self.is_static_method = is_static_method
        self.templates = templates
        self.is_strict_signature = is_strict_signature

    def __repr__(self):
        arg_reprs = list(map(repr, self.args))
        if self.has_varargs:
            arg_reprs.append("...")
        if self.exception_value:
            except_clause = " %r" % self.exception_value
        else:
            except_clause = ""
        if self.exception_check:
            except_clause += "?"
        # BUG FIX: the format string had been lost ("" % (4-tuple) raises
        # TypeError: "not all arguments converted during string formatting").
        # Restore the angle-bracketed repr with one specifier per value.
        return "<CFuncType %s %s[%s]%s>" % (
            repr(self.return_type),
            self.calling_convention_prefix(),
            ",".join(arg_reprs),
            except_clause)

    def with_with_gil(self, with_gil):
        """Return a copy of this type with the given 'with gil' flag (or self if unchanged)."""
        if with_gil == self.with_gil:
            return self
        else:
            return CFuncType(
                self.return_type, self.args, self.has_varargs,
                self.exception_value, self.exception_check,
                self.calling_convention, self.nogil,
                with_gil,
                self.is_overridable, self.optional_arg_count,
                self.is_const_method, self.is_static_method,
                self.templates, self.is_strict_signature)

    def calling_convention_prefix(self):
        # Returns e.g. "__stdcall " (with trailing space) or "".
        cc = self.calling_convention
        if cc:
            return cc + " "
        else:
            return ""

    def as_argument_type(self):
        # A C function passed as an argument decays to a function pointer.
        return c_ptr_type(self)

    def same_c_signature_as(self, other_type, as_cmethod = 0):
        return self.same_c_signature_as_resolved_type(
            other_type.resolve(), as_cmethod)

    def same_c_signature_as_resolved_type(self, other_type, as_cmethod=False, as_pxd_definition=False,
                                          exact_semantics=True):
        # If 'exact_semantics' is false, allow any equivalent C signatures
        # if the Cython semantics are compatible, i.e. the same or wider for 'other_type'.

        #print "CFuncType.same_c_signature_as_resolved_type:", \
        #    self, other_type, "as_cmethod =", as_cmethod ###
        if other_type is error_type:
            return 1
        if not other_type.is_cfunction:
            return 0
        if self.is_overridable != other_type.is_overridable:
            return 0
        nargs = len(self.args)
        if nargs != len(other_type.args):
            return 0
        # When comparing C method signatures, the first argument
        # is exempt from compatibility checking (the proper check
        # is performed elsewhere).
        for i in range(as_cmethod, nargs):
            if not self.args[i].type.same_as(other_type.args[i].type):
                return 0
        if self.has_varargs != other_type.has_varargs:
            return 0
        if self.optional_arg_count != other_type.optional_arg_count:
            return 0
        if as_pxd_definition:
            # A narrowing of the return type declared in the pxd is allowed.
            if not self.return_type.subtype_of_resolved_type(other_type.return_type):
                return 0
        else:
            if not self.return_type.same_as(other_type.return_type):
                return 0
        if not self.same_calling_convention_as(other_type):
            return 0
        if exact_semantics:
            if self.exception_check != other_type.exception_check:
                return 0
            if not self._same_exception_value(other_type.exception_value):
                return 0
        elif not self._is_exception_compatible_with(other_type):
            return 0
        return 1

    def _same_exception_value(self, other_exc_value):
        """Return 1 if the declared exception values are equivalent, else 0.

        For C++ exception declarations ('+'), the exception handlers are
        compared by type, entry cname and name rather than plain equality.
        """
        if self.exception_value == other_exc_value:
            return 1
        if self.exception_check != '+':
            return 0
        if not self.exception_value or not other_exc_value:
            return 0
        if self.exception_value.type != other_exc_value.type:
            return 0
        if self.exception_value.entry and other_exc_value.entry:
            if self.exception_value.entry.cname != other_exc_value.entry.cname:
                return 0
        if self.exception_value.name != other_exc_value.name:
            return 0
        return 1

    def compatible_signature_with(self, other_type, as_cmethod = 0):
        return self.compatible_signature_with_resolved_type(other_type.resolve(), as_cmethod)

    def compatible_signature_with_resolved_type(self, other_type, as_cmethod):
        # Return 1 if this signature can stand in for 'other_type' (e.g. when
        # overriding), allowing extra optional args and a narrower return type.
        #print "CFuncType.same_c_signature_as_resolved_type:", \
        #    self, other_type, "as_cmethod =", as_cmethod ###
        if other_type is error_type:
            return 1
        if not other_type.is_cfunction:
            return 0
        if not self.is_overridable and other_type.is_overridable:
            return 0
        nargs = len(self.args)
        if nargs - self.optional_arg_count != len(other_type.args) - other_type.optional_arg_count:
            return 0
        if self.optional_arg_count < other_type.optional_arg_count:
            return 0
        # When comparing C method signatures, the first argument
        # is exempt from compatibility checking (the proper check
        # is performed elsewhere).
        for i in range(as_cmethod, len(other_type.args)):
            if not self.args[i].type.same_as(
                    other_type.args[i].type):
                return 0
        if self.has_varargs != other_type.has_varargs:
            return 0
        if not self.return_type.subtype_of_resolved_type(other_type.return_type):
            return 0
        if not self.same_calling_convention_as(other_type):
            return 0
        if self.nogil != other_type.nogil:
            return 0
        if not self._is_exception_compatible_with(other_type):
            return 0
        # Remember the signature we were matched against (used elsewhere).
        self.original_sig = other_type.original_sig or other_type
        return 1

    def _is_exception_compatible_with(self, other_type):
        # narrower exception checks are ok, but prevent mismatches
        if self.exception_check == '+' and other_type.exception_check != '+':
            # must catch C++ exceptions if we raise them
            return 0
        if not other_type.exception_check or other_type.exception_value is not None:
            # There's no problem if this type doesn't emit exceptions but the other type checks
            if other_type.exception_check and not (self.exception_check or self.exception_value):
                return 1
            # if other does not *always* check exceptions, self must comply
            if not self._same_exception_value(other_type.exception_value):
                return 0
            if self.exception_check and self.exception_check != other_type.exception_check:
                # a redundant exception check doesn't make functions incompatible, but a missing one does
                return 0
        return 1

    def narrower_c_signature_than(self, other_type, as_cmethod = 0):
        return self.narrower_c_signature_than_resolved_type(other_type.resolve(), as_cmethod)

    def narrower_c_signature_than_resolved_type(self, other_type, as_cmethod):
        # Return 1 if every argument/return type is a subtype of the
        # corresponding type in 'other_type'.  As a side effect, marks
        # arguments that will need a runtime type test.
        if other_type is error_type:
            return 1
        if not other_type.is_cfunction:
            return 0
        nargs = len(self.args)
        if nargs != len(other_type.args):
            return 0
        for i in range(as_cmethod, nargs):
            if not self.args[i].type.subtype_of_resolved_type(other_type.args[i].type):
                return 0
            else:
                self.args[i].needs_type_test = other_type.args[i].needs_type_test \
                    or not self.args[i].type.same_as(other_type.args[i].type)
        if self.has_varargs != other_type.has_varargs:
            return 0
        if self.optional_arg_count != other_type.optional_arg_count:
            return 0
        if not self.return_type.subtype_of_resolved_type(other_type.return_type):
            return 0
        if not self.exception_check and other_type.exception_check:
            # a redundant exception check doesn't make functions incompatible, but a missing one does
            return 0
        if not self._same_exception_value(other_type.exception_value):
            return 0
        return 1

    def same_calling_convention_as(self, other):
        ## XXX Under discussion ...
        ## callspec_words = ("__stdcall", "__cdecl", "__fastcall")
        ## cs1 = self.calling_convention
        ## cs2 = other.calling_convention
        ## if (cs1 in callspec_words or
        ##         cs2 in callspec_words):
        ##     return cs1 == cs2
        ## else:
        ##     return True
        # Only __stdcall vs non-__stdcall is considered significant here.
        sc1 = self.calling_convention == '__stdcall'
        sc2 = other.calling_convention == '__stdcall'
        return sc1 == sc2

    def same_as_resolved_type(self, other_type, as_cmethod=False):
        return self.same_c_signature_as_resolved_type(other_type, as_cmethod=as_cmethod) \
            and self.nogil == other_type.nogil

    def pointer_assignable_from_resolved_type(self, rhs_type):
        # Accept compatible exception/nogil declarations for the RHS.
+ if rhs_type is error_type: + return 1 + if not rhs_type.is_cfunction: + return 0 + return rhs_type.same_c_signature_as_resolved_type(self, exact_semantics=False) \ + and not (self.nogil and not rhs_type.nogil) + + def declaration_code(self, entity_code, + for_display = 0, dll_linkage = None, pyrex = 0, + with_calling_convention = 1): + arg_decl_list = [] + for arg in self.args[:len(self.args)-self.optional_arg_count]: + arg_decl_list.append( + arg.type.declaration_code("", for_display, pyrex = pyrex)) + if self.is_overridable: + arg_decl_list.append("int %s" % Naming.skip_dispatch_cname) + if self.optional_arg_count: + if self.op_arg_struct: + arg_decl_list.append(self.op_arg_struct.declaration_code(Naming.optional_args_cname)) + else: + # op_arg_struct may not be initialized at this point if this class is being used + # to prepare a Python error message or similar. In this case, just omit the args. + assert for_display + if self.has_varargs: + arg_decl_list.append("...") + arg_decl_code = ", ".join(arg_decl_list) + if not arg_decl_code and not pyrex: + arg_decl_code = "void" + trailer = "" + if (pyrex or for_display) and not self.return_type.is_pyobject: + if self.exception_value and self.exception_check: + trailer = " except? 
%s" % self.exception_value + elif self.exception_value and not self.exception_check: + trailer = " except %s" % self.exception_value + elif not self.exception_value and not self.exception_check: + trailer = " noexcept" + elif self.exception_check == '+': + trailer = " except +" + elif self.exception_check and for_display: + # not spelled out by default, unless for human eyes + trailer = " except *" + if self.nogil: + trailer += " nogil" + if not with_calling_convention: + cc = '' + else: + cc = self.calling_convention_prefix() + if (not entity_code and cc) or entity_code.startswith("*"): + entity_code = "(%s%s)" % (cc, entity_code) + cc = "" + if self.is_const_method: + trailer += " const" + return self.return_type.declaration_code( + "%s%s(%s)%s" % (cc, entity_code, arg_decl_code, trailer), + for_display, dll_linkage, pyrex) + + def function_header_code(self, func_name, arg_code): + if self.is_const_method: + trailer = " const" + else: + trailer = "" + return "%s%s(%s)%s" % (self.calling_convention_prefix(), + func_name, arg_code, trailer) + + def signature_string(self): + s = self.empty_declaration_code() + return s + + def signature_cast_string(self): + s = self.declaration_code("(*)", with_calling_convention=False) + return '(%s)' % s + + def specialize(self, values): + result = CFuncType(self.return_type.specialize(values), + [arg.specialize(values) for arg in self.args], + has_varargs = self.has_varargs, + exception_value = self.exception_value, + exception_check = self.exception_check, + calling_convention = self.calling_convention, + nogil = self.nogil, + with_gil = self.with_gil, + is_overridable = self.is_overridable, + optional_arg_count = self.optional_arg_count, + is_const_method = self.is_const_method, + is_static_method = self.is_static_method, + templates = self.templates) + + result.from_fused = self.is_fused + return result + + def opt_arg_cname(self, arg_name): + return self.op_arg_struct.base_type.scope.lookup(arg_name).cname + + # Methods that 
deal with Fused Types + # All but map_with_specific_entries should be called only on functions + # with fused types (and not on their corresponding specific versions). + + def get_all_specialized_permutations(self, fused_types=None): + """ + Permute all the types. For every specific instance of a fused type, we + want all other specific instances of all other fused types. + + It returns an iterable of two-tuples of the cname that should prefix + the cname of the function, and a dict mapping any fused types to their + respective specific types. + """ + assert self.is_fused + + if fused_types is None: + fused_types = self.get_fused_types() + + return get_all_specialized_permutations(fused_types) + + def get_all_specialized_function_types(self): + """ + Get all the specific function types of this one. + """ + assert self.is_fused + + if self.entry.fused_cfunction: + return [n.type for n in self.entry.fused_cfunction.nodes] + elif self.cached_specialized_types is not None: + return self.cached_specialized_types + + result = [] + permutations = self.get_all_specialized_permutations() + + new_cfunc_entries = [] + for cname, fused_to_specific in permutations: + new_func_type = self.entry.type.specialize(fused_to_specific) + + if self.optional_arg_count: + # Remember, this method is set by CFuncDeclaratorNode + self.declare_opt_arg_struct(new_func_type, cname) + + new_entry = copy.deepcopy(self.entry) + new_func_type.specialize_entry(new_entry, cname) + + new_entry.type = new_func_type + new_func_type.entry = new_entry + result.append(new_func_type) + + new_cfunc_entries.append(new_entry) + + cfunc_entries = self.entry.scope.cfunc_entries + try: + cindex = cfunc_entries.index(self.entry) + except ValueError: + cfunc_entries.extend(new_cfunc_entries) + else: + cfunc_entries[cindex:cindex+1] = new_cfunc_entries + + self.cached_specialized_types = result + + return result + + def get_fused_types(self, result=None, seen=None, subtypes=None, include_function_return_type=False): 
+ """Return fused types in the order they appear as parameter types""" + return super(CFuncType, self).get_fused_types( + result, seen, + # for function pointer types, we consider the result type; for plain function + # types we don't (because it must be derivable from the arguments) + subtypes=self.subtypes if include_function_return_type else ['args']) + + def specialize_entry(self, entry, cname): + assert not self.is_fused + specialize_entry(entry, cname) + + def can_coerce_to_pyobject(self, env): + # duplicating the decisions from create_to_py_utility_code() here avoids writing out unused code + if self.has_varargs or self.optional_arg_count: + return False + if self.to_py_function is not None: + return self.to_py_function + for arg in self.args: + if not arg.type.is_pyobject and not arg.type.can_coerce_to_pyobject(env): + return False + if not self.return_type.is_pyobject and not self.return_type.can_coerce_to_pyobject(env): + return False + return True + + def create_to_py_utility_code(self, env): + # FIXME: it seems we're trying to coerce in more cases than we should + if self.to_py_function is not None: + return self.to_py_function + if not self.can_coerce_to_pyobject(env): + return False + from .UtilityCode import CythonUtilityCode + + # include argument names into the c function name to ensure cname is unique + # between functions with identical types but different argument names + from .Symtab import punycodify_name + def arg_name_part(arg): + return "%s%s" % (len(arg.name), punycodify_name(arg.name)) if arg.name else "0" + arg_names = [ arg_name_part(arg) for arg in self.args ] + arg_names = cap_length("_".join(arg_names)) + safe_typename = type_identifier(self, pyrex=True) + # Note that the length here is slightly bigger than twice the default cap in + # "cap_length" (since the length is capped in both arg_names and the type_identifier) + # but since this is significantly shorter than compilers should be able to handle, + # that is acceptable. 
+ to_py_function = "__Pyx_CFunc_%s_to_py_%s" % (safe_typename, arg_names) + + for arg in self.args: + if not arg.type.is_pyobject and not arg.type.create_from_py_utility_code(env): + return False + if not self.return_type.is_pyobject and not self.return_type.create_to_py_utility_code(env): + return False + + def declared_type(ctype): + type_displayname = str(ctype.declaration_code("", for_display=True)) + if ctype.is_pyobject: + arg_ctype = type_name = type_displayname + if ctype.is_builtin_type: + arg_ctype = ctype.name + elif not ctype.is_extension_type: + type_name = 'object' + type_displayname = None + else: + type_displayname = repr(type_displayname) + elif ctype is c_bint_type: + type_name = arg_ctype = 'bint' + else: + type_name = arg_ctype = type_displayname + if ctype is c_double_type: + type_displayname = 'float' + else: + type_displayname = repr(type_displayname) + return type_name, arg_ctype, type_displayname + + class Arg(object): + def __init__(self, arg_name, arg_type): + self.name = arg_name + self.type = arg_type + self.type_cname, self.ctype, self.type_displayname = declared_type(arg_type) + + if self.return_type.is_void: + except_clause = 'except *' + elif self.return_type.is_pyobject: + except_clause = '' + elif self.exception_value: + except_clause = ('except? 
%s' if self.exception_check else 'except %s') % self.exception_value + else: + except_clause = 'except *' + + context = { + 'cname': to_py_function, + 'args': [Arg(arg.name or 'arg%s' % ix, arg.type) for ix, arg in enumerate(self.args)], + 'return_type': Arg('return', self.return_type), + 'except_clause': except_clause, + } + # FIXME: directives come from first defining environment and do not adapt for reuse + env.use_utility_code(CythonUtilityCode.load( + "cfunc.to_py", "CConvert.pyx", + outer_module_scope=env.global_scope(), # need access to types declared in module + context=context, compiler_directives=dict(env.global_scope().directives))) + self.to_py_function = to_py_function + return True + + +def specialize_entry(entry, cname): + """ + Specialize an entry of a copied fused function or method + """ + entry.is_fused_specialized = True + entry.name = get_fused_cname(cname, entry.name) + + if entry.is_cmethod: + entry.cname = entry.name + if entry.is_inherited: + entry.cname = StringEncoding.EncodedString( + "%s.%s" % (Naming.obj_base_cname, entry.cname)) + else: + entry.cname = get_fused_cname(cname, entry.cname) + + if entry.func_cname: + entry.func_cname = get_fused_cname(cname, entry.func_cname) + if entry.final_func_cname: + entry.final_func_cname = get_fused_cname(cname, entry.final_func_cname) + +def get_fused_cname(fused_cname, orig_cname): + """ + Given the fused cname id and an original cname, return a specialized cname + """ + assert fused_cname and orig_cname + return StringEncoding.EncodedString('%s%s%s' % (Naming.fused_func_prefix, + fused_cname, orig_cname)) + +def unique(somelist): + seen = set() + result = [] + for obj in somelist: + if obj not in seen: + result.append(obj) + seen.add(obj) + + return result + +def get_all_specialized_permutations(fused_types): + return _get_all_specialized_permutations(unique(fused_types)) + +def _get_all_specialized_permutations(fused_types, id="", f2s=()): + fused_type, = fused_types[0].get_fused_types() + 
    result = []

    for newid, specific_type in enumerate(fused_type.types):
        # f2s = dict(f2s, **{ fused_type: specific_type })
        # Copy before updating so sibling permutations don't share the mapping.
        f2s = dict(f2s)
        f2s.update({ fused_type: specific_type })

        if id:
            cname = '%s_%s' % (id, newid)
        else:
            cname = str(newid)

        if len(fused_types) > 1:
            # Recurse over the remaining fused types, extending the cname path.
            result.extend(_get_all_specialized_permutations(
                fused_types[1:], cname, f2s))
        else:
            result.append((cname, f2s))

    return result

def specialization_signature_string(fused_compound_type, fused_to_specific):
    """
    Return the signature for a specialization of a fused type. e.g.

        floating[:] ->
            'float' or 'double'

        cdef fused ft:
            float[:]
            double[:]

        ft ->
            'float[:]' or 'double[:]'

        integral func(floating) ->
            'int (*func)(float)' or ...
    """
    fused_types = fused_compound_type.get_fused_types()
    if len(fused_types) == 1:
        fused_type = fused_types[0]
    else:
        fused_type = fused_compound_type

    return fused_type.specialize(fused_to_specific).typeof_name()


def get_specialized_types(type):
    """
    Return a list of specialized types in their declared order.
    """
    assert type.is_fused

    if isinstance(type, FusedType):
        # A plain fused type: its member types already are the specializations.
        result = list(type.types)
        for specialized_type in result:
            specialized_type.specialization_string = specialized_type.typeof_name()
    else:
        # A compound type containing fused types: specialize per permutation.
        result = []
        for cname, f2s in get_all_specialized_permutations(type.get_fused_types()):
            specialized_type = type.specialize(f2s)
            specialized_type.specialization_string = (
                specialization_signature_string(type, f2s))
            result.append(specialized_type)

    return result


class CFuncTypeArg(BaseType):
    #  name       string
    #  cname      string
    #  type       PyrexType
    #  pos        source file position

    # FIXME: is this the right setup? should None be allowed here?
+ not_none = False + or_none = False + accept_none = True + accept_builtin_subtypes = False + annotation = None + + subtypes = ['type'] + + def __init__(self, name, type, pos, cname=None, annotation=None): + self.name = name + if cname is not None: + self.cname = cname + else: + self.cname = Naming.var_prefix + name + if annotation is not None: + self.annotation = annotation + self.type = type + self.pos = pos + self.needs_type_test = False # TODO: should these defaults be set in analyse_types()? + + def __repr__(self): + return "%s:%s" % (self.name, repr(self.type)) + + def declaration_code(self, for_display = 0): + return self.type.declaration_code(self.cname, for_display) + + def specialize(self, values): + return CFuncTypeArg(self.name, self.type.specialize(values), self.pos, self.cname) + + def is_forwarding_reference(self): + if self.type.is_rvalue_reference: + if (isinstance(self.type.ref_base_type, TemplatePlaceholderType) + and not self.type.ref_base_type.is_cv_qualified): + return True + return False + +class ToPyStructUtilityCode(object): + + requires = None + + def __init__(self, type, forward_decl, env): + self.type = type + self.header = "static PyObject* %s(%s)" % (type.to_py_function, + type.declaration_code('s')) + self.forward_decl = forward_decl + self.env = env + + def __eq__(self, other): + return isinstance(other, ToPyStructUtilityCode) and self.header == other.header + + def __hash__(self): + return hash(self.header) + + def get_tree(self, **kwargs): + pass + + def put_code(self, output): + code = output['utility_code_def'] + proto = output['utility_code_proto'] + + code.putln("%s {" % self.header) + code.putln("PyObject* res;") + code.putln("PyObject* member;") + code.putln("res = __Pyx_PyDict_NewPresized(%d); if (unlikely(!res)) return NULL;" % + len(self.type.scope.var_entries)) + for member in self.type.scope.var_entries: + nameconst_cname = code.get_py_string_const(member.name, identifier=True) + code.putln("%s; if (unlikely(!member)) 
goto bad;" % ( + member.type.to_py_call_code('s.%s' % member.cname, 'member', member.type))) + code.putln("if (unlikely(PyDict_SetItem(res, %s, member) < 0)) goto bad;" % nameconst_cname) + code.putln("Py_DECREF(member);") + code.putln("return res;") + code.putln("bad:") + code.putln("Py_XDECREF(member);") + code.putln("Py_DECREF(res);") + code.putln("return NULL;") + code.putln("}") + + # This is a bit of a hack, we need a forward declaration + # due to the way things are ordered in the module... + if self.forward_decl: + proto.putln(self.type.empty_declaration_code() + ';') + proto.putln(self.header + ";") + + def inject_tree_and_scope_into(self, module_node): + pass + + +class CStructOrUnionType(CType): + # name string + # cname string + # kind string "struct" or "union" + # scope StructOrUnionScope, or None if incomplete + # typedef_flag boolean + # packed boolean + + # entry Entry + + is_struct_or_union = 1 + has_attributes = 1 + exception_check = True + + def __init__(self, name, kind, scope, typedef_flag, cname, packed=False, in_cpp=False): + self.name = name + self.cname = cname + self.kind = kind + self.scope = scope + self.typedef_flag = typedef_flag + self.is_struct = kind == 'struct' + self.to_py_function = "%s_to_py_%s" % ( + Naming.convert_func_prefix, self.specialization_name()) + self.from_py_function = "%s_from_py_%s" % ( + Naming.convert_func_prefix, self.specialization_name()) + self.exception_check = True + self._convert_to_py_code = None + self._convert_from_py_code = None + self.packed = packed + self.needs_cpp_construction = self.is_struct and in_cpp + + def can_coerce_to_pyobject(self, env): + if self._convert_to_py_code is False: + return None # tri-state-ish + + if env.outer_scope is None: + return False + + if self._convert_to_py_code is None: + is_union = not self.is_struct + unsafe_union_types = set() + safe_union_types = set() + for member in self.scope.var_entries: + member_type = member.type + if not 
member_type.can_coerce_to_pyobject(env): + self.to_py_function = None + self._convert_to_py_code = False + return False + if is_union: + if member_type.is_ptr or member_type.is_cpp_class: + unsafe_union_types.add(member_type) + else: + safe_union_types.add(member_type) + + if unsafe_union_types and (safe_union_types or len(unsafe_union_types) > 1): + # unsafe mix of safe and unsafe to convert types + self.from_py_function = None + self._convert_from_py_code = False + return False + + return True + + def create_to_py_utility_code(self, env): + if not self.can_coerce_to_pyobject(env): + return False + + if self._convert_to_py_code is None: + for member in self.scope.var_entries: + member.type.create_to_py_utility_code(env) + forward_decl = self.entry.visibility != 'extern' and not self.typedef_flag + self._convert_to_py_code = ToPyStructUtilityCode(self, forward_decl, env) + + env.use_utility_code(self._convert_to_py_code) + return True + + def can_coerce_from_pyobject(self, env): + if env.outer_scope is None or self._convert_from_py_code is False: + return False + for member in self.scope.var_entries: + if not member.type.can_coerce_from_pyobject(env): + return False + return True + + def create_from_py_utility_code(self, env): + if env.outer_scope is None: + return False + + if self._convert_from_py_code is False: + return None # tri-state-ish + + if self._convert_from_py_code is None: + if not self.scope.var_entries: + # There are obviously missing fields; don't allow instantiation + # where absolutely no content is provided. 
+ return False + + for member in self.scope.var_entries: + if not member.type.create_from_py_utility_code(env): + self.from_py_function = None + self._convert_from_py_code = False + return False + + context = dict( + struct_type=self, + var_entries=self.scope.var_entries, + funcname=self.from_py_function, + ) + env.use_utility_code(UtilityCode.load_cached("RaiseUnexpectedTypeError", "ObjectHandling.c")) + from .UtilityCode import CythonUtilityCode + self._convert_from_py_code = CythonUtilityCode.load( + "FromPyStructUtility" if self.is_struct else "FromPyUnionUtility", + "CConvert.pyx", + outer_module_scope=env.global_scope(), # need access to types declared in module + context=context) + + env.use_utility_code(self._convert_from_py_code) + return True + + def __repr__(self): + return "" % ( + self.name, self.cname, + ("", " typedef")[self.typedef_flag]) + + def declaration_code(self, entity_code, + for_display=0, dll_linkage=None, pyrex=0): + if pyrex or for_display: + base_code = self.name + else: + if self.typedef_flag: + base_code = self.cname + else: + base_code = "%s %s" % (self.kind, self.cname) + base_code = public_decl(base_code, dll_linkage) + return self.base_declaration_code(base_code, entity_code) + + def __eq__(self, other): + try: + return (isinstance(other, CStructOrUnionType) and + self.name == other.name) + except AttributeError: + return False + + def __lt__(self, other): + try: + return self.name < other.name + except AttributeError: + # this is arbitrary, but it makes sure we always have + # *some* kind of order + return False + + def __hash__(self): + return hash(self.cname) ^ hash(self.kind) + + def is_complete(self): + return self.scope is not None + + def attributes_known(self): + return self.is_complete() + + def can_be_complex(self): + # Does the struct consist of exactly two identical floats? 
+ fields = self.scope.var_entries + if len(fields) != 2: return False + a, b = fields + return (a.type.is_float and b.type.is_float and + a.type.empty_declaration_code() == + b.type.empty_declaration_code()) + + def struct_nesting_depth(self): + child_depths = [x.type.struct_nesting_depth() + for x in self.scope.var_entries] + return max(child_depths) + 1 + + def cast_code(self, expr_code): + if self.is_struct: + return expr_code + return super(CStructOrUnionType, self).cast_code(expr_code) + +cpp_string_conversions = ("std::string",) + +builtin_cpp_conversions = { + # type element template params + "std::pair": 2, + "std::vector": 1, + "std::list": 1, + "std::set": 1, + "std::unordered_set": 1, + "std::map": 2, + "std::unordered_map": 2, + "std::complex": 1, +} + +class CppClassType(CType): + # name string + # cname string + # scope CppClassScope + # templates [string] or None + + is_cpp_class = 1 + has_attributes = 1 + needs_cpp_construction = 1 + exception_check = True + namespace = None + + # For struct-like declaration. 
+ kind = "struct" + packed = False + typedef_flag = False + + subtypes = ['templates'] + + def __init__(self, name, scope, cname, base_classes, templates=None, template_type=None): + self.name = name + self.cname = cname + self.scope = scope + self.base_classes = base_classes + self.operators = [] + self.templates = templates + self.template_type = template_type + self.num_optional_templates = sum(is_optional_template_param(T) for T in templates or ()) + if templates: + self.specializations = {tuple(zip(templates, templates)): self} + else: + self.specializations = {} + self.is_cpp_string = cname in cpp_string_conversions + + def use_conversion_utility(self, from_or_to): + pass + + def maybe_unordered(self): + if 'unordered' in self.cname: + return 'unordered_' + else: + return '' + + def can_coerce_from_pyobject(self, env): + if self.cname in builtin_cpp_conversions: + template_count = builtin_cpp_conversions[self.cname] + for ix, T in enumerate(self.templates or []): + if ix >= template_count: + break + if T.is_pyobject or not T.can_coerce_from_pyobject(env): + return False + return True + elif self.cname in cpp_string_conversions: + return True + return False + + def create_from_py_utility_code(self, env): + if self.from_py_function is not None: + return True + if self.cname in builtin_cpp_conversions or self.cname in cpp_string_conversions: + X = "XYZABC" + tags = [] + context = {} + for ix, T in enumerate(self.templates or []): + if ix >= builtin_cpp_conversions[self.cname]: + break + if T.is_pyobject or not T.create_from_py_utility_code(env): + return False + tags.append(T.specialization_name()) + context[X[ix]] = T + + if self.cname in cpp_string_conversions: + cls = 'string' + tags = type_identifier(self), + else: + cls = self.cname[5:] + cname = '__pyx_convert_%s_from_py_%s' % (cls, '__and_'.join(tags)) + context.update({ + 'cname': cname, + 'maybe_unordered': self.maybe_unordered(), + 'type': self.cname, + }) + # Override directives that should not be 
inherited from user code. + from .UtilityCode import CythonUtilityCode + directives = CythonUtilityCode.filter_inherited_directives(env.directives) + env.use_utility_code(CythonUtilityCode.load( + cls.replace('unordered_', '') + ".from_py", "CppConvert.pyx", + context=context, compiler_directives=directives)) + self.from_py_function = cname + return True + + def can_coerce_to_pyobject(self, env): + if self.cname in builtin_cpp_conversions or self.cname in cpp_string_conversions: + for ix, T in enumerate(self.templates or []): + if ix >= builtin_cpp_conversions[self.cname]: + break + if T.is_pyobject or not T.can_coerce_to_pyobject(env): + return False + return True + + + def create_to_py_utility_code(self, env): + if self.to_py_function is not None: + return True + if self.cname in builtin_cpp_conversions or self.cname in cpp_string_conversions: + X = "XYZABC" + tags = [] + context = {} + for ix, T in enumerate(self.templates or []): + if ix >= builtin_cpp_conversions[self.cname]: + break + if not T.create_to_py_utility_code(env): + return False + tags.append(T.specialization_name()) + context[X[ix]] = T + + if self.cname in cpp_string_conversions: + cls = 'string' + prefix = 'PyObject_' # gets specialised by explicit type casts in CoerceToPyTypeNode + tags = type_identifier(self), + else: + cls = self.cname[5:] + prefix = '' + cname = "__pyx_convert_%s%s_to_py_%s" % (prefix, cls, "____".join(tags)) + context.update({ + 'cname': cname, + 'maybe_unordered': self.maybe_unordered(), + 'type': self.cname, + }) + from .UtilityCode import CythonUtilityCode + # Override directives that should not be inherited from user code. 
+ directives = CythonUtilityCode.filter_inherited_directives(env.directives) + env.use_utility_code(CythonUtilityCode.load( + cls.replace('unordered_', '') + ".to_py", "CppConvert.pyx", + context=context, compiler_directives=directives)) + self.to_py_function = cname + return True + + def is_template_type(self): + return self.templates is not None and self.template_type is None + + def get_fused_types(self, result=None, seen=None, include_function_return_type=False): + if result is None: + result = [] + seen = set() + if self.namespace: + self.namespace.get_fused_types(result, seen) + if self.templates: + for T in self.templates: + T.get_fused_types(result, seen) + return result + + def specialize_here(self, pos, env, template_values=None): + if not self.is_template_type(): + error(pos, "'%s' type is not a template" % self) + return error_type + if len(self.templates) - self.num_optional_templates <= len(template_values) < len(self.templates): + num_defaults = len(self.templates) - len(template_values) + partial_specialization = self.declaration_code('', template_params=template_values) + # Most of the time we don't need to declare anything typed to these + # default template arguments, but when we do there's no way in C++ + # to reference this directly. However, it is common convention to + # provide a typedef in the template class that resolves to each + # template type. For now, allow the user to specify this name as + # the template parameter. + # TODO: Allow typedefs in cpp classes and search for it in this + # classes scope as a concrete name we could use. 
+ template_values = template_values + [ + TemplatePlaceholderType( + "%s::%s" % (partial_specialization, param.name), True) + for param in self.templates[-num_defaults:]] + if len(self.templates) != len(template_values): + error(pos, "%s templated type receives %d arguments, got %d" % + (self.name, len(self.templates), len(template_values))) + return error_type + has_object_template_param = False + for value in template_values: + if value.is_pyobject or value.needs_refcounting: + has_object_template_param = True + type_description = "Python object" if value.is_pyobject else "Reference-counted" + error(pos, + "%s type '%s' cannot be used as a template argument" % ( + type_description, value)) + if has_object_template_param: + return error_type + return self.specialize(dict(zip(self.templates, template_values))) + + def specialize(self, values): + if not self.templates and not self.namespace: + return self + if self.templates is None: + self.templates = [] + key = tuple(values.items()) + if key in self.specializations: + return self.specializations[key] + template_values = [t.specialize(values) for t in self.templates] + specialized = self.specializations[key] = \ + CppClassType(self.name, None, self.cname, [], template_values, template_type=self) + # Need to do these *after* self.specializations[key] is set + # to avoid infinite recursion on circular references. + specialized.base_classes = [b.specialize(values) for b in self.base_classes] + if self.namespace is not None: + specialized.namespace = self.namespace.specialize(values) + specialized.scope = self.scope.specialize(values, specialized) + if self.cname == 'std::vector': + # vector is special cased in the C++ standard, and its + # accessors do not necessarily return references to the underlying + # elements (which may be bit-packed). 
+ # http://www.cplusplus.com/reference/vector/vector-bool/ + # Here we pretend that the various methods return bool values + # (as the actual returned values are coercible to such, and + # we don't support call expressions as lvalues). + T = values.get(self.templates[0], None) + if T and not T.is_fused and T.empty_declaration_code() == 'bool': + for bit_ref_returner in ('at', 'back', 'front'): + if bit_ref_returner in specialized.scope.entries: + specialized.scope.entries[bit_ref_returner].type.return_type = T + return specialized + + def deduce_template_params(self, actual): + if actual.is_cv_qualified: + actual = actual.cv_base_type + if actual.is_reference: + actual = actual.ref_base_type + if self == actual: + return {} + elif actual.is_cpp_class: + self_template_type = self + while getattr(self_template_type, 'template_type', None): + self_template_type = self_template_type.template_type + def all_bases(cls): + yield cls + for parent in cls.base_classes: + for base in all_bases(parent): + yield base + for actual_base in all_bases(actual): + template_type = actual_base + while getattr(template_type, 'template_type', None): + template_type = template_type.template_type + if (self_template_type.empty_declaration_code() + == template_type.empty_declaration_code()): + return reduce( + merge_template_deductions, + [formal_param.deduce_template_params(actual_param) + for (formal_param, actual_param) + in zip(self.templates, actual_base.templates)], + {}) + else: + return {} + + def declaration_code(self, entity_code, + for_display = 0, dll_linkage = None, pyrex = 0, + template_params = None): + if template_params is None: + template_params = self.templates + if self.templates: + template_strings = [param.declaration_code('', for_display, None, pyrex) + for param in template_params + if not is_optional_template_param(param) and not param.is_fused] + if for_display: + brackets = "[%s]" + else: + brackets = "<%s> " + templates = brackets % ",".join(template_strings) + 
else: + templates = "" + if pyrex or for_display: + base_code = "%s%s" % (self.name, templates) + else: + base_code = "%s%s" % (self.cname, templates) + if self.namespace is not None: + base_code = "%s::%s" % (self.namespace.empty_declaration_code(), base_code) + base_code = public_decl(base_code, dll_linkage) + return self.base_declaration_code(base_code, entity_code) + + def cpp_optional_declaration_code(self, entity_code, dll_linkage=None, template_params=None): + return "__Pyx_Optional_Type<%s> %s" % ( + self.declaration_code("", False, dll_linkage, False, + template_params), + entity_code) + + def is_subclass(self, other_type): + if self.same_as_resolved_type(other_type): + return 1 + for base_class in self.base_classes: + if base_class.is_subclass(other_type): + return 1 + return 0 + + def subclass_dist(self, super_type): + if self.same_as_resolved_type(super_type): + return 0 + elif not self.base_classes: + return float('inf') + else: + return 1 + min(b.subclass_dist(super_type) for b in self.base_classes) + + def same_as_resolved_type(self, other_type): + if other_type.is_cpp_class: + if self == other_type: + return 1 + # This messy logic is needed due to GH Issue #1852. + elif (self.cname == other_type.cname and + (self.template_type and other_type.template_type + or self.templates + or other_type.templates)): + if self.templates == other_type.templates: + return 1 + for t1, t2 in zip(self.templates, other_type.templates): + if is_optional_template_param(t1) and is_optional_template_param(t2): + break + if not t1.same_as_resolved_type(t2): + return 0 + return 1 + return 0 + + def assignable_from_resolved_type(self, other_type): + # TODO: handle operator=(...) here? 
+ if other_type is error_type: + return True + elif other_type.is_cpp_class: + return other_type.is_subclass(self) + elif other_type.is_string and self.cname in cpp_string_conversions: + return True + + def attributes_known(self): + return self.scope is not None + + def find_cpp_operation_type(self, operator, operand_type=None): + operands = [self] + if operand_type is not None: + operands.append(operand_type) + # pos == None => no errors + operator_entry = self.scope.lookup_operator_for_types(None, operator, operands) + if not operator_entry: + return None + func_type = operator_entry.type + if func_type.is_ptr: + func_type = func_type.base_type + return func_type.return_type + + def get_constructor(self, pos): + constructor = self.scope.lookup('') + if constructor is not None: + return constructor + + # Otherwise: automatically declare no-args default constructor. + # Make it "nogil" if the base classes allow it. + nogil = True + for base in self.base_classes: + base_constructor = base.scope.lookup('') + if base_constructor and not base_constructor.type.nogil: + nogil = False + break + + func_type = CFuncType(self, [], exception_check='+', nogil=nogil) + return self.scope.declare_cfunction(u'', func_type, pos) + + def check_nullary_constructor(self, pos, msg="stack allocated"): + constructor = self.scope.lookup(u'') + if constructor is not None and best_match([], constructor.all_alternatives()) is None: + error(pos, "C++ class must have a nullary constructor to be %s" % msg) + + def cpp_optional_check_for_null_code(self, cname): + # only applies to c++ classes that are being declared as std::optional + return "(%s.has_value())" % cname + + +class EnumMixin(object): + """ + Common implementation details for C and C++ enums. 
+ """ + + def create_enum_to_py_utility_code(self, env): + from .UtilityCode import CythonUtilityCode + self.to_py_function = "__Pyx_Enum_%s_to_py" % type_identifier(self) + if self.entry.scope != env.global_scope(): + module_name = self.entry.scope.qualified_name + else: + module_name = None + + directives = CythonUtilityCode.filter_inherited_directives( + env.global_scope().directives) + if any(value_entry.enum_int_value is None for value_entry in self.entry.enum_values): + # We're at a high risk of making a switch statement with equal values in + # (because we simply can't tell, and enums are often used like that). + # So turn off the switch optimization to be safe. + # (Note that for now Cython doesn't do the switch optimization for + # scoped enums anyway) + directives['optimize.use_switch'] = False + + if self.is_cpp_enum: + underlying_type_str = self.underlying_type.empty_declaration_code() + else: + underlying_type_str = "int" + + env.use_utility_code(CythonUtilityCode.load( + "EnumTypeToPy", "CpdefEnums.pyx", + context={"funcname": self.to_py_function, + "name": self.name, + "items": tuple(self.values), + "underlying_type": underlying_type_str, + "module_name": module_name, + "is_flag": not self.is_cpp_enum, + }, + outer_module_scope=self.entry.scope, # ensure that "name" is findable + compiler_directives = directives, + )) + + +class CppScopedEnumType(CType, EnumMixin): + # name string + # doc string or None + # cname string + + is_cpp_enum = True + + def __init__(self, name, cname, underlying_type, namespace=None, doc=None): + self.name = name + self.doc = doc + self.cname = cname + self.values = [] + self.underlying_type = underlying_type + self.namespace = namespace + + def __str__(self): + return self.name + + def declaration_code(self, entity_code, + for_display=0, dll_linkage=None, pyrex=0): + if pyrex or for_display: + type_name = self.name + else: + if self.namespace: + type_name = "%s::%s" % ( + self.namespace.empty_declaration_code(), + 
self.cname + ) + else: + type_name = "__PYX_ENUM_CLASS_DECL %s" % self.cname + type_name = public_decl(type_name, dll_linkage) + return self.base_declaration_code(type_name, entity_code) + + def create_from_py_utility_code(self, env): + if self.from_py_function: + return True + if self.underlying_type.create_from_py_utility_code(env): + self.from_py_function = '(%s)%s' % ( + self.cname, self.underlying_type.from_py_function + ) + return True + + def create_to_py_utility_code(self, env): + if self.to_py_function is not None: + return True + if self.entry.create_wrapper: + self.create_enum_to_py_utility_code(env) + return True + if self.underlying_type.create_to_py_utility_code(env): + # Using a C++11 lambda here, which is fine since + # scoped enums are a C++11 feature + self.to_py_function = '[](const %s& x){return %s((%s)x);}' % ( + self.cname, + self.underlying_type.to_py_function, + self.underlying_type.empty_declaration_code() + ) + return True + + def create_type_wrapper(self, env): + from .UtilityCode import CythonUtilityCode + rst = CythonUtilityCode.load( + "CppScopedEnumType", "CpdefEnums.pyx", + context={ + "name": self.name, + "cname": self.cname.split("::")[-1], + "items": tuple(self.values), + "underlying_type": self.underlying_type.empty_declaration_code(), + "enum_doc": self.doc, + "static_modname": env.qualified_name, + }, + outer_module_scope=env.global_scope()) + + env.use_utility_code(rst) + + +class TemplatePlaceholderType(CType): + + def __init__(self, name, optional=False): + self.name = name + self.optional = optional + + def declaration_code(self, entity_code, + for_display = 0, dll_linkage = None, pyrex = 0): + if entity_code: + return self.name + " " + entity_code + else: + return self.name + + def specialize(self, values): + if self in values: + return values[self] + else: + return self + + def deduce_template_params(self, actual): + return {self: actual} + + def same_as_resolved_type(self, other_type): + if isinstance(other_type, 
TemplatePlaceholderType): + return self.name == other_type.name + else: + return 0 + + def __hash__(self): + return hash(self.name) + + def __cmp__(self, other): + if isinstance(other, TemplatePlaceholderType): + return cmp(self.name, other.name) + else: + return cmp(type(self), type(other)) + + def __eq__(self, other): + if isinstance(other, TemplatePlaceholderType): + return self.name == other.name + else: + return False + +def is_optional_template_param(type): + return isinstance(type, TemplatePlaceholderType) and type.optional + + +class CEnumType(CIntLike, CType, EnumMixin): + # name string + # doc string or None + # cname string or None + # typedef_flag boolean + # values [string], populated during declaration analysis + + is_enum = 1 + signed = 1 + rank = -1 # Ranks below any integer type + + def __init__(self, name, cname, typedef_flag, namespace=None, doc=None): + self.name = name + self.doc = doc + self.cname = cname + self.values = [] + self.typedef_flag = typedef_flag + self.namespace = namespace + self.default_value = "(%s) 0" % self.empty_declaration_code() + + def __str__(self): + return self.name + + def __repr__(self): + return "" % (self.name, self.cname, + ("", " typedef")[self.typedef_flag]) + + def declaration_code(self, entity_code, + for_display = 0, dll_linkage = None, pyrex = 0): + if pyrex or for_display: + base_code = self.name + else: + if self.namespace: + base_code = "%s::%s" % ( + self.namespace.empty_declaration_code(), self.cname) + elif self.typedef_flag: + base_code = self.cname + else: + base_code = "enum %s" % self.cname + base_code = public_decl(base_code, dll_linkage) + return self.base_declaration_code(base_code, entity_code) + + def specialize(self, values): + if self.namespace: + namespace = self.namespace.specialize(values) + if namespace != self.namespace: + return CEnumType( + self.name, self.cname, self.typedef_flag, namespace) + return self + + def create_type_wrapper(self, env): + from .UtilityCode import 
CythonUtilityCode + # Generate "int"-like conversion function + old_to_py_function = self.to_py_function + self.to_py_function = None + CIntLike.create_to_py_utility_code(self, env) + enum_to_pyint_func = self.to_py_function + self.to_py_function = old_to_py_function # we don't actually want to overwrite this + + env.use_utility_code(CythonUtilityCode.load( + "EnumType", "CpdefEnums.pyx", + context={"name": self.name, + "items": tuple(self.values), + "enum_doc": self.doc, + "enum_to_pyint_func": enum_to_pyint_func, + "static_modname": env.qualified_name, + }, + outer_module_scope=env.global_scope())) + + def create_to_py_utility_code(self, env): + if self.to_py_function is not None: + return self.to_py_function + if not self.entry.create_wrapper: + return super(CEnumType, self).create_to_py_utility_code(env) + self.create_enum_to_py_utility_code(env) + return True + + +class CTupleType(CType): + # components [PyrexType] + + is_ctuple = True + + subtypes = ['components'] + + def __init__(self, cname, components): + self.cname = cname + self.components = components + self.size = len(components) + self.to_py_function = "%s_to_py_%s" % (Naming.convert_func_prefix, self.cname) + self.from_py_function = "%s_from_py_%s" % (Naming.convert_func_prefix, self.cname) + self.exception_check = True + self._convert_to_py_code = None + self._convert_from_py_code = None + # equivalent_type must be set now because it isn't available at import time + from .Builtin import tuple_type + self.equivalent_type = tuple_type + + def __str__(self): + return "(%s)" % ", ".join(str(c) for c in self.components) + + def declaration_code(self, entity_code, + for_display = 0, dll_linkage = None, pyrex = 0): + if pyrex or for_display: + return "%s %s" % (str(self), entity_code) + else: + return self.base_declaration_code(self.cname, entity_code) + + def can_coerce_to_pyobject(self, env): + for component in self.components: + if not component.can_coerce_to_pyobject(env): + return False + return True 
+ + def can_coerce_from_pyobject(self, env): + for component in self.components: + if not component.can_coerce_from_pyobject(env): + return False + return True + + def create_to_py_utility_code(self, env): + if self._convert_to_py_code is False: + return None # tri-state-ish + + if self._convert_to_py_code is None: + for component in self.components: + if not component.create_to_py_utility_code(env): + self.to_py_function = None + self._convert_to_py_code = False + return False + + context = dict( + struct_type_decl=self.empty_declaration_code(), + components=self.components, + funcname=self.to_py_function, + size=len(self.components) + ) + self._convert_to_py_code = TempitaUtilityCode.load( + "ToPyCTupleUtility", "TypeConversion.c", context=context) + + env.use_utility_code(self._convert_to_py_code) + return True + + def create_from_py_utility_code(self, env): + if self._convert_from_py_code is False: + return None # tri-state-ish + + if self._convert_from_py_code is None: + for component in self.components: + if not component.create_from_py_utility_code(env): + self.from_py_function = None + self._convert_from_py_code = False + return False + + context = dict( + struct_type_decl=self.empty_declaration_code(), + components=self.components, + funcname=self.from_py_function, + size=len(self.components) + ) + self._convert_from_py_code = TempitaUtilityCode.load( + "FromPyCTupleUtility", "TypeConversion.c", context=context) + + env.use_utility_code(self._convert_from_py_code) + return True + + def cast_code(self, expr_code): + return expr_code + + def specialize(self, values): + assert hasattr(self, "entry") + components = [c.specialize(values) for c in self.components] + new_entry = self.entry.scope.declare_tuple_type(self.entry.pos, components) + return new_entry.type + + +def c_tuple_type(components): + components = tuple(components) + if any(c.is_fused for c in components): + cname = "" # should never end up in code + else: + cname = Naming.ctuple_type_prefix + 
type_list_identifier(components) + tuple_type = CTupleType(cname, components) + return tuple_type + + +class UnspecifiedType(PyrexType): + # Used as a placeholder until the type can be determined. + + is_unspecified = 1 + + def declaration_code(self, entity_code, + for_display = 0, dll_linkage = None, pyrex = 0): + return "" + + def same_as_resolved_type(self, other_type): + return False + + +class ErrorType(PyrexType): + # Used to prevent propagation of error messages. + + is_error = 1 + exception_value = "0" + exception_check = 0 + to_py_function = "dummy" + from_py_function = "dummy" + + def create_to_py_utility_code(self, env): + return True + + def create_from_py_utility_code(self, env): + return True + + def declaration_code(self, entity_code, + for_display = 0, dll_linkage = None, pyrex = 0): + return "" + + def same_as_resolved_type(self, other_type): + return 1 + + def error_condition(self, result_code): + return "dummy" + + +class PythonTypeConstructorMixin(object): + """Used to help Cython interpret indexed types from the typing module (or similar) + """ + modifier_name = None + + def set_python_type_constructor_name(self, name): + self.python_type_constructor_name = name + + def specialize_here(self, pos, env, template_values=None): + # for a lot of the typing classes it doesn't really matter what the template is + # (i.e. 
typing.Dict[int] is really just a dict) + return self + + def __repr__(self): + if self.base_type: + return "%s[%r]" % (self.name, self.base_type) + else: + return self.name + + def is_template_type(self): + return True + + +class BuiltinTypeConstructorObjectType(BuiltinObjectType, PythonTypeConstructorMixin): + """ + builtin types like list, dict etc which can be subscripted in annotations + """ + def __init__(self, name, cname, objstruct_cname=None): + super(BuiltinTypeConstructorObjectType, self).__init__( + name, cname, objstruct_cname=objstruct_cname) + self.set_python_type_constructor_name(name) + + +class PythonTupleTypeConstructor(BuiltinTypeConstructorObjectType): + def specialize_here(self, pos, env, template_values=None): + if (template_values and None not in template_values and + not any(v.is_pyobject for v in template_values)): + entry = env.declare_tuple_type(pos, template_values) + if entry: + entry.used = True + return entry.type + return super(PythonTupleTypeConstructor, self).specialize_here(pos, env, template_values) + + +class SpecialPythonTypeConstructor(PyObjectType, PythonTypeConstructorMixin): + """ + For things like ClassVar, Optional, etc, which are not types and disappear during type analysis. + """ + + def __init__(self, name): + super(SpecialPythonTypeConstructor, self).__init__() + self.set_python_type_constructor_name(name) + self.modifier_name = name + + def __repr__(self): + return self.name + + def resolve(self): + return self + + def specialize_here(self, pos, env, template_values=None): + if len(template_values) != 1: + error(pos, "'%s' takes exactly one template argument." % self.name) + return error_type + if template_values[0] is None: + # FIXME: allowing unknown types for now since we don't recognise all Python types. + return None + # Replace this type with the actual 'template' argument. 
        return template_values[0].resolve()


# Canonical C type-name spellings indexed by "rank" (ordering used for
# integer/float promotion decisions below).
rank_to_type_name = (
    "char",          # 0
    "short",         # 1
    "int",           # 2
    "long",          # 3
    "PY_LONG_LONG",  # 4
    "float",         # 5
    "double",        # 6
    "long double",   # 7
)

RANK_INT = rank_to_type_name.index('int')
RANK_LONG = rank_to_type_name.index('long')
RANK_FLOAT = rank_to_type_name.index('float')
UNSIGNED = 0
SIGNED = 2

# Singleton instances of the special and basic types; the rest of the
# compiler compares against these objects by identity.
error_type = ErrorType()
unspecified_type = UnspecifiedType()

py_object_type = PyObjectType()

c_void_type = CVoidType()

c_uchar_type = CIntType(0, UNSIGNED)
c_ushort_type = CIntType(1, UNSIGNED)
c_uint_type = CIntType(2, UNSIGNED)
c_ulong_type = CIntType(3, UNSIGNED)
c_ulonglong_type = CIntType(4, UNSIGNED)

c_char_type = CIntType(0)
c_short_type = CIntType(1)
c_int_type = CIntType(2)
c_long_type = CIntType(3)
c_longlong_type = CIntType(4)

c_schar_type = CIntType(0, SIGNED)
c_sshort_type = CIntType(1, SIGNED)
c_sint_type = CIntType(2, SIGNED)
c_slong_type = CIntType(3, SIGNED)
c_slonglong_type = CIntType(4, SIGNED)

c_float_type = CFloatType(5, math_h_modifier='f')
c_double_type = CFloatType(6)
c_longdouble_type = CFloatType(7, math_h_modifier='l')

c_float_complex_type = CComplexType(c_float_type)
c_double_complex_type = CComplexType(c_double_type)
c_longdouble_complex_type = CComplexType(c_longdouble_type)

soft_complex_type = SoftCComplexType()

# Fractional ranks slot these special integer types between the plain
# C integer ranks for promotion purposes.
c_anon_enum_type = CAnonEnumType(-1)
c_returncode_type = CReturnCodeType(RANK_INT)
c_bint_type = CBIntType(RANK_INT)
c_py_unicode_type = CPyUnicodeIntType(RANK_INT-0.5, UNSIGNED)
c_py_ucs4_type = CPyUCS4IntType(RANK_LONG-0.5, UNSIGNED)
c_py_hash_t_type = CPyHashTType(RANK_LONG+0.5, SIGNED)
c_py_ssize_t_type = CPySSizeTType(RANK_LONG+0.5, SIGNED)
c_ssize_t_type = CSSizeTType(RANK_LONG+0.5, SIGNED)
c_size_t_type = CSizeTType(RANK_LONG+0.5, UNSIGNED)
c_ptrdiff_t_type = CPtrdiffTType(RANK_LONG+0.75, SIGNED)

c_null_ptr_type = CNullPtrType(c_void_type)
c_void_ptr_type = CPtrType(c_void_type)
c_void_ptr_ptr_type = CPtrType(c_void_ptr_type)
c_char_ptr_type = CPtrType(c_char_type)
c_const_char_ptr_type = CPtrType(CConstType(c_char_type))
c_uchar_ptr_type = CPtrType(c_uchar_type)
c_const_uchar_ptr_type = CPtrType(CConstType(c_uchar_type))
c_char_ptr_ptr_type = CPtrType(c_char_ptr_type)
c_int_ptr_type = CPtrType(c_int_type)
c_py_unicode_ptr_type = CPtrType(c_py_unicode_type)
c_const_py_unicode_ptr_type = CPtrType(CConstType(c_py_unicode_type))
c_py_ssize_t_ptr_type = CPtrType(c_py_ssize_t_type)
c_ssize_t_ptr_type = CPtrType(c_ssize_t_type)
c_size_t_ptr_type = CPtrType(c_size_t_type)

# GIL state
c_gilstate_type = CEnumType("PyGILState_STATE", "PyGILState_STATE", True)
c_threadstate_type = CStructOrUnionType("PyThreadState", "struct", None, 1, "PyThreadState")
c_threadstate_ptr_type = CPtrType(c_threadstate_type)

# PEP-539 "Py_tss_t" type
c_pytss_t_type = CPyTSSTType()

# the Py_buffer type is defined in Builtin.py
c_py_buffer_type = CStructOrUnionType("Py_buffer", "struct", None, 1, "Py_buffer")
c_py_buffer_ptr_type = CPtrType(c_py_buffer_type)

# Not sure whether the unsigned versions and 'long long' should be in there
# long long requires C99 and might be slow, and would always get preferred
# when specialization happens through calling and not indexing
cy_integral_type = FusedType([c_short_type, c_int_type, c_long_type],
                             name="integral")
# Omitting long double as it might be slow
cy_floating_type = FusedType([c_float_type, c_double_type], name="floating")
cy_numeric_type = FusedType([c_short_type,
                             c_int_type,
                             c_long_type,
                             c_float_type,
                             c_double_type,
                             c_float_complex_type,
                             c_double_complex_type], name="numeric")

# buffer-related structs
c_buf_diminfo_type = CStructOrUnionType("__Pyx_Buf_DimInfo", "struct",
                                        None, 1, "__Pyx_Buf_DimInfo")
c_pyx_buffer_type = CStructOrUnionType("__Pyx_Buffer", "struct", None, 1, "__Pyx_Buffer")
c_pyx_buffer_ptr_type = CPtrType(c_pyx_buffer_type)
c_pyx_buffer_nd_type = CStructOrUnionType("__Pyx_LocalBuf_ND", "struct",
                                          None, 1, "__Pyx_LocalBuf_ND")

cython_memoryview_type = CStructOrUnionType("__pyx_memoryview_obj", "struct",
                                            None, 0, "__pyx_memoryview_obj")

memoryviewslice_type = CStructOrUnionType("memoryviewslice", "struct",
                                          None, 1, "__Pyx_memviewslice")

# Maps parsed (signedness, longness, base-name) triples to the type
# singletons above; longness: -1 = short, 1 = long, 2 = long long.
modifiers_and_name_to_type = {
    #(signed, longness, name) : type
    (0,  0, "char"): c_uchar_type,
    (1,  0, "char"): c_char_type,
    (2,  0, "char"): c_schar_type,

    (0, -1, "int"): c_ushort_type,
    (0,  0, "int"): c_uint_type,
    (0,  1, "int"): c_ulong_type,
    (0,  2, "int"): c_ulonglong_type,

    (1, -1, "int"): c_short_type,
    (1,  0, "int"): c_int_type,
    (1,  1, "int"): c_long_type,
    (1,  2, "int"): c_longlong_type,

    (2, -1, "int"): c_sshort_type,
    (2,  0, "int"): c_sint_type,
    (2,  1, "int"): c_slong_type,
    (2,  2, "int"): c_slonglong_type,

    (1,  0, "float"): c_float_type,
    (1,  0, "double"): c_double_type,
    (1,  1, "double"): c_longdouble_type,

    (1,  0, "complex"): c_double_complex_type,  # C: float, Python: double => Python wins
    (1,  0, "floatcomplex"): c_float_complex_type,
    (1,  0, "doublecomplex"): c_double_complex_type,
    (1,  1, "doublecomplex"): c_longdouble_complex_type,

    #
    (1,  0, "void"): c_void_type,
    (1,  0, "Py_tss_t"): c_pytss_t_type,

    (1,  0, "bint"): c_bint_type,
    (0,  0, "Py_UNICODE"): c_py_unicode_type,
    (0,  0, "Py_UCS4"): c_py_ucs4_type,
    (2,  0, "Py_hash_t"): c_py_hash_t_type,
    (2,  0, "Py_ssize_t"): c_py_ssize_t_type,
    (2,  0, "ssize_t") : c_ssize_t_type,
    (0,  0, "size_t") : c_size_t_type,
    (2,  0, "ptrdiff_t") : c_ptrdiff_t_type,

    (1,  0, "object"): py_object_type,
}

def is_promotion(src_type, dst_type):
    # It's hard to find a hard definition of promotion, but empirical
    # evidence suggests that the below is all that's allowed.
+ if src_type.is_numeric: + if dst_type.same_as(c_int_type): + unsigned = (not src_type.signed) + return (src_type.is_enum or + (src_type.is_int and + unsigned + src_type.rank < dst_type.rank)) + elif dst_type.same_as(c_double_type): + return src_type.is_float and src_type.rank <= dst_type.rank + return False + +def best_match(arg_types, functions, pos=None, env=None, args=None): + """ + Given a list args of arguments and a list of functions, choose one + to call which seems to be the "best" fit for this list of arguments. + This function is used, e.g., when deciding which overloaded method + to dispatch for C++ classes. + + We first eliminate functions based on arity, and if only one + function has the correct arity, we return it. Otherwise, we weight + functions based on how much work must be done to convert the + arguments, with the following priorities: + * identical types or pointers to identical types + * promotions + * non-Python types + That is, we prefer functions where no arguments need converted, + and failing that, functions where only promotions are required, and + so on. + + If no function is deemed a good fit, or if two or more functions have + the same weight, we return None (as there is no best match). If pos + is not None, we also generate an error. + """ + # TODO: args should be a list of types, not a list of Nodes. + actual_nargs = len(arg_types) + + candidates = [] + errors = [] + for func in functions: + error_mesg = "" + func_type = func.type + if func_type.is_ptr: + func_type = func_type.base_type + # Check function type + if not func_type.is_cfunction: + if not func_type.is_error and pos is not None: + error_mesg = "Calling non-function type '%s'" % func_type + errors.append((func, error_mesg)) + continue + # Check no. 
of args + max_nargs = len(func_type.args) + min_nargs = max_nargs - func_type.optional_arg_count + if actual_nargs < min_nargs or (not func_type.has_varargs and actual_nargs > max_nargs): + if max_nargs == min_nargs and not func_type.has_varargs: + expectation = max_nargs + elif actual_nargs < min_nargs: + expectation = "at least %s" % min_nargs + else: + expectation = "at most %s" % max_nargs + error_mesg = "Call with wrong number of arguments (expected %s, got %s)" \ + % (expectation, actual_nargs) + errors.append((func, error_mesg)) + continue + if func_type.templates: + # For any argument/parameter pair A/P, if P is a forwarding reference, + # use lvalue-reference-to-A for deduction in place of A when the + # function call argument is an lvalue. See: + # https://en.cppreference.com/w/cpp/language/template_argument_deduction#Deduction_from_a_function_call + arg_types_for_deduction = list(arg_types) + if func.type.is_cfunction and args: + for i, formal_arg in enumerate(func.type.args): + if formal_arg.is_forwarding_reference(): + if args[i].is_lvalue(): + arg_types_for_deduction[i] = c_ref_type(arg_types[i]) + deductions = reduce( + merge_template_deductions, + [pattern.type.deduce_template_params(actual) for (pattern, actual) in zip(func_type.args, arg_types_for_deduction)], + {}) + if deductions is None: + errors.append((func, "Unable to deduce type parameters for %s given (%s)" % ( + func_type, ', '.join(map(str, arg_types_for_deduction))))) + elif len(deductions) < len(func_type.templates): + errors.append((func, "Unable to deduce type parameter %s" % ( + ", ".join([param.name for param in set(func_type.templates) - set(deductions.keys())])))) + else: + type_list = [deductions[param] for param in func_type.templates] + from .Symtab import Entry + specialization = Entry( + name = func.name + "[%s]" % ",".join([str(t) for t in type_list]), + cname = func.cname + "<%s>" % ",".join([t.empty_declaration_code() for t in type_list]), + type = 
func_type.specialize(deductions), + pos = func.pos) + candidates.append((specialization, specialization.type)) + else: + candidates.append((func, func_type)) + + # Optimize the most common case of no overloading... + if len(candidates) == 1: + return candidates[0][0] + elif len(candidates) == 0: + if pos is not None and errors: + func, errmsg = errors[0] + if len(errors) == 1 or [1 for func, e in errors if e == errmsg]: + error(pos, errmsg) + else: + error(pos, "no suitable method found") + return None + + possibilities = [] + bad_types = [] + needed_coercions = {} + + for index, (func, func_type) in enumerate(candidates): + score = [0,0,0,0,0,0,0] + for i in range(min(actual_nargs, len(func_type.args))): + src_type = arg_types[i] + dst_type = func_type.args[i].type + + assignable = dst_type.assignable_from(src_type) + + # Now take care of unprefixed string literals. So when you call a cdef + # function that takes a char *, the coercion will mean that the + # type will simply become bytes. 
We need to do this coercion + # manually for overloaded and fused functions + if not assignable: + c_src_type = None + if src_type.is_pyobject: + if src_type.is_builtin_type and src_type.name == 'str' and dst_type.resolve().is_string: + c_src_type = dst_type.resolve() + else: + c_src_type = src_type.default_coerced_ctype() + elif src_type.is_pythran_expr: + c_src_type = src_type.org_buffer + + if c_src_type is not None: + assignable = dst_type.assignable_from(c_src_type) + if assignable: + src_type = c_src_type + needed_coercions[func] = (i, dst_type) + + if assignable: + if src_type == dst_type or dst_type.same_as(src_type): + pass # score 0 + elif func_type.is_strict_signature: + break # exact match requested but not found + elif is_promotion(src_type, dst_type): + score[2] += 1 + elif ((src_type.is_int and dst_type.is_int) or + (src_type.is_float and dst_type.is_float)): + score[2] += abs(dst_type.rank + (not dst_type.signed) - + (src_type.rank + (not src_type.signed))) + 1 + elif dst_type.is_ptr and src_type.is_ptr: + if dst_type.base_type == c_void_type: + score[4] += 1 + elif src_type.base_type.is_cpp_class and src_type.base_type.is_subclass(dst_type.base_type): + score[6] += src_type.base_type.subclass_dist(dst_type.base_type) + else: + score[5] += 1 + elif not src_type.is_pyobject: + score[1] += 1 + else: + score[0] += 1 + else: + error_mesg = "Invalid conversion from '%s' to '%s'" % (src_type, dst_type) + bad_types.append((func, error_mesg)) + break + else: + possibilities.append((score, index, func)) # so we can sort it + + if possibilities: + possibilities.sort() + if len(possibilities) > 1: + score1 = possibilities[0][0] + score2 = possibilities[1][0] + if score1 == score2: + if pos is not None: + error(pos, "ambiguous overloaded method") + return None + + function = possibilities[0][-1] + + if function in needed_coercions and env: + arg_i, coerce_to_type = needed_coercions[function] + args[arg_i] = args[arg_i].coerce_to(coerce_to_type, env) + + return 
function + + if pos is not None: + if len(bad_types) == 1: + error(pos, bad_types[0][1]) + else: + error(pos, "no suitable method found") + + return None + +def merge_template_deductions(a, b): + if a is None or b is None: + return None + all = a + for param, value in b.items(): + if param in all: + if a[param] != b[param]: + return None + else: + all[param] = value + return all + + +def widest_numeric_type(type1, type2): + """Given two numeric types, return the narrowest type encompassing both of them. + """ + if type1.is_reference: + type1 = type1.ref_base_type + if type2.is_reference: + type2 = type2.ref_base_type + if type1.is_cv_qualified: + type1 = type1.cv_base_type + if type2.is_cv_qualified: + type2 = type2.cv_base_type + if type1 == type2: + widest_type = type1 + elif type1.is_complex or type2.is_complex: + def real_type(ntype): + if ntype.is_complex: + return ntype.real_type + return ntype + widest_type = CComplexType( + widest_numeric_type( + real_type(type1), + real_type(type2))) + if type1 is soft_complex_type or type2 is soft_complex_type: + type1_is_other_complex = type1 is not soft_complex_type and type1.is_complex + type2_is_other_complex = type2 is not soft_complex_type and type2.is_complex + if (not type1_is_other_complex and not type2_is_other_complex and + widest_type.real_type == soft_complex_type.real_type): + # ensure we can do an actual "is" comparison + # (this possibly goes slightly wrong when mixing long double and soft complex) + widest_type = soft_complex_type + elif type1.is_enum and type2.is_enum: + widest_type = c_int_type + elif type1.rank < type2.rank: + widest_type = type2 + elif type1.rank > type2.rank: + widest_type = type1 + elif type1.signed < type2.signed: + widest_type = type1 + elif type1.signed > type2.signed: + widest_type = type2 + elif type1.is_typedef > type2.is_typedef: + widest_type = type1 + else: + widest_type = type2 + return widest_type + + +def numeric_type_fits(small_type, large_type): + return 
widest_numeric_type(small_type, large_type) == large_type + + +def independent_spanning_type(type1, type2): + # Return a type assignable independently from both type1 and + # type2, but do not require any interoperability between the two. + # For example, in "True * 2", it is safe to assume an integer + # result type (so spanning_type() will do the right thing), + # whereas "x = True or 2" must evaluate to a type that can hold + # both a boolean value and an integer, so this function works + # better. + if type1.is_reference ^ type2.is_reference: + if type1.is_reference: + type1 = type1.ref_base_type + else: + type2 = type2.ref_base_type + + resolved_type1 = type1.resolve() + resolved_type2 = type2.resolve() + if resolved_type1 == resolved_type2: + return type1 + elif ((resolved_type1 is c_bint_type or resolved_type2 is c_bint_type) + and (type1.is_numeric and type2.is_numeric)): + # special case: if one of the results is a bint and the other + # is another C integer, we must prevent returning a numeric + # type so that we do not lose the ability to coerce to a + # Python bool if we have to. + return py_object_type + + span_type = _spanning_type(type1, type2) + if span_type is None: + return error_type + return span_type + +def spanning_type(type1, type2): + # Return a type assignable from both type1 and type2, or + # py_object_type if no better type is found. Assumes that the + # code that calls this will try a coercion afterwards, which will + # fail if the types cannot actually coerce to a py_object_type. 
+ if type1 == type2: + return type1 + elif type1 is py_object_type or type2 is py_object_type: + return py_object_type + elif type1 is c_py_unicode_type or type2 is c_py_unicode_type: + # Py_UNICODE behaves more like a string than an int + return py_object_type + span_type = _spanning_type(type1, type2) + if span_type is None: + return py_object_type + return span_type + +def _spanning_type(type1, type2): + if type1.is_numeric and type2.is_numeric: + return widest_numeric_type(type1, type2) + elif type1.is_builtin_type and type1.name == 'float' and type2.is_numeric: + return widest_numeric_type(c_double_type, type2) + elif type2.is_builtin_type and type2.name == 'float' and type1.is_numeric: + return widest_numeric_type(type1, c_double_type) + elif type1.is_extension_type and type2.is_extension_type: + return widest_extension_type(type1, type2) + elif type1.is_pyobject or type2.is_pyobject: + return py_object_type + elif type1.assignable_from(type2): + if type1.is_extension_type and type1.typeobj_is_imported(): + # external types are unsafe, so we use PyObject instead + return py_object_type + return type1 + elif type2.assignable_from(type1): + if type2.is_extension_type and type2.typeobj_is_imported(): + # external types are unsafe, so we use PyObject instead + return py_object_type + return type2 + elif type1.is_ptr and type2.is_ptr: + if type1.base_type.is_cpp_class and type2.base_type.is_cpp_class: + common_base = widest_cpp_type(type1.base_type, type2.base_type) + if common_base: + return CPtrType(common_base) + # incompatible pointers, void* will do as a result + return c_void_ptr_type + else: + return None + +def widest_extension_type(type1, type2): + if type1.typeobj_is_imported() or type2.typeobj_is_imported(): + return py_object_type + while True: + if type1.subtype_of(type2): + return type2 + elif type2.subtype_of(type1): + return type1 + type1, type2 = type1.base_type, type2.base_type + if type1 is None or type2 is None: + return py_object_type + +def 
widest_cpp_type(type1, type2): + @cached_function + def bases(type): + all = set() + for base in type.base_classes: + all.add(base) + all.update(bases(base)) + return all + common_bases = bases(type1).intersection(bases(type2)) + common_bases_bases = reduce(set.union, [bases(b) for b in common_bases], set()) + candidates = [b for b in common_bases if b not in common_bases_bases] + if len(candidates) == 1: + return candidates[0] + else: + # Fall back to void* for now. + return None + + +def simple_c_type(signed, longness, name): + # Find type descriptor for simple type given name and modifiers. + # Returns None if arguments don't make sense. + return modifiers_and_name_to_type.get((signed, longness, name)) + +def parse_basic_type(name): + base = None + if name.startswith('p_'): + base = parse_basic_type(name[2:]) + elif name.startswith('p'): + base = parse_basic_type(name[1:]) + elif name.endswith('*'): + base = parse_basic_type(name[:-1]) + if base: + return CPtrType(base) + # + basic_type = simple_c_type(1, 0, name) + if basic_type: + return basic_type + # + signed = 1 + longness = 0 + if name == 'Py_UNICODE': + signed = 0 + elif name == 'Py_UCS4': + signed = 0 + elif name == 'Py_hash_t': + signed = 2 + elif name == 'Py_ssize_t': + signed = 2 + elif name == 'ssize_t': + signed = 2 + elif name == 'size_t': + signed = 0 + elif name == 'ptrdiff_t': + signed = 2 + else: + if name.startswith('u'): + name = name[1:] + signed = 0 + elif (name.startswith('s') and + not name.startswith('short')): + name = name[1:] + signed = 2 + longness = 0 + while name.startswith('short'): + name = name.replace('short', '', 1).strip() + longness -= 1 + while name.startswith('long'): + name = name.replace('long', '', 1).strip() + longness += 1 + if longness != 0 and not name: + name = 'int' + return simple_c_type(signed, longness, name) + + +def _construct_type_from_base(cls, base_type, *args): + if base_type is error_type: + return error_type + return cls(base_type, *args) + +def 
c_array_type(base_type, size): + # Construct a C array type. + return _construct_type_from_base(CArrayType, base_type, size) + +def c_ptr_type(base_type): + # Construct a C pointer type. + if base_type.is_reference: + base_type = base_type.ref_base_type + return _construct_type_from_base(CPtrType, base_type) + +def c_ref_type(base_type): + # Construct a C reference type + return _construct_type_from_base(CReferenceType, base_type) + +def cpp_rvalue_ref_type(base_type): + # Construct a C++ rvalue reference type + return _construct_type_from_base(CppRvalueReferenceType, base_type) + +def c_const_type(base_type): + # Construct a C const type. + return _construct_type_from_base(CConstType, base_type) + +def c_const_or_volatile_type(base_type, is_const, is_volatile): + # Construct a C const/volatile type. + return _construct_type_from_base(CConstOrVolatileType, base_type, is_const, is_volatile) + + +def same_type(type1, type2): + return type1.same_as(type2) + +def assignable_from(type1, type2): + return type1.assignable_from(type2) + +def typecast(to_type, from_type, expr_code): + # Return expr_code cast to a C type which can be + # assigned to to_type, assuming its existing C type + # is from_type. 
+ if (to_type is from_type or + (not to_type.is_pyobject and assignable_from(to_type, from_type))): + return expr_code + elif (to_type is py_object_type and from_type and + from_type.is_builtin_type and from_type.name != 'type'): + # no cast needed, builtins are PyObject* already + return expr_code + else: + #print "typecast: to", to_type, "from", from_type ### + return to_type.cast_code(expr_code) + +def type_list_identifier(types): + return cap_length('__and_'.join(type_identifier(type) for type in types)) + +_special_type_characters = { + '__': '__dunder', + 'const ': '__const_', + ' ': '__space_', + '*': '__ptr', + '&': '__ref', + '&&': '__fwref', + '[': '__lArr', + ']': '__rArr', + '<': '__lAng', + '>': '__rAng', + '(': '__lParen', + ')': '__rParen', + ',': '__comma_', + '...': '__EL', + '::': '__in_', + ':': '__D', +} + +_escape_special_type_characters = partial(re.compile( + # join substrings in reverse order to put longer matches first, e.g. "::" before ":" + " ?(%s) ?" % "|".join(re.escape(s) for s in sorted(_special_type_characters, reverse=True)) +).sub, lambda match: _special_type_characters[match.group(1)]) + +def type_identifier(type, pyrex=False): + scope = None + decl = type.empty_declaration_code(pyrex=pyrex) + entry = getattr(type, "entry", None) + if entry and entry.scope: + scope = entry.scope + return type_identifier_from_declaration(decl, scope=scope) + +_type_identifier_cache = {} +def type_identifier_from_declaration(decl, scope = None): + key = (decl, scope) + safe = _type_identifier_cache.get(key) + if safe is None: + safe = decl + if scope: + safe = scope.mangle(prefix="", name=safe) + safe = re.sub(' +', ' ', safe) + safe = re.sub(' ?([^a-zA-Z0-9_]) ?', r'\1', safe) + safe = _escape_special_type_characters(safe) + safe = cap_length(re.sub('[^a-zA-Z0-9_]', lambda x: '__%X' % ord(x.group(0)), safe)) + _type_identifier_cache[key] = safe + return safe + +def cap_length(s, max_len=63): + if len(s) <= max_len: + return s + hash_prefix = 
hashlib.sha256(s.encode('ascii')).hexdigest()[:6] + return '%s__%s__etc' % (hash_prefix, s[:max_len-17]) + +def write_noexcept_performance_hint(pos, env, + function_name=None, void_return=False, is_call=False, + is_from_pxd=False): + if function_name: + # we need it escaped everywhere we use it + function_name = "'%s'" % function_name + if is_call: + on_what = "after calling %s " % (function_name or 'function') + elif function_name: + on_what = "on %s " % function_name + else: + on_what ='' + msg = ( + "Exception check %swill always require the GIL to be acquired." + ) % on_what + the_function = function_name if function_name else "the function" + if is_call and not function_name: + the_function = the_function + " you are calling" + solutions = ["Declare %s as 'noexcept' if you control the definition and " + "you're sure you don't want the function to raise exceptions." + % the_function] + if void_return: + solutions.append( + "Use an 'int' return type on %s to allow an error code to be returned." % + the_function) + if is_from_pxd and not void_return: + solutions.append( + "Declare any exception value explicitly for functions in pxd files.") + if len(solutions) == 1: + msg = "%s %s" % (msg, solutions[0]) + else: + solutions = ["\t%s. %s" % (i+1, s) for i, s in enumerate(solutions)] + msg = "%s\nPossible solutions:\n%s" % (msg, "\n".join(solutions)) + performance_hint(pos, msg, env) + +def remove_cv_ref(tp, remove_fakeref=False): + # named by analogy with c++ std::remove_cv_ref + last_tp = None + # The while-loop is probably unnecessary, but I'm not confident + # of the order or how careful we are prevent nesting. 
+ while tp != last_tp: + last_tp = tp + if tp.is_cv_qualified: + tp = tp.cv_base_type + if tp.is_reference and (not tp.is_fake_reference or remove_fakeref): + tp = tp.ref_base_type + return tp diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/Pythran.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/Pythran.py new file mode 100644 index 0000000000000000000000000000000000000000..c02704a918ce6cb4e83ef28b78f678a748a022c3 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/Pythran.py @@ -0,0 +1,227 @@ +# cython: language_level=3 + +from __future__ import absolute_import + +from .PyrexTypes import CType, CTypedefType, CStructOrUnionType + +import cython + +try: + import pythran + pythran_is_pre_0_9 = tuple(map(int, pythran.__version__.split('.')[0:2])) < (0, 9) + pythran_is_pre_0_9_6 = tuple(map(int, pythran.__version__.split('.')[0:3])) < (0, 9, 6) +except ImportError: + pythran = None + pythran_is_pre_0_9 = True + pythran_is_pre_0_9_6 = True + +if pythran_is_pre_0_9_6: + pythran_builtins = '__builtin__' +else: + pythran_builtins = 'builtins' + + +# Pythran/Numpy specific operations + +def has_np_pythran(env): + if env is None: + return False + directives = getattr(env, 'directives', None) + return (directives and directives.get('np_pythran', False)) + +@cython.ccall +def is_pythran_supported_dtype(type_): + if isinstance(type_, CTypedefType): + return is_pythran_supported_type(type_.typedef_base_type) + return type_.is_numeric + + +def pythran_type(Ty, ptype="ndarray"): + if Ty.is_buffer: + ndim,dtype = Ty.ndim, Ty.dtype + if isinstance(dtype, CStructOrUnionType): + ctype = dtype.cname + elif isinstance(dtype, CType): + ctype = dtype.sign_and_name() + elif isinstance(dtype, CTypedefType): + ctype = dtype.typedef_cname + else: + raise ValueError("unsupported type %s!" 
% dtype) + if pythran_is_pre_0_9: + return "pythonic::types::%s<%s,%d>" % (ptype,ctype, ndim) + else: + return "pythonic::types::%s<%s,pythonic::types::pshape<%s>>" % (ptype,ctype, ",".join(("long",)*ndim)) + if Ty.is_pythran_expr: + return Ty.pythran_type + #if Ty.is_none: + # return "decltype(pythonic::builtins::None)" + if Ty.is_numeric: + return Ty.sign_and_name() + raise ValueError("unsupported pythran type %s (%s)" % (Ty, type(Ty))) + + +@cython.cfunc +def type_remove_ref(ty): + return "typename std::remove_reference<%s>::type" % ty + + +def pythran_binop_type(op, tA, tB): + if op == '**': + return 'decltype(pythonic::numpy::functor::power{}(std::declval<%s>(), std::declval<%s>()))' % ( + pythran_type(tA), pythran_type(tB)) + else: + return "decltype(std::declval<%s>() %s std::declval<%s>())" % ( + pythran_type(tA), op, pythran_type(tB)) + + +def pythran_unaryop_type(op, type_): + return "decltype(%sstd::declval<%s>())" % ( + op, pythran_type(type_)) + + +@cython.cfunc +def _index_access(index_code, indices): + indexing = ",".join([index_code(idx) for idx in indices]) + return ('[%s]' if len(indices) == 1 else '(%s)') % indexing + + +def _index_type_code(index_with_type): + idx, index_type = index_with_type + if idx.is_slice: + n = 2 + int(not idx.step.is_none) + return "pythonic::%s::functor::slice{}(%s)" % ( + pythran_builtins, + ",".join(["0"]*n)) + elif index_type.is_int: + return "std::declval<%s>()" % index_type.sign_and_name() + elif index_type.is_pythran_expr: + return "std::declval<%s>()" % index_type.pythran_type + raise ValueError("unsupported indexing type %s!" 
% index_type) + + +def _index_code(idx): + if idx.is_slice: + values = idx.start, idx.stop, idx.step + if idx.step.is_none: + func = "contiguous_slice" + values = values[:2] + else: + func = "slice" + return "pythonic::types::%s(%s)" % ( + func, ",".join((v.pythran_result() for v in values))) + elif idx.type.is_int: + return to_pythran(idx) + elif idx.type.is_pythran_expr: + return idx.pythran_result() + raise ValueError("unsupported indexing type %s" % idx.type) + + +def pythran_indexing_type(type_, indices): + return type_remove_ref("decltype(std::declval<%s>()%s)" % ( + pythran_type(type_), + _index_access(_index_type_code, indices), + )) + + +def pythran_indexing_code(indices): + return _index_access(_index_code, indices) + +def np_func_to_list(func): + if not func.is_numpy_attribute: + return [] + return np_func_to_list(func.obj) + [func.attribute] + +if pythran is None: + def pythran_is_numpy_func_supported(name): + return False +else: + def pythran_is_numpy_func_supported(func): + CurF = pythran.tables.MODULES['numpy'] + FL = np_func_to_list(func) + for F in FL: + CurF = CurF.get(F, None) + if CurF is None: + return False + return True + +def pythran_functor(func): + func = np_func_to_list(func) + submodules = "::".join(func[:-1] + ["functor"]) + return "pythonic::numpy::%s::%s" % (submodules, func[-1]) + +def pythran_func_type(func, args): + args = ",".join(("std::declval<%s>()" % pythran_type(a.type) for a in args)) + return "decltype(%s{}(%s))" % (pythran_functor(func), args) + + +@cython.ccall +def to_pythran(op, ptype=None): + op_type = op.type + if op_type.is_int: + # Make sure that integer literals always have exactly the type that the templates expect. 
+ return op_type.cast_code(op.result()) + if is_type(op_type, ["is_pythran_expr", "is_numeric", "is_float", "is_complex"]): + return op.result() + if op.is_none: + return "pythonic::%s::None" % pythran_builtins + if ptype is None: + ptype = pythran_type(op_type) + + assert op.type.is_pyobject + return "from_python<%s>(%s)" % (ptype, op.py_result()) + + +@cython.cfunc +def is_type(type_, types): + for attr in types: + if getattr(type_, attr, False): + return True + return False + + +def is_pythran_supported_node_or_none(node): + return node.is_none or is_pythran_supported_type(node.type) + + +@cython.ccall +def is_pythran_supported_type(type_): + pythran_supported = ( + "is_pythran_expr", "is_int", "is_numeric", "is_float", "is_none", "is_complex") + return is_type(type_, pythran_supported) or is_pythran_expr(type_) + + +def is_pythran_supported_operation_type(type_): + pythran_supported = ( + "is_pythran_expr", "is_int", "is_numeric", "is_float", "is_complex") + return is_type(type_,pythran_supported) or is_pythran_expr(type_) + + +@cython.ccall +def is_pythran_expr(type_): + return type_.is_pythran_expr + + +def is_pythran_buffer(type_): + return (type_.is_numpy_buffer and is_pythran_supported_dtype(type_.dtype) and + type_.mode in ("c", "strided") and not type_.cast) + +def pythran_get_func_include_file(func): + func = np_func_to_list(func) + return "pythonic/numpy/%s.hpp" % "/".join(func) + +def include_pythran_generic(env): + # Generic files + env.add_include_file("pythonic/core.hpp") + env.add_include_file("pythonic/python/core.hpp") + env.add_include_file("pythonic/types/bool.hpp") + env.add_include_file("pythonic/types/ndarray.hpp") + env.add_include_file("pythonic/numpy/power.hpp") + env.add_include_file("pythonic/%s/slice.hpp" % pythran_builtins) + env.add_include_file("") # for placement new + + for i in (8, 16, 32, 64): + env.add_include_file("pythonic/types/uint%d.hpp" % i) + env.add_include_file("pythonic/types/int%d.hpp" % i) + for t in ("float", 
"float32", "float64", "set", "slice", "tuple", "int", + "complex", "complex64", "complex128"): + env.add_include_file("pythonic/types/%s.hpp" % t) diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/Scanning.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/Scanning.py new file mode 100644 index 0000000000000000000000000000000000000000..372392bb7c1d28a300e887df6acc4f80d3d1076e --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/Scanning.py @@ -0,0 +1,582 @@ +# cython: infer_types=True, language_level=3, auto_pickle=False +# +# Cython Scanner +# + +from __future__ import absolute_import + +import cython +cython.declare(make_lexicon=object, lexicon=object, + print_function=object, error=object, warning=object, + os=object, platform=object) + +import os +import platform +from unicodedata import normalize +from contextlib import contextmanager + +from .. import Utils +from ..Plex.Scanners import Scanner +from ..Plex.Errors import UnrecognizedInput +from .Errors import error, warning, hold_errors, release_errors, CompileError +from .Lexicon import any_string_prefix, make_lexicon, IDENT +from .Future import print_function + +debug_scanner = 0 +trace_scanner = 0 +scanner_debug_flags = 0 +scanner_dump_file = None + +lexicon = None + + +def get_lexicon(): + global lexicon + if not lexicon: + lexicon = make_lexicon() + return lexicon + + +#------------------------------------------------------------------ + +py_reserved_words = [ + "global", "nonlocal", "def", "class", "print", "del", "pass", "break", + "continue", "return", "raise", "import", "exec", "try", + "except", "finally", "while", "if", "elif", "else", "for", + "in", "assert", "and", "or", "not", "is", "lambda", + "from", "yield", "with", +] + +pyx_reserved_words = py_reserved_words + [ + "include", "ctypedef", "cdef", "cpdef", + "cimport", "DEF", "IF", "ELIF", "ELSE" +] + + 
+#------------------------------------------------------------------ + +class CompileTimeScope(object): + + def __init__(self, outer=None): + self.entries = {} + self.outer = outer + + def declare(self, name, value): + self.entries[name] = value + + def update(self, other): + self.entries.update(other) + + def lookup_here(self, name): + return self.entries[name] + + def __contains__(self, name): + return name in self.entries + + def lookup(self, name): + try: + return self.lookup_here(name) + except KeyError: + outer = self.outer + if outer: + return outer.lookup(name) + else: + raise + + +def initial_compile_time_env(): + benv = CompileTimeScope() + names = ('UNAME_SYSNAME', 'UNAME_NODENAME', 'UNAME_RELEASE', 'UNAME_VERSION', 'UNAME_MACHINE') + for name, value in zip(names, platform.uname()): + benv.declare(name, value) + try: + import __builtin__ as builtins + except ImportError: + import builtins + + names = ( + 'False', 'True', + 'abs', 'all', 'any', 'ascii', 'bin', 'bool', 'bytearray', 'bytes', + 'chr', 'cmp', 'complex', 'dict', 'divmod', 'enumerate', 'filter', + 'float', 'format', 'frozenset', 'hash', 'hex', 'int', 'len', + 'list', 'map', 'max', 'min', 'oct', 'ord', 'pow', 'range', + 'repr', 'reversed', 'round', 'set', 'slice', 'sorted', 'str', + 'sum', 'tuple', 'zip', + ### defined below in a platform independent way + # 'long', 'unicode', 'reduce', 'xrange' + ) + + for name in names: + try: + benv.declare(name, getattr(builtins, name)) + except AttributeError: + # ignore, likely Py3 + pass + + # Py2/3 adaptations + from functools import reduce + benv.declare('reduce', reduce) + benv.declare('unicode', getattr(builtins, 'unicode', getattr(builtins, 'str'))) + benv.declare('long', getattr(builtins, 'long', getattr(builtins, 'int'))) + benv.declare('xrange', getattr(builtins, 'xrange', getattr(builtins, 'range'))) + + denv = CompileTimeScope(benv) + return denv + + +#------------------------------------------------------------------ + +class 
SourceDescriptor(object): + """ + A SourceDescriptor should be considered immutable. + """ + filename = None + in_utility_code = False + + _file_type = 'pyx' + + _escaped_description = None + _cmp_name = '' + def __str__(self): + assert False # To catch all places where a descriptor is used directly as a filename + + def set_file_type_from_name(self, filename): + name, ext = os.path.splitext(filename) + self._file_type = ext in ('.pyx', '.pxd', '.py') and ext[1:] or 'pyx' + + def is_cython_file(self): + return self._file_type in ('pyx', 'pxd') + + def is_python_file(self): + return self._file_type == 'py' + + def get_escaped_description(self): + if self._escaped_description is None: + esc_desc = \ + self.get_description().encode('ASCII', 'replace').decode("ASCII") + # Use forward slashes on Windows since these paths + # will be used in the #line directives in the C/C++ files. + self._escaped_description = esc_desc.replace('\\', '/') + return self._escaped_description + + def __gt__(self, other): + # this is only used to provide some sort of order + try: + return self._cmp_name > other._cmp_name + except AttributeError: + return False + + def __lt__(self, other): + # this is only used to provide some sort of order + try: + return self._cmp_name < other._cmp_name + except AttributeError: + return False + + def __le__(self, other): + # this is only used to provide some sort of order + try: + return self._cmp_name <= other._cmp_name + except AttributeError: + return False + + def __copy__(self): + return self # immutable, no need to copy + + def __deepcopy__(self, memo): + return self # immutable, no need to copy + + +class FileSourceDescriptor(SourceDescriptor): + """ + Represents a code source. A code source is a more generic abstraction + for a "filename" (as sometimes the code doesn't come from a file). + Instances of code sources are passed to Scanner.__init__ as the + optional name argument and will be passed back when asking for + the position()-tuple. 
+ """ + def __init__(self, filename, path_description=None): + filename = Utils.decode_filename(filename) + self.path_description = path_description or filename + self.filename = filename + # Prefer relative paths to current directory (which is most likely the project root) over absolute paths. + workdir = os.path.abspath('.') + os.sep + self.file_path = filename[len(workdir):] if filename.startswith(workdir) else filename + self.set_file_type_from_name(filename) + self._cmp_name = filename + self._lines = {} + + def get_lines(self, encoding=None, error_handling=None): + # we cache the lines only the second time this is called, in + # order to save memory when they are only used once + key = (encoding, error_handling) + try: + lines = self._lines[key] + if lines is not None: + return lines + except KeyError: + pass + + with Utils.open_source_file(self.filename, encoding=encoding, error_handling=error_handling) as f: + lines = list(f) + + if key in self._lines: + self._lines[key] = lines + else: + # do not cache the first access, but remember that we + # already read it once + self._lines[key] = None + return lines + + def get_description(self): + try: + return os.path.relpath(self.path_description) + except ValueError: + # path not under current directory => use complete file path + return self.path_description + + def get_error_description(self): + path = self.filename + cwd = Utils.decode_filename(os.getcwd() + os.path.sep) + if path.startswith(cwd): + return path[len(cwd):] + return path + + def get_filenametable_entry(self): + return self.file_path + + def __eq__(self, other): + return isinstance(other, FileSourceDescriptor) and self.filename == other.filename + + def __hash__(self): + return hash(self.filename) + + def __repr__(self): + return "" % self.filename + + +class StringSourceDescriptor(SourceDescriptor): + """ + Instances of this class can be used instead of a filenames if the + code originates from a string object. 
+ """ + def __init__(self, name, code): + self.name = name + #self.set_file_type_from_name(name) + self.codelines = [x + "\n" for x in code.split("\n")] + self._cmp_name = name + + def get_lines(self, encoding=None, error_handling=None): + if not encoding: + return self.codelines + else: + return [line.encode(encoding, error_handling).decode(encoding) + for line in self.codelines] + + def get_description(self): + return self.name + + get_error_description = get_description + + def get_filenametable_entry(self): + return "" + + def __hash__(self): + return id(self) + # Do not hash on the name, an identical string source should be the + # same object (name is often defaulted in other places) + # return hash(self.name) + + def __eq__(self, other): + return isinstance(other, StringSourceDescriptor) and self.name == other.name + + def __repr__(self): + return "" % self.name + + +#------------------------------------------------------------------ + +class PyrexScanner(Scanner): + # context Context Compilation context + # included_files [string] Files included with 'include' statement + # compile_time_env dict Environment for conditional compilation + # compile_time_eval boolean In a true conditional compilation context + # compile_time_expr boolean In a compile-time expression context + # put_back_on_failure list or None If set, this records states so the tentatively_scan + # contextmanager can restore it + + def __init__(self, file, filename, parent_scanner=None, + scope=None, context=None, source_encoding=None, parse_comments=True, initial_pos=None): + Scanner.__init__(self, get_lexicon(), file, filename, initial_pos) + + if filename.is_python_file(): + self.in_python_file = True + keywords = py_reserved_words + else: + self.in_python_file = False + keywords = pyx_reserved_words + self.keywords = {keyword: keyword for keyword in keywords} + + self.async_enabled = 0 + + if parent_scanner: + self.context = parent_scanner.context + self.included_files = 
parent_scanner.included_files + self.compile_time_env = parent_scanner.compile_time_env + self.compile_time_eval = parent_scanner.compile_time_eval + self.compile_time_expr = parent_scanner.compile_time_expr + + if parent_scanner.async_enabled: + self.enter_async() + else: + self.context = context + self.included_files = scope.included_files + self.compile_time_env = initial_compile_time_env() + self.compile_time_eval = 1 + self.compile_time_expr = 0 + if getattr(context.options, 'compile_time_env', None): + self.compile_time_env.update(context.options.compile_time_env) + self.parse_comments = parse_comments + self.source_encoding = source_encoding + self.trace = trace_scanner + self.indentation_stack = [0] + self.indentation_char = None + self.bracket_nesting_level = 0 + + self.put_back_on_failure = None + + self.begin('INDENT') + self.sy = '' + self.next() + + def normalize_ident(self, text): + try: + text.encode('ascii') # really just name.isascii but supports Python 2 and 3 + except UnicodeEncodeError: + text = normalize('NFKC', text) + self.produce(IDENT, text) + + def commentline(self, text): + if self.parse_comments: + self.produce('commentline', text) + + def strip_underscores(self, text, symbol): + self.produce(symbol, text.replace('_', '')) + + def current_level(self): + return self.indentation_stack[-1] + + def open_bracket_action(self, text): + self.bracket_nesting_level += 1 + return text + + def close_bracket_action(self, text): + self.bracket_nesting_level -= 1 + return text + + def newline_action(self, text): + if self.bracket_nesting_level == 0: + self.begin('INDENT') + self.produce('NEWLINE', '') + + string_states = { + "'": 'SQ_STRING', + '"': 'DQ_STRING', + "'''": 'TSQ_STRING', + '"""': 'TDQ_STRING' + } + + def begin_string_action(self, text): + while text[:1] in any_string_prefix: + text = text[1:] + self.begin(self.string_states[text]) + self.produce('BEGIN_STRING') + + def end_string_action(self, text): + self.begin('') + 
self.produce('END_STRING') + + def unclosed_string_action(self, text): + self.end_string_action(text) + self.error_at_scanpos("Unclosed string literal") + + def indentation_action(self, text): + self.begin('') + # Indentation within brackets should be ignored. + #if self.bracket_nesting_level > 0: + # return + # Check that tabs and spaces are being used consistently. + if text: + c = text[0] + #print "Scanner.indentation_action: indent with", repr(c) ### + if self.indentation_char is None: + self.indentation_char = c + #print "Scanner.indentation_action: setting indent_char to", repr(c) + else: + if self.indentation_char != c: + self.error_at_scanpos("Mixed use of tabs and spaces") + if text.replace(c, "") != "": + self.error_at_scanpos("Mixed use of tabs and spaces") + # Figure out how many indents/dedents to do + current_level = self.current_level() + new_level = len(text) + #print "Changing indent level from", current_level, "to", new_level ### + if new_level == current_level: + return + elif new_level > current_level: + #print "...pushing level", new_level ### + self.indentation_stack.append(new_level) + self.produce('INDENT', '') + else: + while new_level < self.current_level(): + #print "...popping level", self.indentation_stack[-1] ### + self.indentation_stack.pop() + self.produce('DEDENT', '') + #print "...current level now", self.current_level() ### + if new_level != self.current_level(): + self.error_at_scanpos("Inconsistent indentation") + + def eof_action(self, text): + while len(self.indentation_stack) > 1: + self.produce('DEDENT', '') + self.indentation_stack.pop() + self.produce('EOF', '') + + def next(self): + try: + sy, systring = self.read() + except UnrecognizedInput: + self.error_at_scanpos("Unrecognized character") + return # just a marker, error() always raises + if sy == IDENT: + if systring in self.keywords: + if systring == u'print' and print_function in self.context.future_directives: + self.keywords.pop('print', None) + elif systring == 
u'exec' and self.context.language_level >= 3: + self.keywords.pop('exec', None) + else: + sy = self.keywords[systring] # intern + systring = self.context.intern_ustring(systring) + if self.put_back_on_failure is not None: + self.put_back_on_failure.append((sy, systring, self.position())) + self.sy = sy + self.systring = systring + if False: # debug_scanner: + _, line, col = self.position() + if not self.systring or self.sy == self.systring: + t = self.sy + else: + t = "%s %s" % (self.sy, self.systring) + print("--- %3d %2d %s" % (line, col, t)) + + def peek(self): + saved = self.sy, self.systring + saved_pos = self.position() + self.next() + next = self.sy, self.systring + self.unread(self.sy, self.systring, self.position()) + self.sy, self.systring = saved + self.last_token_position_tuple = saved_pos + return next + + def put_back(self, sy, systring, pos): + self.unread(self.sy, self.systring, self.last_token_position_tuple) + self.sy = sy + self.systring = systring + self.last_token_position_tuple = pos + + + def error(self, message, pos=None, fatal=True): + if pos is None: + pos = self.position() + if self.sy == 'INDENT': + error(pos, "Possible inconsistent indentation") + err = error(pos, message) + if fatal: raise err + + def error_at_scanpos(self, message): + # Like error(fatal=True), but gets the current scanning position rather than + # the position of the last token read. 
+ pos = self.get_current_scan_pos() + self.error(message, pos, True) + + def expect(self, what, message=None): + if self.sy == what: + self.next() + else: + self.expected(what, message) + + def expect_keyword(self, what, message=None): + if self.sy == IDENT and self.systring == what: + self.next() + else: + self.expected(what, message) + + def expected(self, what, message=None): + if message: + self.error(message) + else: + if self.sy == IDENT: + found = self.systring + else: + found = self.sy + self.error("Expected '%s', found '%s'" % (what, found)) + + def expect_indent(self): + self.expect('INDENT', "Expected an increase in indentation level") + + def expect_dedent(self): + self.expect('DEDENT', "Expected a decrease in indentation level") + + def expect_newline(self, message="Expected a newline", ignore_semicolon=False): + # Expect either a newline or end of file + useless_trailing_semicolon = None + if ignore_semicolon and self.sy == ';': + useless_trailing_semicolon = self.position() + self.next() + if self.sy != 'EOF': + self.expect('NEWLINE', message) + if useless_trailing_semicolon is not None: + warning(useless_trailing_semicolon, "useless trailing semicolon") + + def enter_async(self): + self.async_enabled += 1 + if self.async_enabled == 1: + self.keywords['async'] = 'async' + self.keywords['await'] = 'await' + + def exit_async(self): + assert self.async_enabled > 0 + self.async_enabled -= 1 + if not self.async_enabled: + del self.keywords['await'] + del self.keywords['async'] + if self.sy in ('async', 'await'): + self.sy, self.systring = IDENT, self.context.intern_ustring(self.sy) + +@contextmanager +@cython.locals(scanner=Scanner) +def tentatively_scan(scanner): + errors = hold_errors() + try: + put_back_on_failure = scanner.put_back_on_failure + scanner.put_back_on_failure = [] + initial_state = (scanner.sy, scanner.systring, scanner.position()) + try: + yield errors + except CompileError as e: + pass + finally: + if errors: + if 
scanner.put_back_on_failure: + for put_back in reversed(scanner.put_back_on_failure[:-1]): + scanner.put_back(*put_back) + # we need to restore the initial state too + scanner.put_back(*initial_state) + elif put_back_on_failure is not None: + # the outer "tentatively_scan" block that we're in might still + # want to undo this block + put_back_on_failure.extend(scanner.put_back_on_failure) + scanner.put_back_on_failure = put_back_on_failure + finally: + release_errors(ignore=True) diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/Symtab.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/Symtab.py new file mode 100644 index 0000000000000000000000000000000000000000..5f088dd1c9210d4a66468a1ab8e25db4ff764ccd --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/Symtab.py @@ -0,0 +1,2916 @@ +# +# Symbol Table +# + +from __future__ import absolute_import + +import re +import copy +import operator + +try: + import __builtin__ as builtins +except ImportError: # Py3 + import builtins + +from ..Utils import try_finally_contextmanager +from .Errors import warning, error, InternalError, performance_hint +from .StringEncoding import EncodedString +from . import Options, Naming +from . import PyrexTypes +from .PyrexTypes import py_object_type, unspecified_type +from .TypeSlots import ( + pyfunction_signature, pymethod_signature, richcmp_special_methods, + get_slot_table, get_property_accessor_signature) +from . import Future + +from . 
import Code + +iso_c99_keywords = { + 'auto', 'break', 'case', 'char', 'const', 'continue', 'default', 'do', + 'double', 'else', 'enum', 'extern', 'float', 'for', 'goto', 'if', + 'int', 'long', 'register', 'return', 'short', 'signed', 'sizeof', + 'static', 'struct', 'switch', 'typedef', 'union', 'unsigned', 'void', + 'volatile', 'while', + '_Bool', '_Complex'', _Imaginary', 'inline', 'restrict', +} + + +def c_safe_identifier(cname): + # There are some C limitations on struct entry names. + if ((cname[:2] == '__' and not (cname.startswith(Naming.pyrex_prefix) + or cname in ('__weakref__', '__dict__'))) + or cname in iso_c99_keywords): + cname = Naming.pyrex_prefix + cname + return cname + +def punycodify_name(cname, mangle_with=None): + # if passed the mangle_with should be a byte string + # modified from PEP489 + try: + cname.encode('ascii') + except UnicodeEncodeError: + cname = cname.encode('punycode').replace(b'-', b'_').decode('ascii') + if mangle_with: + # sometimes it necessary to mangle unicode names alone where + # they'll be inserted directly into C, because the punycode + # transformation can turn them into invalid identifiers + cname = "%s_%s" % (mangle_with, cname) + elif cname.startswith(Naming.pyrex_prefix): + # a punycode name could also be a valid ascii variable name so + # change the prefix to distinguish + cname = cname.replace(Naming.pyrex_prefix, + Naming.pyunicode_identifier_prefix, 1) + + return cname + + + + +class BufferAux(object): + writable_needed = False + + def __init__(self, buflocal_nd_var, rcbuf_var): + self.buflocal_nd_var = buflocal_nd_var + self.rcbuf_var = rcbuf_var + + def __repr__(self): + return "" % self.__dict__ + + +class Entry(object): + # A symbol table entry in a Scope or ModuleNamespace. 
+ # + # name string Python name of entity + # cname string C name of entity + # type PyrexType Type of entity + # doc string Doc string + # annotation ExprNode PEP 484/526 annotation + # init string Initial value + # visibility 'private' or 'public' or 'extern' + # is_builtin boolean Is an entry in the Python builtins dict + # is_cglobal boolean Is a C global variable + # is_pyglobal boolean Is a Python module-level variable + # or class attribute during + # class construction + # is_member boolean Is an assigned class member + # is_pyclass_attr boolean Is a name in a Python class namespace + # is_variable boolean Is a variable + # is_cfunction boolean Is a C function + # is_cmethod boolean Is a C method of an extension type + # is_builtin_cmethod boolean Is a C method of a builtin type (implies is_cmethod) + # is_unbound_cmethod boolean Is an unbound C method of an extension type + # is_final_cmethod boolean Is non-overridable C method + # is_inline_cmethod boolean Is inlined C method + # is_anonymous boolean Is a anonymous pyfunction entry + # is_type boolean Is a type definition + # is_cclass boolean Is an extension class + # is_cpp_class boolean Is a C++ class + # is_const boolean Is a constant + # is_property boolean Is a property of an extension type: + # doc_cname string or None C const holding the docstring + # getter_cname string C func for getting property + # setter_cname string C func for setting or deleting property + # is_cproperty boolean Is an inline property of an external type + # is_self_arg boolean Is the "self" arg of an exttype method + # is_arg boolean Is the arg of a method + # is_local boolean Is a local variable + # in_closure boolean Is referenced in an inner scope + # in_subscope boolean Belongs to a generator expression scope + # is_readonly boolean Can't be assigned to + # func_cname string C func implementing Python func + # func_modifiers [string] C function modifiers ('inline') + # pos position Source position where declared + # 
namespace_cname string If is_pyglobal, the C variable + # holding its home namespace + # pymethdef_cname string PyMethodDef structure + # signature Signature Arg & return types for Python func + # as_variable Entry Alternative interpretation of extension + # type name or builtin C function as a variable + # xdecref_cleanup boolean Use Py_XDECREF for error cleanup + # in_cinclude boolean Suppress C declaration code + # enum_values [Entry] For enum types, list of values + # qualified_name string "modname.funcname" or "modname.classname" + # or "modname.classname.funcname" + # is_declared_generic boolean Is declared as PyObject * even though its + # type is an extension type + # as_module None Module scope, if a cimported module + # is_inherited boolean Is an inherited attribute of an extension type + # pystring_cname string C name of Python version of string literal + # is_interned boolean For string const entries, value is interned + # is_identifier boolean For string const entries, value is an identifier + # used boolean + # is_special boolean Is a special method or property accessor + # of an extension type + # defined_in_pxd boolean Is defined in a .pxd file (not just declared) + # api boolean Generate C API for C class or function + # utility_code string Utility code needed when this entry is used + # + # buffer_aux BufferAux or None Extra information needed for buffer variables + # inline_func_in_pxd boolean Hacky special case for inline function in pxd file. + # Ideally this should not be necessary. + # might_overflow boolean In an arithmetic expression that could cause + # overflow (used for type inference). + # utility_code_definition For some Cython builtins, the utility code + # which contains the definition of the entry. + # Currently only supported for CythonScope entries. 
+ # error_on_uninitialized Have Control Flow issue an error when this entry is + # used uninitialized + # cf_used boolean Entry is used + # is_fused_specialized boolean Whether this entry of a cdef or def function + # is a specialization + # is_cgetter boolean Is a c-level getter function + # is_cpp_optional boolean Entry should be declared as std::optional (cpp_locals directive) + # known_standard_library_import Either None (default), an empty string (definitely can't be determined) + # or a string of "modulename.something.attribute" + # Used for identifying imports from typing/dataclasses etc + # pytyping_modifiers Python type modifiers like "typing.ClassVar" but also "dataclasses.InitVar" + # enum_int_value None or int If known, the int that corresponds to this enum value + + # TODO: utility_code and utility_code_definition serves the same purpose... + + inline_func_in_pxd = False + borrowed = 0 + init = "" + annotation = None + pep563_annotation = None + visibility = 'private' + is_builtin = 0 + is_cglobal = 0 + is_pyglobal = 0 + is_member = 0 + is_pyclass_attr = 0 + is_variable = 0 + is_cfunction = 0 + is_cmethod = 0 + is_builtin_cmethod = False + is_unbound_cmethod = 0 + is_final_cmethod = 0 + is_inline_cmethod = 0 + is_anonymous = 0 + is_type = 0 + is_cclass = 0 + is_cpp_class = 0 + is_const = 0 + is_property = 0 + is_cproperty = 0 + doc_cname = None + getter_cname = None + setter_cname = None + is_self_arg = 0 + is_arg = 0 + is_local = 0 + in_closure = 0 + from_closure = 0 + in_subscope = 0 + is_declared_generic = 0 + is_readonly = 0 + pyfunc_cname = None + func_cname = None + func_modifiers = [] + final_func_cname = None + doc = None + as_variable = None + xdecref_cleanup = 0 + in_cinclude = 0 + as_module = None + is_inherited = 0 + pystring_cname = None + is_identifier = 0 + is_interned = 0 + used = 0 + is_special = 0 + defined_in_pxd = 0 + is_implemented = 0 + api = 0 + utility_code = None + is_overridable = 0 + buffer_aux = None + prev_entry = None + 
might_overflow = 0 + fused_cfunction = None + is_fused_specialized = False + utility_code_definition = None + needs_property = False + in_with_gil_block = 0 + from_cython_utility_code = None + error_on_uninitialized = False + cf_used = True + outer_entry = None + is_cgetter = False + is_cpp_optional = False + known_standard_library_import = None + pytyping_modifiers = None + enum_int_value = None + vtable_type = None + + def __init__(self, name, cname, type, pos = None, init = None): + self.name = name + self.cname = cname + self.type = type + self.pos = pos + self.init = init + self.overloaded_alternatives = [] + self.cf_assignments = [] + self.cf_references = [] + self.inner_entries = [] + self.defining_entry = self + + def __repr__(self): + return "%s(<%x>, name=%s, type=%s)" % (type(self).__name__, id(self), self.name, self.type) + + def already_declared_here(self): + error(self.pos, "Previous declaration is here") + + def redeclared(self, pos): + error(pos, "'%s' does not match previous declaration" % self.name) + self.already_declared_here() + + def all_alternatives(self): + return [self] + self.overloaded_alternatives + + def all_entries(self): + return [self] + self.inner_entries + + def __lt__(left, right): + if isinstance(left, Entry) and isinstance(right, Entry): + return (left.name, left.cname) < (right.name, right.cname) + else: + return NotImplemented + + @property + def cf_is_reassigned(self): + return len(self.cf_assignments) > 1 + + def make_cpp_optional(self): + assert self.type.is_cpp_class + self.is_cpp_optional = True + assert not self.utility_code # we're not overwriting anything? + self.utility_code_definition = Code.UtilityCode.load_cached("OptionalLocals", "CppSupport.cpp") + + def declared_with_pytyping_modifier(self, modifier_name): + return modifier_name in self.pytyping_modifiers if self.pytyping_modifiers else False + + +class InnerEntry(Entry): + """ + An entry in a closure scope that represents the real outer Entry. 
+ """ + from_closure = True + + def __init__(self, outer_entry, scope): + Entry.__init__(self, outer_entry.name, + outer_entry.cname, + outer_entry.type, + outer_entry.pos) + self.outer_entry = outer_entry + self.scope = scope + + # share state with (outermost) defining entry + outermost_entry = outer_entry + while outermost_entry.outer_entry: + outermost_entry = outermost_entry.outer_entry + self.defining_entry = outermost_entry + self.inner_entries = outermost_entry.inner_entries + self.cf_assignments = outermost_entry.cf_assignments + self.cf_references = outermost_entry.cf_references + self.overloaded_alternatives = outermost_entry.overloaded_alternatives + self.is_cpp_optional = outermost_entry.is_cpp_optional + self.inner_entries.append(self) + + def __getattr__(self, name): + if name.startswith('__'): + # we wouldn't have been called if it was there + raise AttributeError(name) + return getattr(self.defining_entry, name) + + def all_entries(self): + return self.defining_entry.all_entries() + + +class Scope(object): + # name string Unqualified name + # outer_scope Scope or None Enclosing scope + # entries {string : Entry} Python name to entry, non-types + # const_entries [Entry] Constant entries + # type_entries [Entry] Struct/union/enum/typedef/exttype entries + # sue_entries [Entry] Struct/union/enum entries + # arg_entries [Entry] Function argument entries + # var_entries [Entry] User-defined variable entries + # pyfunc_entries [Entry] Python function entries + # cfunc_entries [Entry] C function entries + # c_class_entries [Entry] All extension type entries + # cname_to_entry {string : Entry} Temp cname to entry mapping + # return_type PyrexType or None Return type of function owning scope + # is_builtin_scope boolean Is the builtin scope of Python/Cython + # is_py_class_scope boolean Is a Python class scope + # is_c_class_scope boolean Is an extension type scope + # is_local_scope boolean Is a local (i.e. 
function/method/generator) scope + # is_closure_scope boolean Is a closure scope + # is_generator_expression_scope boolean A subset of closure scope used for generator expressions + # is_passthrough boolean Outer scope is passed directly + # is_cpp_class_scope boolean Is a C++ class scope + # is_property_scope boolean Is a extension type property scope + # is_c_dataclass_scope boolean or "frozen" is a cython.dataclasses.dataclass + # scope_prefix string Disambiguator for C names + # in_cinclude boolean Suppress C declaration code + # qualified_name string "modname" or "modname.classname" + # Python strings in this scope + # nogil boolean In a nogil section + # directives dict Helper variable for the recursive + # analysis, contains directive values. + # is_internal boolean Is only used internally (simpler setup) + # scope_predefined_names list of str Class variable containing special names defined by + # this type of scope (e.g. __builtins__, __qualname__) + + is_builtin_scope = 0 + is_py_class_scope = 0 + is_c_class_scope = 0 + is_closure_scope = 0 + is_local_scope = False + is_generator_expression_scope = 0 + is_comprehension_scope = 0 + is_passthrough = 0 + is_cpp_class_scope = 0 + is_property_scope = 0 + is_module_scope = 0 + is_c_dataclass_scope = False + is_internal = 0 + scope_prefix = "" + in_cinclude = 0 + nogil = 0 + fused_to_specific = None + return_type = None + scope_predefined_names = [] + # Do ambiguous type names like 'int' and 'float' refer to the C types? (Otherwise, Python types.) + in_c_type_context = True + + def __init__(self, name, outer_scope, parent_scope): + # The outer_scope is the next scope in the lookup chain. + # The parent_scope is used to derive the qualified name of this scope. 
        self.name = name
        self.outer_scope = outer_scope
        self.parent_scope = parent_scope
        # C-name disambiguator: length-prefixed name, dots mangled so that
        # "a.b" and "a_dot_b" cannot collide silently.
        mangled_name = "%d%s_" % (len(name), name.replace('.', '_dot_'))
        qual_scope = self.qualifying_scope()
        if qual_scope:
            self.qualified_name = qual_scope.qualify_name(name)
            self.scope_prefix = qual_scope.scope_prefix + mangled_name
        else:
            # top-level scope: no enclosing scope to qualify against
            self.qualified_name = EncodedString(name)
            self.scope_prefix = mangled_name
        self.entries = {}
        self.subscopes = set()
        self.const_entries = []
        self.type_entries = []
        self.sue_entries = []
        self.arg_entries = []
        self.var_entries = []
        self.pyfunc_entries = []
        self.cfunc_entries = []
        self.c_class_entries = []
        self.defined_c_classes = []
        self.imported_c_classes = {}
        self.cname_to_entry = {}
        self.identifier_to_entry = {}
        self.num_to_entry = {}
        self.obj_to_entry = {}
        self.buffer_entries = []
        self.lambda_defs = []
        self.id_counters = {}
        # Pre-declare names that this kind of scope always provides
        # (e.g. __qualname__); subclasses set scope_predefined_names.
        for var_name in self.scope_predefined_names:
            self.declare_var(EncodedString(var_name), py_object_type, pos=None)

    def __deepcopy__(self, memo):
        # Scopes are shared, never duplicated, by deep copies of the tree.
        return self

    def merge_in(self, other, merge_unused=True, allowlist=None):
        # Use with care...
+ entries = [] + for name, entry in other.entries.items(): + if not allowlist or name in allowlist: + if entry.used or merge_unused: + entries.append((name, entry)) + + self.entries.update(entries) + + for attr in ('const_entries', + 'type_entries', + 'sue_entries', + 'arg_entries', + 'var_entries', + 'pyfunc_entries', + 'cfunc_entries', + 'c_class_entries'): + self_entries = getattr(self, attr) + names = set(e.name for e in self_entries) + for entry in getattr(other, attr): + if (entry.used or merge_unused) and entry.name not in names: + self_entries.append(entry) + + def __str__(self): + return "<%s %s>" % (self.__class__.__name__, self.qualified_name) + + def qualifying_scope(self): + return self.parent_scope + + def mangle(self, prefix, name = None): + if name: + return punycodify_name("%s%s%s" % (prefix, self.scope_prefix, name)) + else: + return self.parent_scope.mangle(prefix, self.name) + + def mangle_internal(self, name): + # Mangle an internal name so as not to clash with any + # user-defined name in this scope. + prefix = "%s%s_" % (Naming.pyrex_prefix, name) + return self.mangle(prefix) + #return self.parent_scope.mangle(prefix, self.name) + + def mangle_class_private_name(self, name): + if self.parent_scope: + return self.parent_scope.mangle_class_private_name(name) + return name + + def next_id(self, name=None): + # Return a cname fragment that is unique for this module + counters = self.global_scope().id_counters + try: + count = counters[name] + 1 + except KeyError: + count = 0 + counters[name] = count + if name: + if not count: + # unique names don't need a suffix, reoccurrences will get one + return name + return '%s%d' % (name, count) + else: + return '%d' % count + + def global_scope(self): + """ Return the module-level scope containing this scope. """ + return self.outer_scope.global_scope() + + def builtin_scope(self): + """ Return the module-level scope containing this scope. 
""" + return self.outer_scope.builtin_scope() + + def iter_local_scopes(self): + yield self + if self.subscopes: + for scope in sorted(self.subscopes, key=operator.attrgetter('scope_prefix')): + yield scope + + @try_finally_contextmanager + def new_c_type_context(self, in_c_type_context=None): + old_c_type_context = self.in_c_type_context + if in_c_type_context is not None: + self.in_c_type_context = in_c_type_context + yield + self.in_c_type_context = old_c_type_context + + def declare(self, name, cname, type, pos, visibility, shadow = 0, is_type = 0, create_wrapper = 0): + # Create new entry, and add to dictionary if + # name is not None. Reports a warning if already + # declared. + if type.is_buffer and not isinstance(self, LocalScope): # and not is_type: + error(pos, 'Buffer types only allowed as function local variables') + if not self.in_cinclude and cname and re.match("^_[_A-Z]+$", cname): + # See https://www.gnu.org/software/libc/manual/html_node/Reserved-Names.html#Reserved-Names + warning(pos, "'%s' is a reserved name in C." % cname, -1) + + entries = self.entries + if name and name in entries and not shadow and not self.is_builtin_scope: + old_entry = entries[name] + + # Reject redeclared C++ functions only if they have the same type signature. + cpp_override_allowed = False + if type.is_cfunction and old_entry.type.is_cfunction and self.is_cpp(): + for alt_entry in old_entry.all_alternatives(): + if type == alt_entry.type: + if name == '' and not type.args: + # Cython pre-declares the no-args constructor - allow later user definitions. + cpp_override_allowed = True + break + else: + cpp_override_allowed = True + + if cpp_override_allowed: + # C++ function/method overrides with different signatures are ok. + pass + elif self.is_cpp_class_scope and entries[name].is_inherited: + # Likewise ignore inherited classes. 
            pass
        elif visibility == 'extern':
            # Silenced outside of "cdef extern" blocks, until we have a safe way to
            # prevent pxd-defined cpdef functions from ending up here.
            warning(pos, "'%s' redeclared " % name, 1 if self.in_cinclude else 0)
        elif visibility != 'ignore':
            error(pos, "'%s' redeclared " % name)
            entries[name].already_declared_here()
        entry = Entry(name, cname, type, pos = pos)
        entry.in_cinclude = self.in_cinclude
        entry.create_wrapper = create_wrapper
        if name:
            entry.qualified_name = self.qualify_name(name)
#            if name in entries and self.is_cpp():
#                entries[name].overloaded_alternatives.append(entry)
#            else:
#                entries[name] = entry
            # shadow entries are kept out of the name table on purpose
            if not shadow:
                entries[name] = entry

        if type.is_memoryviewslice:
            # memoryview slice variables need an explicit default initializer
            entry.init = type.default_value

        entry.scope = self
        entry.visibility = visibility
        return entry

    def qualify_name(self, name):
        # Build the dotted "modname[.classname].name" for an entry in this scope.
        return EncodedString("%s.%s" % (self.qualified_name, name))

    def declare_const(self, name, type, value, pos, cname = None, visibility = 'private', api = 0, create_wrapper = 0):
        # Add an entry for a named constant.
        if not cname:
            if self.in_cinclude or (visibility == 'public' or api):
                # externally visible constants keep their Python name as cname
                cname = name
            else:
                cname = self.mangle(Naming.enum_prefix, name)
        entry = self.declare(name, cname, type, pos, visibility, create_wrapper = create_wrapper)
        entry.is_const = 1
        entry.value_node = value
        return entry

    def declare_type(self, name, type, pos,
                     cname = None, visibility = 'private', api = 0, defining = 1,
                     shadow = 0, template = 0):
        # Add an entry for a type definition.
+ if not cname: + cname = name + entry = self.declare(name, cname, type, pos, visibility, shadow, + is_type=True) + entry.is_type = 1 + entry.api = api + if defining: + self.type_entries.append(entry) + + # don't replace an entry that's already set + if not template and getattr(type, "entry", None) is None: + type.entry = entry + + # here we would set as_variable to an object representing this type + return entry + + def declare_typedef(self, name, base_type, pos, cname = None, + visibility = 'private', api = 0): + if not cname: + if self.in_cinclude or (visibility != 'private' or api): + cname = name + else: + cname = self.mangle(Naming.type_prefix, name) + try: + if self.is_cpp_class_scope: + namespace = self.outer_scope.lookup(self.name).type + else: + namespace = None + type = PyrexTypes.create_typedef_type(name, base_type, cname, + (visibility == 'extern'), + namespace) + except ValueError as e: + error(pos, e.args[0]) + type = PyrexTypes.error_type + entry = self.declare_type(name, type, pos, cname, + visibility = visibility, api = api) + type.qualified_name = entry.qualified_name + return entry + + def declare_struct_or_union(self, name, kind, scope, + typedef_flag, pos, cname = None, + visibility = 'private', api = 0, + packed = False): + # Add an entry for a struct or union definition. 
+ if not cname: + if self.in_cinclude or (visibility == 'public' or api): + cname = name + else: + cname = self.mangle(Naming.type_prefix, name) + entry = self.lookup_here(name) + if not entry: + in_cpp = self.is_cpp() + type = PyrexTypes.CStructOrUnionType( + name, kind, scope, typedef_flag, cname, packed, + in_cpp = in_cpp) + entry = self.declare_type(name, type, pos, cname, + visibility = visibility, api = api, + defining = scope is not None) + self.sue_entries.append(entry) + type.entry = entry + else: + if not (entry.is_type and entry.type.is_struct_or_union + and entry.type.kind == kind): + warning(pos, "'%s' redeclared " % name, 0) + elif scope and entry.type.scope: + warning(pos, "'%s' already defined (ignoring second definition)" % name, 0) + else: + self.check_previous_typedef_flag(entry, typedef_flag, pos) + self.check_previous_visibility(entry, visibility, pos) + if scope: + entry.type.scope = scope + self.type_entries.append(entry) + if self.is_cpp_class_scope: + entry.type.namespace = self.outer_scope.lookup(self.name).type + return entry + + def declare_cpp_class(self, name, scope, + pos, cname = None, base_classes = (), + visibility = 'extern', templates = None): + if cname is None: + if self.in_cinclude or (visibility != 'private'): + cname = name + else: + cname = self.mangle(Naming.type_prefix, name) + base_classes = list(base_classes) + entry = self.lookup_here(name) + if not entry: + type = PyrexTypes.CppClassType( + name, scope, cname, base_classes, templates = templates) + entry = self.declare_type(name, type, pos, cname, + visibility = visibility, defining = scope is not None) + self.sue_entries.append(entry) + else: + if not (entry.is_type and entry.type.is_cpp_class): + error(pos, "'%s' redeclared " % name) + entry.already_declared_here() + return None + elif scope and entry.type.scope: + warning(pos, "'%s' already defined (ignoring second definition)" % name, 0) + else: + if scope: + entry.type.scope = scope + 
self.type_entries.append(entry) + if base_classes: + if entry.type.base_classes and entry.type.base_classes != base_classes: + error(pos, "Base type does not match previous declaration") + entry.already_declared_here() + else: + entry.type.base_classes = base_classes + if templates or entry.type.templates: + if templates != entry.type.templates: + error(pos, "Template parameters do not match previous declaration") + entry.already_declared_here() + + def declare_inherited_attributes(entry, base_classes): + for base_class in base_classes: + if base_class is PyrexTypes.error_type: + continue + if base_class.scope is None: + error(pos, "Cannot inherit from incomplete type") + else: + declare_inherited_attributes(entry, base_class.base_classes) + entry.type.scope.declare_inherited_cpp_attributes(base_class) + if scope: + declare_inherited_attributes(entry, base_classes) + scope.declare_var(name="this", cname="this", type=PyrexTypes.CPtrType(entry.type), pos=entry.pos) + if self.is_cpp_class_scope: + entry.type.namespace = self.outer_scope.lookup(self.name).type + return entry + + def check_previous_typedef_flag(self, entry, typedef_flag, pos): + if typedef_flag != entry.type.typedef_flag: + error(pos, "'%s' previously declared using '%s'" % ( + entry.name, ("cdef", "ctypedef")[entry.type.typedef_flag])) + + def check_previous_visibility(self, entry, visibility, pos): + if entry.visibility != visibility: + error(pos, "'%s' previously declared as '%s'" % ( + entry.name, entry.visibility)) + + def declare_enum(self, name, pos, cname, scoped, typedef_flag, + visibility='private', api=0, create_wrapper=0, doc=None): + if name: + if not cname: + if (self.in_cinclude or visibility == 'public' + or visibility == 'extern' or api): + cname = name + else: + cname = self.mangle(Naming.type_prefix, name) + if self.is_cpp_class_scope: + namespace = self.outer_scope.lookup(self.name).type + else: + namespace = None + + if scoped: + type = PyrexTypes.CppScopedEnumType(name, cname, 
namespace, doc=doc) + else: + type = PyrexTypes.CEnumType(name, cname, typedef_flag, namespace, doc=doc) + else: + type = PyrexTypes.c_anon_enum_type + entry = self.declare_type(name, type, pos, cname = cname, + visibility = visibility, api = api) + if scoped: + entry.utility_code = Code.UtilityCode.load_cached("EnumClassDecl", "CppSupport.cpp") + self.use_entry_utility_code(entry) + entry.create_wrapper = create_wrapper + entry.enum_values = [] + + self.sue_entries.append(entry) + return entry + + def declare_tuple_type(self, pos, components): + return self.outer_scope.declare_tuple_type(pos, components) + + def declare_var(self, name, type, pos, + cname=None, visibility='private', + api=False, in_pxd=False, is_cdef=False, pytyping_modifiers=None): + # Add an entry for a variable. + if not cname: + if visibility != 'private' or api: + cname = name + else: + cname = self.mangle(Naming.var_prefix, name) + entry = self.declare(name, cname, type, pos, visibility) + entry.is_variable = 1 + if type.is_cpp_class and visibility != 'extern': + if self.directives['cpp_locals']: + entry.make_cpp_optional() + else: + type.check_nullary_constructor(pos) + if in_pxd and visibility != 'extern': + entry.defined_in_pxd = 1 + entry.used = 1 + if api: + entry.api = 1 + entry.used = 1 + if pytyping_modifiers: + entry.pytyping_modifiers = pytyping_modifiers + return entry + + def _reject_pytyping_modifiers(self, pos, modifiers, allowed=()): + if not modifiers: + return + for modifier in modifiers: + if modifier not in allowed: + error(pos, "Modifier '%s' is not allowed here." % modifier) + + def declare_assignment_expression_target(self, name, type, pos): + # In most cases declares the variable as normal. 
        # For generator expressions and comprehensions the variable is declared in their parent
        return self.declare_var(name, type, pos)

    def declare_builtin(self, name, pos):
        # Resolve a builtin name: apply class-private mangling first, then
        # delegate to the enclosing scope (ultimately the builtin scope).
        name = self.mangle_class_private_name(name)
        return self.outer_scope.declare_builtin(name, pos)

    def _declare_pyfunction(self, name, pos, visibility='extern', entry=None):
        # Helper: declare a Python function entry.  Reports a redeclaration
        # error if 'entry' already names something that is not a C function.
        if entry and not entry.type.is_cfunction:
            error(pos, "'%s' already declared" % name)
            error(entry.pos, "Previous declaration is here")
        entry = self.declare_var(name, py_object_type, pos, visibility=visibility)
        entry.signature = pyfunction_signature
        self.pyfunc_entries.append(entry)
        return entry

    def declare_pyfunction(self, name, pos, allow_redefine=False, visibility='extern'):
        # Add an entry for a Python function.
        entry = self.lookup_here(name)
        if not allow_redefine:
            return self._declare_pyfunction(name, pos, visibility=visibility, entry=entry)
        if entry:
            if entry.type.is_unspecified:
                entry.type = py_object_type
            elif entry.type is not py_object_type:
                return self._declare_pyfunction(name, pos, visibility=visibility, entry=entry)
        else:
            # declare entry stub
            self.declare_var(name, py_object_type, pos, visibility=visibility)
        # NOTE(review): the actual function entry is anonymous (name=None);
        # the visible name/qualified name are restored on it afterwards —
        # presumably so redefinitions rebind the stub. Confirm against callers.
        entry = self.declare_var(None, py_object_type, pos,
                                 cname=name, visibility='private')
        entry.name = EncodedString(name)
        entry.qualified_name = self.qualify_name(name)
        entry.signature = pyfunction_signature
        entry.is_anonymous = True
        return entry

    def declare_lambda_function(self, lambda_name, pos):
        # Add an entry for an anonymous Python function.
        # Mangle both the C function name and the PyMethodDef name for the
        # anonymous lambda entry.
        func_cname = self.mangle(Naming.lambda_func_prefix + u'funcdef_', lambda_name)
        pymethdef_cname = self.mangle(Naming.lambda_func_prefix + u'methdef_', lambda_name)
        qualified_name = self.qualify_name(lambda_name)

        entry = self.declare(None, func_cname, py_object_type, pos, 'private')
        entry.name = EncodedString(lambda_name)
        entry.qualified_name = qualified_name
        entry.pymethdef_cname = pymethdef_cname
        entry.func_cname = func_cname
        entry.signature = pyfunction_signature
        entry.is_anonymous = True
        return entry

    def add_lambda_def(self, def_node):
        # Record a lambda DefNode for later code generation in this scope.
        self.lambda_defs.append(def_node)

    def register_pyfunction(self, entry):
        # Register an externally-created Python function entry with this scope.
        self.pyfunc_entries.append(entry)

    def declare_cfunction(self, name, type, pos,
                          cname=None, visibility='private', api=0, in_pxd=0,
                          defining=0, modifiers=(), utility_code=None, overridable=False):
        # Add an entry for a C function.
        if not cname:
            if visibility != 'private' or api:
                cname = name
            else:
                cname = self.mangle(Naming.func_prefix, name)
        inline_in_pxd = 'inline' in modifiers and in_pxd and defining
        if inline_in_pxd:
            # in_pxd does special things that we don't want to apply to inline functions
            in_pxd = False
        entry = self.lookup_here(name)
        if entry:
            if not in_pxd and visibility != entry.visibility and visibility == 'extern':
                # Previously declared, but now extern => treat this
                # as implementing the function, using the new cname
                defining = True
                visibility = entry.visibility
                entry.cname = cname
                entry.func_cname = cname
            if visibility != 'private' and visibility != entry.visibility:
                warning(pos, "Function '%s' previously declared as '%s', now as '%s'" % (
                    name, entry.visibility, visibility), 1)
            if overridable != entry.is_overridable:
                warning(pos, "Function '%s' previously declared as '%s'" % (
                    name, 'cpdef' if overridable else 'cdef'), 1)
            if entry.type.same_as(type):
                # Fix with_gil vs nogil.
                entry.type = entry.type.with_with_gil(type.with_gil)
            else:
                if visibility == 'extern' and entry.visibility == 'extern':
                    # Differing extern redeclarations: allow an overload only
                    # when it is safe (builtin scope, C++, or unique cnames).
                    can_override = self.is_builtin_scope
                    if self.is_cpp():
                        can_override = True
                    elif cname and not can_override:
                        # if all alternatives have different cnames,
                        # it's safe to allow signature overrides
                        for alt_entry in entry.all_alternatives():
                            if not alt_entry.cname or cname == alt_entry.cname:
                                break  # cname not unique!
                        else:
                            can_override = True
                    if can_override:
                        temp = self.add_cfunction(name, type, pos, cname, visibility, modifiers)
                        temp.overloaded_alternatives = entry.all_alternatives()
                        entry = temp
                    else:
                        warning(pos, "Function signature does not match previous declaration", 1)
                        entry.type = type
                elif not in_pxd and entry.defined_in_pxd and type.compatible_signature_with(entry.type):
                    # TODO: check that this was done by a signature optimisation and not a user error.
                    #warning(pos, "Function signature does not match previous declaration", 1)

                    # Cython can't assume anything about cimported functions declared without
                    # an exception value. This is a performance problem mainly for nogil functions.
                    if entry.type.nogil and entry.type.exception_value is None and type.exception_value:
                        performance_hint(
                            entry.pos,
                            "No exception value declared for '%s' in pxd file.\n"
                            "Users cimporting this function and calling it without the gil "
                            "will always require an exception check.\n"
                            "Suggest adding an explicit exception value."
% entry.name, + self) + entry.type = type + else: + error(pos, "Function signature does not match previous declaration") + else: + entry = self.add_cfunction(name, type, pos, cname, visibility, modifiers) + entry.func_cname = cname + entry.is_overridable = overridable + if inline_in_pxd: + entry.inline_func_in_pxd = True + if in_pxd and visibility != 'extern': + entry.defined_in_pxd = 1 + if api: + entry.api = 1 + if not defining and not in_pxd and visibility != 'extern': + error(pos, "Non-extern C function '%s' declared but not defined" % name) + if defining: + entry.is_implemented = True + if modifiers: + entry.func_modifiers = modifiers + if utility_code: + assert not entry.utility_code, "duplicate utility code definition in entry %s (%s)" % (name, cname) + entry.utility_code = utility_code + if overridable: + # names of cpdef functions can be used as variables and can be assigned to + var_entry = Entry(name, cname, py_object_type) # FIXME: cname? + var_entry.qualified_name = self.qualify_name(name) + var_entry.is_variable = 1 + var_entry.is_pyglobal = 1 + var_entry.scope = entry.scope + entry.as_variable = var_entry + type.entry = entry + if (type.exception_check and type.exception_value is None and type.nogil and + not pos[0].in_utility_code and + # don't warn about external functions here - the user likely can't do anything + defining and not in_pxd and not inline_in_pxd): + PyrexTypes.write_noexcept_performance_hint( + pos, self, function_name=name, void_return=type.return_type.is_void) + return entry + + def declare_cgetter(self, name, return_type, pos=None, cname=None, + visibility="private", modifiers=(), defining=False, **cfunc_type_config): + assert all( + k in ('exception_value', 'exception_check', 'nogil', 'with_gil', 'is_const_method', 'is_static_method') + for k in cfunc_type_config + ) + cfunc_type = PyrexTypes.CFuncType( + return_type, + [PyrexTypes.CFuncTypeArg("self", self.parent_type, None)], + **cfunc_type_config) + entry = 
self.declare_cfunction( + name, cfunc_type, pos, cname=None, visibility=visibility, modifiers=modifiers, defining=defining) + entry.is_cgetter = True + if cname is not None: + entry.func_cname = cname + return entry + + def add_cfunction(self, name, type, pos, cname, visibility, modifiers, inherited=False): + # Add a C function entry without giving it a func_cname. + entry = self.declare(name, cname, type, pos, visibility) + entry.is_cfunction = 1 + if modifiers: + entry.func_modifiers = modifiers + if inherited or type.is_fused: + self.cfunc_entries.append(entry) + else: + # For backwards compatibility reasons, we must keep all non-fused methods + # before all fused methods, but separately for each type. + i = len(self.cfunc_entries) + for cfunc_entry in reversed(self.cfunc_entries): + if cfunc_entry.is_inherited or not cfunc_entry.type.is_fused: + break + i -= 1 + self.cfunc_entries.insert(i, entry) + return entry + + def find(self, name, pos): + # Look up name, report error if not found. + entry = self.lookup(name) + if entry: + return entry + else: + error(pos, "'%s' is not declared" % name) + + def find_imported_module(self, path, pos): + # Look up qualified name, must be a module, report error if not found. + # Path is a list of names. + scope = self + for name in path: + entry = scope.find(name, pos) + if not entry: + return None + if entry.as_module: + scope = entry.as_module + else: + error(pos, "'%s' is not a cimported module" % '.'.join(path)) + return None + return scope + + def lookup(self, name): + # Look up name in this scope or an enclosing one. + # Return None if not found. 

        mangled_name = self.mangle_class_private_name(name)
        entry = (self.lookup_here(name)  # lookup here also does mangling
                 or (self.outer_scope and self.outer_scope.lookup(mangled_name))
                 or None)
        if entry:
            return entry

        # look up the original name in the outer scope
        # Not strictly Python behaviour but see https://github.com/cython/cython/issues/3544
        entry = (self.outer_scope and self.outer_scope.lookup(name)) or None
        if entry and entry.is_pyglobal:
            self._emit_class_private_warning(entry.pos, name)
        return entry

    def lookup_here(self, name):
        # Look up in this scope only, return None if not found.
        entry = self.entries.get(self.mangle_class_private_name(name), None)
        if entry:
            return entry
        # Also check the unmangled name in the current scope
        # (even if mangling should give us something else).
        # This is to support things like global __foo which makes a declaration for __foo
        return self.entries.get(name, None)

    def lookup_here_unmangled(self, name):
        # Look up in this scope only, without class-private name mangling.
        return self.entries.get(name, None)

    def lookup_assignment_expression_target(self, name):
        # For most cases behaves like "lookup_here".
        # However, it does look outwards for comprehension and generator expression scopes
        return self.lookup_here(name)

    def lookup_target(self, name):
        # Look up name in this scope only. Declare as Python
        # variable if not found.
        entry = self.lookup_here(name)
        if not entry:
            entry = self.lookup_here_unmangled(name)
            if entry and entry.is_pyglobal:
                self._emit_class_private_warning(entry.pos, name)
        if not entry:
            entry = self.declare_var(name, py_object_type, None)
        return entry

    def _type_or_specialized_type_from_entry(self, entry):
        # Return the entry's type, specialising fused types when a
        # specialisation mapping is active; None for non-type entries.
        if entry and entry.is_type:
            if entry.type.is_fused and self.fused_to_specific:
                return entry.type.specialize(self.fused_to_specific)
            return entry.type

    def lookup_type(self, name):
        entry = self.lookup(name)
        # The logic here is:
        # 1.
if entry is a type then return it (and maybe specialize it) + # 2. if the entry comes from a known standard library import then follow that + # 3. repeat step 1 with the (possibly) updated entry + + tp = self._type_or_specialized_type_from_entry(entry) + if tp: + return tp + # allow us to find types from the "typing" module and similar + if entry and entry.known_standard_library_import: + from .Builtin import get_known_standard_library_entry + entry = get_known_standard_library_entry(entry.known_standard_library_import) + return self._type_or_specialized_type_from_entry(entry) + + def lookup_operator(self, operator, operands): + if operands[0].type.is_cpp_class: + obj_type = operands[0].type + method = obj_type.scope.lookup("operator%s" % operator) + if method is not None: + arg_types = [arg.type for arg in operands[1:]] + res = PyrexTypes.best_match(arg_types, method.all_alternatives()) + if res is not None: + return res + function = self.lookup("operator%s" % operator) + function_alternatives = [] + if function is not None: + function_alternatives = function.all_alternatives() + + # look-up nonmember methods listed within a class + method_alternatives = [] + if len(operands) == 2: # binary operators only + for n in range(2): + if operands[n].type.is_cpp_class: + obj_type = operands[n].type + method = obj_type.scope.lookup("operator%s" % operator) + if method is not None: + method_alternatives += method.all_alternatives() + + if (not method_alternatives) and (not function_alternatives): + return None + + # select the unique alternatives + all_alternatives = list(set(method_alternatives + function_alternatives)) + + return PyrexTypes.best_match([arg.type for arg in operands], + all_alternatives) + + def lookup_operator_for_types(self, pos, operator, types): + from .Nodes import Node + class FakeOperand(Node): + pass + operands = [FakeOperand(pos, type=type) for type in types] + return self.lookup_operator(operator, operands) + + def 
_emit_class_private_warning(self, pos, name): + warning(pos, "Global name %s matched from within class scope " + "in contradiction to to Python 'class private name' rules. " + "This may change in a future release." % name, 1) + + def use_utility_code(self, new_code): + self.global_scope().use_utility_code(new_code) + + def use_entry_utility_code(self, entry): + self.global_scope().use_entry_utility_code(entry) + + def defines_any(self, names): + # Test whether any of the given names are defined in this scope. + for name in names: + if name in self.entries: + return 1 + return 0 + + def defines_any_special(self, names): + # Test whether any of the given names are defined as special methods in this scope. + for name in names: + if name in self.entries and self.entries[name].is_special: + return 1 + return 0 + + def infer_types(self): + from .TypeInference import get_type_inferer + get_type_inferer().infer_types(self) + + def is_cpp(self): + outer = self.outer_scope + if outer is None: + return False + else: + return outer.is_cpp() + + def add_include_file(self, filename, verbatim_include=None, late=False): + self.outer_scope.add_include_file(filename, verbatim_include, late) + + +class PreImportScope(Scope): + + namespace_cname = Naming.preimport_cname + + def __init__(self): + Scope.__init__(self, Options.pre_import, None, None) + + def declare_builtin(self, name, pos): + entry = self.declare(name, name, py_object_type, pos, 'private') + entry.is_variable = True + entry.is_pyglobal = True + return entry + + +class BuiltinScope(Scope): + # The builtin namespace. 

    is_builtin_scope = True

    def __init__(self):
        # Chain to a PreImportScope when a pre-import namespace is configured,
        # so failed builtin lookups can fall back to it.
        if Options.pre_import is None:
            Scope.__init__(self, "__builtin__", None, None)
        else:
            Scope.__init__(self, "__builtin__", PreImportScope(), None)
        self.type_names = {}

        # Most entries are initialized in init_builtins, except for "bool"
        # which is apparently a special case because it conflicts with C++ bool
        self.declare_var("bool", py_object_type, None, "((PyObject*)&PyBool_Type)")

    def lookup(self, name, language_level=None, str_is_str=None):
        # 'language_level' and 'str_is_str' are passed by ModuleScope
        # Under Py3 string semantics, 'str' is redirected to 'unicode'.
        if name == 'str':
            if str_is_str is None:
                str_is_str = language_level in (None, 2)
            if not str_is_str:
                name = 'unicode'
        return Scope.lookup(self, name)

    def declare_builtin(self, name, pos):
        # Only names present on Python's own 'builtins' module are accepted;
        # unknown names are delegated outwards (pre-import scope) or reported
        # as an error/warning depending on Options.error_on_unknown_names.
        if not hasattr(builtins, name):
            if self.outer_scope is not None:
                return self.outer_scope.declare_builtin(name, pos)
            else:
                if Options.error_on_unknown_names:
                    error(pos, "undeclared name not builtin: %s" % name)
                else:
                    warning(pos, "undeclared name not builtin: %s" % name, 2)

    def declare_builtin_cfunction(self, name, type, cname, python_equiv=None, utility_code=None):
        # If python_equiv == "*", the Python equivalent has the same name
        # as the entry, otherwise it has the name specified by python_equiv.
+ name = EncodedString(name) + entry = self.declare_cfunction(name, type, None, cname, visibility='extern', utility_code=utility_code) + if python_equiv: + if python_equiv == "*": + python_equiv = name + else: + python_equiv = EncodedString(python_equiv) + var_entry = Entry(python_equiv, python_equiv, py_object_type) + var_entry.qualified_name = self.qualify_name(name) + var_entry.is_variable = 1 + var_entry.is_builtin = 1 + var_entry.utility_code = utility_code + var_entry.scope = entry.scope + entry.as_variable = var_entry + return entry + + def declare_builtin_type(self, name, cname, utility_code=None, + objstruct_cname=None, type_class=PyrexTypes.BuiltinObjectType): + name = EncodedString(name) + type = type_class(name, cname, objstruct_cname) + scope = CClassScope(name, outer_scope=None, visibility='extern', parent_type=type) + scope.directives = {} + if name == 'bool': + type.is_final_type = True + type.set_scope(scope) + self.type_names[name] = 1 + entry = self.declare_type(name, type, None, visibility='extern') + entry.utility_code = utility_code + + var_entry = Entry( + name=entry.name, + type=self.lookup('type').type, # make sure "type" is the first type declared... 
+ pos=entry.pos, + cname=entry.type.typeptr_cname, + ) + var_entry.qualified_name = self.qualify_name(name) + var_entry.is_variable = 1 + var_entry.is_cglobal = 1 + var_entry.is_readonly = 1 + var_entry.is_builtin = 1 + var_entry.utility_code = utility_code + var_entry.scope = self + if Options.cache_builtins: + var_entry.is_const = True + entry.as_variable = var_entry + + return type + + def builtin_scope(self): + return self + + +const_counter = 1 # As a temporary solution for compiling code in pxds + +class ModuleScope(Scope): + # module_name string Python name of the module + # module_cname string C name of Python module object + # #module_dict_cname string C name of module dict object + # method_table_cname string C name of method table + # doc string Module doc string + # doc_cname string C name of module doc string + # utility_code_list [UtilityCode] Queuing utility codes for forwarding to Code.py + # c_includes {key: IncludeCode} C headers or verbatim code to be generated + # See process_include() for more documentation + # identifier_to_entry {string : Entry} Map identifier string const to entry + # context Context + # parent_module Scope Parent in the import namespace + # module_entries {string : Entry} For cimport statements + # type_names {string : 1} Set of type names (used during parsing) + # included_files [string] Cython sources included with 'include' + # pxd_file_loaded boolean Corresponding .pxd file has been processed + # cimported_modules [ModuleScope] Modules imported with cimport + # types_imported {PyrexType} Set of types for which import code generated + # has_import_star boolean Module contains import * + # cpp boolean Compiling a C++ file + # is_cython_builtin boolean Is this the Cython builtin scope (or a child scope) + # is_package boolean Is this a package module? 
(__init__) + + is_module_scope = 1 + has_import_star = 0 + is_cython_builtin = 0 + old_style_globals = 0 + scope_predefined_names = [ + '__builtins__', '__name__', '__file__', '__doc__', '__path__', + '__spec__', '__loader__', '__package__', '__cached__', + ] + + def __init__(self, name, parent_module, context, is_package=False): + from . import Builtin + self.parent_module = parent_module + outer_scope = Builtin.builtin_scope + Scope.__init__(self, name, outer_scope, parent_module) + self.is_package = is_package + self.module_name = name + self.module_name = EncodedString(self.module_name) + self.context = context + self.module_cname = Naming.module_cname + self.module_dict_cname = Naming.moddict_cname + self.method_table_cname = Naming.methtable_cname + self.doc = "" + self.doc_cname = Naming.moddoc_cname + self.utility_code_list = [] + self.module_entries = {} + self.c_includes = {} + self.type_names = dict(outer_scope.type_names) + self.pxd_file_loaded = 0 + self.cimported_modules = [] + self.types_imported = set() + self.included_files = [] + self.has_extern_class = 0 + self.cached_builtins = [] + self.undeclared_cached_builtins = [] + self.namespace_cname = self.module_cname + self._cached_tuple_types = {} + self.process_include(Code.IncludeCode("Python.h", initial=True)) + + def qualifying_scope(self): + return self.parent_module + + def global_scope(self): + return self + + def lookup(self, name, language_level=None, str_is_str=None): + entry = self.lookup_here(name) + if entry is not None: + return entry + + if language_level is None: + language_level = self.context.language_level if self.context is not None else 3 + if str_is_str is None: + str_is_str = language_level == 2 or ( + self.context is not None and Future.unicode_literals not in self.context.future_directives) + + return self.outer_scope.lookup(name, language_level=language_level, str_is_str=str_is_str) + + def declare_tuple_type(self, pos, components): + components = tuple(components) + try: + 
ttype = self._cached_tuple_types[components] + except KeyError: + ttype = self._cached_tuple_types[components] = PyrexTypes.c_tuple_type(components) + cname = ttype.cname + entry = self.lookup_here(cname) + if not entry: + scope = StructOrUnionScope(cname) + for ix, component in enumerate(components): + scope.declare_var(name="f%s" % ix, type=component, pos=pos) + struct_entry = self.declare_struct_or_union( + cname + '_struct', 'struct', scope, typedef_flag=True, pos=pos, cname=cname) + self.type_entries.remove(struct_entry) + ttype.struct_entry = struct_entry + entry = self.declare_type(cname, ttype, pos, cname) + ttype.entry = entry + return entry + + def declare_builtin(self, name, pos): + if not hasattr(builtins, name) \ + and name not in Code.non_portable_builtins_map \ + and name not in Code.uncachable_builtins: + if self.has_import_star: + entry = self.declare_var(name, py_object_type, pos) + return entry + else: + if Options.error_on_unknown_names: + error(pos, "undeclared name not builtin: %s" % name) + else: + warning(pos, "undeclared name not builtin: %s" % name, 2) + # unknown - assume it's builtin and look it up at runtime + entry = self.declare(name, None, py_object_type, pos, 'private') + entry.is_builtin = 1 + return entry + if Options.cache_builtins: + for entry in self.cached_builtins: + if entry.name == name: + return entry + if name == 'globals' and not self.old_style_globals: + return self.outer_scope.lookup('__Pyx_Globals') + else: + entry = self.declare(None, None, py_object_type, pos, 'private') + if Options.cache_builtins and name not in Code.uncachable_builtins: + entry.is_builtin = 1 + entry.is_const = 1 # cached + entry.name = name + entry.cname = Naming.builtin_prefix + name + self.cached_builtins.append(entry) + self.undeclared_cached_builtins.append(entry) + else: + entry.is_builtin = 1 + entry.name = name + entry.qualified_name = self.builtin_scope().qualify_name(name) + return entry + + def find_module(self, module_name, pos, 
                    relative_level=-1):
        # Find a module in the import namespace, interpreting
        # relative imports relative to this module's parent.
        # Finds and parses the module's .pxd file if the module
        # has not been referenced before.
        is_relative_import = relative_level is not None and relative_level > 0
        from_module = None
        absolute_fallback = False
        if relative_level is not None and relative_level > 0:
            # explicit relative cimport
            # error of going beyond top-level is handled in cimport node
            from_module = self

            top_level = 1 if self.is_package else 0
            # * top_level == 1 when file is __init__.pyx, current package (from_module) is the current module
            #   i.e. dot in `from . import ...` points to the current package
            # * top_level == 0 when file is regular module, current package (from_module) is parent module
            #   i.e. dot in `from . import ...` points to the package where module is placed
            while relative_level > top_level and from_module:
                from_module = from_module.parent_module
                relative_level -= 1

        elif relative_level != 0:
            # -1 or None: try relative cimport first, then absolute
            from_module = self.parent_module
            absolute_fallback = True

        module_scope = self.global_scope()
        # Delegate the actual lookup/parsing to the compilation context.
        return module_scope.context.find_module(
            module_name, from_module=from_module, pos=pos, absolute_fallback=absolute_fallback, relative_import=is_relative_import)

    def find_submodule(self, name, as_package=False):
        # Find and return scope for a submodule of this module,
        # creating a new empty one if necessary. Doesn't parse .pxd.
        # Dotted names recurse one component at a time.
        if '.' in name:
            name, submodule = name.split('.', 1)
        else:
            submodule = None
        scope = self.lookup_submodule(name)
        if not scope:
            # An intermediate component of a dotted name is necessarily a package.
            scope = ModuleScope(name, parent_module=self, context=self.context, is_package=True if submodule else as_package)
            self.module_entries[name] = scope
        if submodule:
            scope = scope.find_submodule(submodule, as_package=as_package)
        return scope

    def lookup_submodule(self, name):
        # Return scope for submodule of this module, or None.
        if '.' in name:
            name, submodule = name.split('.', 1)
        else:
            submodule = None
        module = self.module_entries.get(name, None)
        if submodule and module is not None:
            module = module.lookup_submodule(submodule)
        return module

    def add_include_file(self, filename, verbatim_include=None, late=False):
        """
        Add `filename` as include file. Add `verbatim_include` as
        verbatim text in the C file.
        Both `filename` and `verbatim_include` can be `None` or empty.
        """
        inc = Code.IncludeCode(filename, verbatim_include, late=late)
        self.process_include(inc)

    def process_include(self, inc):
        """
        Add `inc`, which is an instance of `IncludeCode`, to this
        `ModuleScope`. This either adds a new element to the
        `c_includes` dict or it updates an existing entry.

        In detail: the values of the dict `self.c_includes` are
        instances of `IncludeCode` containing the code to be put in the
        generated C file. The keys of the dict are needed to ensure
        uniqueness in two ways: if an include file is specified in
        multiple "cdef extern" blocks, only one `#include` statement is
        generated. Second, the same include might occur multiple times
        if we find it through multiple "cimport" paths. So we use the
        generated code (of the form `#include "header.h"`) as dict key.

        If verbatim code does not belong to any include file (i.e. it
        was put in a `cdef extern from *` block), then we use a unique
        dict key: namely, the `sortkey()`.

        One `IncludeCode` object can contain multiple pieces of C code:
        one optional "main piece" for the include file and several other
        pieces for the verbatim code. The `IncludeCode.dict_update`
        method merges the pieces of two different `IncludeCode` objects
        if needed.
        """
        key = inc.mainpiece()
        if key is None:
            key = inc.sortkey()
        inc.dict_update(self.c_includes, key)
        # NOTE(review): this trailing rebinding of the local `inc` is dead code —
        # nothing reads `inc` afterwards; confirm against upstream before removing.
        inc = self.c_includes[key]

    def add_imported_module(self, scope):
        # Merge a cimported module's include files into this scope and
        # record it (recursively, including its own cimports).
        if scope not in self.cimported_modules:
            for inc in scope.c_includes.values():
                self.process_include(inc)
            self.cimported_modules.append(scope)
            for m in scope.cimported_modules:
                self.add_imported_module(m)

    def add_imported_entry(self, name, entry, pos):
        if entry.is_pyglobal:
            # Allow cimports to follow imports.
            entry.is_variable = True
        # NOTE(review): this tests the Entry object against the dict's *keys*
        # (which are names), so it is presumably always true and the warning
        # branch never fires — confirm intent against upstream before changing.
        if entry not in self.entries:
            self.entries[name] = entry
        else:
            warning(pos, "'%s' redeclared " % name, 0)

    def declare_module(self, name, scope, pos):
        # Declare a cimported module. This is represented as a
        # Python module-level variable entry with a module
        # scope attached to it. Reports an error and returns
        # None if previously declared as something else.
        entry = self.lookup_here(name)
        if entry:
            if entry.is_pyglobal and entry.as_module is scope:
                return entry  # Already declared as the same module
            if not (entry.is_pyglobal and not entry.as_module):
                # SAGE -- I put this here so Pyrex
                # cimport's work across directories.
                # Currently it tries to multiply define
                # every module appearing in an import list.
                # It shouldn't be an error for a module
                # name to appear again, and indeed the generated
                # code compiles fine.
                return entry
        else:
            # Not seen before: create a fresh module-level Python variable.
            entry = self.declare_var(name, py_object_type, pos)
            entry.is_variable = 0
        entry.as_module = scope
        self.add_imported_module(scope)
        return entry

    def declare_var(self, name, type, pos,
                    cname=None, visibility='private',
                    api=False, in_pxd=False, is_cdef=False, pytyping_modifiers=None):
        # Add an entry for a global variable. If it is a Python
        # object type, and not declared with cdef, it will live
        # in the module dictionary, otherwise it will be a C
        # global variable.
        if visibility not in ('private', 'public', 'extern'):
            error(pos, "Module-level variable cannot be declared %s" % visibility)
        self._reject_pytyping_modifiers(pos, pytyping_modifiers, ('typing.Optional',))  # let's allow at least this one
        if not is_cdef:
            if type is unspecified_type:
                type = py_object_type
            if not (type.is_pyobject and not type.is_extension_type):
                raise InternalError(
                    "Non-cdef global variable is not a generic Python object")

        if not cname:
            defining = not in_pxd
            if visibility == 'extern' or (visibility == 'public' and defining):
                cname = name
            else:
                cname = self.mangle(Naming.var_prefix, name)

        entry = self.lookup_here(name)
        if entry and entry.defined_in_pxd:
            # Re-declaration of a variable already declared in the .pxd:
            # reconcile with the pxd entry instead of creating a new one.
            #if visibility != 'private' and visibility != entry.visibility:
            #    warning(pos, "Variable '%s' previously declared as '%s'" % (name, entry.visibility), 1)
            if not entry.type.same_as(type):
                if visibility == 'extern' and entry.visibility == 'extern':
                    warning(pos, "Variable '%s' type does not match previous declaration" % name, 1)
                    entry.type = type
                #else:
                #    error(pos, "Variable '%s' type does not match previous declaration" % name)
            if entry.visibility != "private":
                # Public pxd names keep their plain C name rather than the mangled one.
                mangled_cname = self.mangle(Naming.var_prefix, name)
                if entry.cname == mangled_cname:
                    cname = name
                    entry.cname = name
            if not entry.is_implemented:
                entry.is_implemented = True
            return entry

        entry = Scope.declare_var(self, name, type, pos,
                                  cname=cname, visibility=visibility,
                                  api=api, in_pxd=in_pxd, is_cdef=is_cdef, pytyping_modifiers=pytyping_modifiers)
        if is_cdef:
            entry.is_cglobal = 1
            if entry.type.declaration_value:
                entry.init = entry.type.declaration_value
            self.var_entries.append(entry)
        else:
            entry.is_pyglobal = 1
        if Options.cimport_from_pyx:
            entry.used = 1
        return entry

    def declare_cfunction(self, name, type, pos,
                          cname=None, visibility='private', api=0, in_pxd=0,
                          defining=0, modifiers=(), utility_code=None, overridable=False):
        if not defining and 'inline' in modifiers:
            # TODO(github/1736): Make this an error.
            warning(pos, "Declarations should not be declared inline.", 1)
        # Add an entry for a C function.
        if not cname:
            if visibility == 'extern' or (visibility == 'public' and defining):
                cname = name
            else:
                cname = self.mangle(Naming.func_prefix, name)
        if visibility == 'extern' and type.optional_arg_count:
            error(pos, "Extern functions cannot have default arguments values.")
        entry = self.lookup_here(name)
        if entry and entry.defined_in_pxd:
            # Reconcile naming with the earlier pxd declaration (see declare_var).
            if entry.visibility != "private":
                mangled_cname = self.mangle(Naming.func_prefix, name)
                if entry.cname == mangled_cname:
                    cname = name
                    entry.cname = cname
                    entry.func_cname = cname
        entry = Scope.declare_cfunction(
            self, name, type, pos,
            cname=cname, visibility=visibility, api=api, in_pxd=in_pxd,
            defining=defining, modifiers=modifiers, utility_code=utility_code,
            overridable=overridable)
        return entry

    def declare_global(self, name, pos):
        # Implicit module-level target of a 'global' statement:
        # create a Python-object variable entry if not already present.
        entry = self.lookup_here(name)
        if not entry:
            self.declare_var(name, py_object_type, pos)

    def use_utility_code(self, new_code):
        # Queue a utility-code snippet for inclusion in the generated C file.
        if new_code is not None:
            self.utility_code_list.append(new_code)

    def use_entry_utility_code(self, entry):
        # Queue both the usage and definition utility code attached to an entry.
        if entry is None:
            return
        if entry.utility_code:
            self.utility_code_list.append(entry.utility_code)
        if entry.utility_code_definition:
            self.utility_code_list.append(entry.utility_code_definition)

    def
        declare_c_class(self, name, pos, defining=0, implementing=0,
            module_name=None, base_type=None, objstruct_cname=None,
            typeobj_cname=None, typeptr_cname=None, visibility='private',
            typedef_flag=0, api=0, check_size=None,
            buffer_defaults=None, shadow=0):
        # If this is a non-extern typedef class, expose the typedef, but use
        # the non-typedef struct internally to avoid needing forward
        # declarations for anonymous structs.
        if typedef_flag and visibility != 'extern':
            if not (visibility == 'public' or api):
                warning(pos, "ctypedef only valid for 'extern' , 'public', and 'api'", 2)
            objtypedef_cname = objstruct_cname
            typedef_flag = 0
        else:
            objtypedef_cname = None
        #
        # Look for previous declaration as a type
        #
        entry = self.lookup_here(name)
        if entry and not shadow:
            type = entry.type
            if not (entry.is_type and type.is_extension_type):
                entry = None  # Will cause redeclaration and produce an error
            else:
                scope = type.scope
                if typedef_flag and (not scope or scope.defined):
                    self.check_previous_typedef_flag(entry, typedef_flag, pos)
                if (scope and scope.defined) or (base_type and type.base_type):
                    if base_type and base_type is not type.base_type:
                        error(pos, "Base type does not match previous declaration")
                if base_type and not type.base_type:
                    type.base_type = base_type
        #
        # Make a new entry if needed
        #
        if not entry or shadow:
            type = PyrexTypes.PyExtensionType(
                name, typedef_flag, base_type, visibility == 'extern', check_size=check_size)
            type.pos = pos
            type.buffer_defaults = buffer_defaults
            if objtypedef_cname is not None:
                type.objtypedef_cname = objtypedef_cname
            if visibility == 'extern':
                type.module_name = module_name
            else:
                type.module_name = self.qualified_name
            if typeptr_cname:
                type.typeptr_cname = typeptr_cname
            else:
                type.typeptr_cname = self.mangle(Naming.typeptr_prefix, name)
            entry = self.declare_type(name, type, pos, visibility = visibility,
                defining = 0, shadow = shadow)
            entry.is_cclass = True
            if objstruct_cname:
                type.objstruct_cname = objstruct_cname
            elif not entry.in_cinclude:
                type.objstruct_cname = self.mangle(Naming.objstruct_prefix, name)
            else:
                error(entry.pos,
                    "Object name required for 'public' or 'extern' C class")
            self.attach_var_entry_to_c_class(entry)
            self.c_class_entries.append(entry)
        #
        # Check for re-definition and create scope if needed
        #
        if not type.scope:
            if defining or implementing:
                scope = CClassScope(name = name, outer_scope = self,
                    visibility=visibility,
                    parent_type=type)
                scope.directives = self.directives.copy()
                if base_type and base_type.scope:
                    scope.declare_inherited_c_attributes(base_type.scope)
                type.set_scope(scope)
                self.type_entries.append(entry)
        else:
            if defining and type.scope.defined:
                error(pos, "C class '%s' already defined" % name)
            elif implementing and type.scope.implemented:
                error(pos, "C class '%s' already implemented" % name)
        #
        # Fill in options, checking for compatibility with any previous declaration
        #
        if defining:
            entry.defined_in_pxd = 1
        if implementing:   # So that filenames in runtime exceptions refer to
            entry.pos = pos  # the .pyx file and not the .pxd file
        if visibility != 'private' and entry.visibility != visibility:
            error(pos, "Class '%s' previously declared as '%s'"
                % (name, entry.visibility))
        if api:
            entry.api = 1
        if objstruct_cname:
            if type.objstruct_cname and type.objstruct_cname != objstruct_cname:
                error(pos, "Object struct name differs from previous declaration")
            type.objstruct_cname = objstruct_cname
        if typeobj_cname:
            if type.typeobj_cname and type.typeobj_cname != typeobj_cname:
                error(pos, "Type object name differs from previous declaration")
            type.typeobj_cname = typeobj_cname

        if self.directives.get('final'):
            entry.type.is_final_type = True
        collection_type = self.directives.get('collection_type')
        if collection_type:
            from .UtilityCode import NonManglingModuleScope
            if not isinstance(self, NonManglingModuleScope):
                # TODO - DW would like to make it public, but I'm making it internal-only
                # for now to avoid adding new features without consensus
                error(pos, "'collection_type' is not a public cython directive")
            if collection_type == 'sequence':
                entry.type.has_sequence_flag = True

        # cdef classes are always exported, but we need to set it to
        # distinguish between unused Cython utility code extension classes
        entry.used = True

        #
        # Return new or existing entry
        #
        return entry

    def allocate_vtable_names(self, entry):
        # If extension type has a vtable, allocate vtable struct and
        # slot names for it.
        type = entry.type
        if type.base_type and type.base_type.vtabslot_cname:
            #print "...allocating vtabslot_cname because base type has one" ###
            type.vtabslot_cname = "%s.%s" % (
                Naming.obj_base_cname, type.base_type.vtabslot_cname)
        elif type.scope and type.scope.cfunc_entries:
            # one special case here: when inheriting from builtin
            # types, the methods may also be built-in, in which
            # case they won't need a vtable
            entry_count = len(type.scope.cfunc_entries)
            base_type = type.base_type
            while base_type:
                # FIXME: this will break if we ever get non-inherited C methods
                if not base_type.scope or entry_count > len(base_type.scope.cfunc_entries):
                    break
                if base_type.is_builtin_type:
                    # builtin base type defines all methods => no vtable needed
                    return
                base_type = base_type.base_type
            #print "...allocating vtabslot_cname because there are C methods" ###
            type.vtabslot_cname = Naming.vtabslot_cname
        if type.vtabslot_cname:
            #print "...allocating other vtable related cnames" ###
            type.vtabstruct_cname = self.mangle(Naming.vtabstruct_prefix, entry.name)
            type.vtabptr_cname = self.mangle(Naming.vtabptr_prefix, entry.name)

    def check_c_classes_pxd(self):
        # Performs post-analysis checking and finishing up of extension types
        # being implemented in this module. This is called only for the .pxd.
        #
        # Checks all extension types declared in this scope to
        # make sure that:
        #
        #    * The extension type is fully declared
        #
        # Also allocates a name for the vtable if needed.
        #
        for entry in self.c_class_entries:
            # Check defined
            if not entry.type.scope:
                error(entry.pos, "C class '%s' is declared but not defined" % entry.name)

    def check_c_class(self, entry):
        # Validate one extension-type entry and allocate its remaining cnames.
        type = entry.type
        name = entry.name
        visibility = entry.visibility
        # Check defined
        if not type.scope:
            error(entry.pos, "C class '%s' is declared but not defined" % name)
        # Generate typeobj_cname
        if visibility != 'extern' and not type.typeobj_cname:
            type.typeobj_cname = self.mangle(Naming.typeobj_prefix, name)
        ## Generate typeptr_cname
        #type.typeptr_cname = self.mangle(Naming.typeptr_prefix, name)
        # Check C methods defined
        if type.scope:
            for method_entry in type.scope.cfunc_entries:
                if not method_entry.is_inherited and not method_entry.func_cname:
                    error(method_entry.pos, "C method '%s' is declared but not defined" %
                        method_entry.name)
        # Allocate vtable name if necessary
        if type.vtabslot_cname:
            #print "ModuleScope.check_c_classes: allocating vtable cname for", self ###
            type.vtable_cname = self.mangle(Naming.vtable_prefix, entry.name)

    def check_c_classes(self):
        # Performs post-analysis checking and finishing up of extension types
        # being implemented in this module. This is called only for the main
        # .pyx file scope, not for cimported .pxd scopes.
        #
        # Checks all extension types declared in this scope to
        # make sure that:
        #
        #    * The extension type is implemented
        #    * All required object and type names have been specified or generated
        #    * All non-inherited C methods are implemented
        #
        # Also allocates a name for the vtable if needed.
        #
        debug_check_c_classes = 0
        if debug_check_c_classes:
            print("Scope.check_c_classes: checking scope " + self.qualified_name)
        for entry in self.c_class_entries:
            if debug_check_c_classes:
                print("...entry %s %s" % (entry.name, entry))
                print("......type = ", entry.type)
                print("......visibility = ", entry.visibility)
            self.check_c_class(entry)

    def check_c_functions(self):
        # Performs post-analysis checking making sure all
        # defined c functions are actually implemented.
        for name, entry in self.entries.items():
            if entry.is_cfunction:
                if (entry.defined_in_pxd
                        and entry.scope is self
                        and entry.visibility != 'extern'
                        and not entry.in_cinclude
                        and not entry.is_implemented):
                    error(entry.pos, "Non-extern C function '%s' declared but not defined" % name)

    def attach_var_entry_to_c_class(self, entry):
        # The name of an extension class has to serve as both a type
        # name and a variable name holding the type object. It is
        # represented in the symbol table by a type entry with a
        # variable entry attached to it. For the variable entry,
        # we use a read-only C global variable whose name is an
        # expression that refers to the type object.
        from . import Builtin
        var_entry = Entry(name = entry.name,
            type = Builtin.type_type,
            pos = entry.pos,
            cname = entry.type.typeptr_cname)
        var_entry.qualified_name = entry.qualified_name
        var_entry.is_variable = 1
        var_entry.is_cglobal = 1
        var_entry.is_readonly = 1
        var_entry.scope = entry.scope
        entry.as_variable = var_entry

    def is_cpp(self):
        # True when this module is compiled in C++ mode.
        return self.cpp

    def infer_types(self):
        # Run module-wide type inference (imported lazily to avoid cycles).
        from .TypeInference import PyObjectTypeInferer
        PyObjectTypeInferer().infer_types(self)


class LocalScope(Scope):
    is_local_scope = True

    # Does the function have a 'with gil:' block?
    has_with_gil_block = False

    # Transient attribute, used for symbol table variable declarations
    _in_with_gil_block = False

    def __init__(self, name, outer_scope, parent_scope = None):
        if parent_scope is None:
            parent_scope = outer_scope
        Scope.__init__(self, name, outer_scope, parent_scope)

    def mangle(self, prefix, name):
        # Locals only need punycode-safety, not scope-qualified mangling.
        return punycodify_name(prefix + name)

    def declare_arg(self, name, type, pos):
        # Add an entry for an argument of a function.
        name = self.mangle_class_private_name(name)
        cname = self.mangle(Naming.var_prefix, name)
        entry = self.declare(name, cname, type, pos, 'private')
        entry.is_variable = 1
        if type.is_pyobject:
            entry.init = "0"
        entry.is_arg = 1
        #entry.borrowed = 1 # Not using borrowed arg refs for now
        self.arg_entries.append(entry)
        return entry

    def declare_var(self, name, type, pos,
                    cname=None, visibility='private',
                    api=False, in_pxd=False, is_cdef=False, pytyping_modifiers=None):
        name = self.mangle_class_private_name(name)
        # Add an entry for a local variable.
        if visibility in ('public', 'readonly'):
            error(pos, "Local variable cannot be declared %s" % visibility)
        entry = Scope.declare_var(self, name, type, pos,
                                  cname=cname, visibility=visibility,
                                  api=api, in_pxd=in_pxd, is_cdef=is_cdef, pytyping_modifiers=pytyping_modifiers)
        if entry.type.declaration_value:
            entry.init = entry.type.declaration_value
        entry.is_local = 1

        entry.in_with_gil_block = self._in_with_gil_block
        self.var_entries.append(entry)
        return entry

    def declare_global(self, name, pos):
        # Pull entry from global scope into local scope.
+ if self.lookup_here(name): + warning(pos, "'%s' redeclared ", 0) + else: + entry = self.global_scope().lookup_target(name) + self.entries[name] = entry + + def declare_nonlocal(self, name, pos): + # Pull entry from outer scope into local scope + orig_entry = self.lookup_here(name) + if orig_entry and orig_entry.scope is self and not orig_entry.from_closure: + error(pos, "'%s' redeclared as nonlocal" % name) + orig_entry.already_declared_here() + else: + entry = self.lookup(name) + if entry is None or not entry.from_closure: + error(pos, "no binding for nonlocal '%s' found" % name) + + def _create_inner_entry_for_closure(self, name, entry): + entry.in_closure = True + inner_entry = InnerEntry(entry, self) + inner_entry.is_variable = True + self.entries[name] = inner_entry + return inner_entry + + def lookup(self, name): + # Look up name in this scope or an enclosing one. + # Return None if not found. + + entry = Scope.lookup(self, name) + if entry is not None: + entry_scope = entry.scope + while entry_scope.is_comprehension_scope: + entry_scope = entry_scope.outer_scope + if entry_scope is not self and entry_scope.is_closure_scope: + if hasattr(entry.scope, "scope_class"): + raise InternalError("lookup() after scope class created.") + # The actual c fragment for the different scopes differs + # on the outside and inside, so we make a new entry + return self._create_inner_entry_for_closure(name, entry) + return entry + + def mangle_closure_cnames(self, outer_scope_cname): + for scope in self.iter_local_scopes(): + for entry in scope.entries.values(): + if entry.from_closure: + cname = entry.outer_entry.cname + if self.is_passthrough: + entry.cname = cname + else: + if cname.startswith(Naming.cur_scope_cname): + cname = cname[len(Naming.cur_scope_cname)+2:] + entry.cname = "%s->%s" % (outer_scope_cname, cname) + elif entry.in_closure: + entry.original_cname = entry.cname + entry.cname = "%s->%s" % (Naming.cur_scope_cname, entry.cname) + if entry.type.is_cpp_class 
                            and entry.scope.directives['cpp_locals']:
                        entry.make_cpp_optional()


class ComprehensionScope(Scope):
    """Scope for comprehensions (but not generator expressions, which use ClosureScope).
    As opposed to generators, these can be easily inlined in some cases, so all
    we really need is a scope that holds the loop variable(s).
    """
    is_comprehension_scope = True

    def __init__(self, outer_scope):
        parent_scope = outer_scope
        # TODO: also ignore class scopes?
        while parent_scope.is_comprehension_scope:
            parent_scope = parent_scope.parent_scope
        name = parent_scope.global_scope().next_id(Naming.genexpr_id_ref)
        Scope.__init__(self, name, outer_scope, parent_scope)
        self.directives = outer_scope.directives
        self.genexp_prefix = "%s%d%s" % (Naming.pyrex_prefix, len(name), name)

        # Class/ExtType scopes are filled at class creation time, i.e. from the
        # module init function or surrounding function.
        while outer_scope.is_comprehension_scope or outer_scope.is_c_class_scope or outer_scope.is_py_class_scope:
            outer_scope = outer_scope.outer_scope
        self.var_entries = outer_scope.var_entries  # keep declarations outside
        outer_scope.subscopes.add(self)

    def mangle(self, prefix, name):
        return '%s%s' % (self.genexp_prefix, self.parent_scope.mangle(prefix, name))

    def declare_var(self, name, type, pos,
                    cname=None, visibility='private',
                    api=False, in_pxd=False, is_cdef=True, pytyping_modifiers=None):
        if type is unspecified_type:
            # if the outer scope defines a type for this variable, inherit it
            outer_entry = self.outer_scope.lookup(name)
            if outer_entry and outer_entry.is_variable:
                type = outer_entry.type  # may still be 'unspecified_type' !
        self._reject_pytyping_modifiers(pos, pytyping_modifiers)
        # the parent scope needs to generate code for the variable, but
        # this scope must hold its name exclusively
        cname = '%s%s' % (self.genexp_prefix, self.parent_scope.mangle(Naming.var_prefix, name or self.next_id()))
        entry = self.declare(name, cname, type, pos, visibility)
        entry.is_variable = True
        if self.parent_scope.is_module_scope:
            entry.is_cglobal = True
        else:
            entry.is_local = True
        entry.in_subscope = True
        self.var_entries.append(entry)
        self.entries[name] = entry
        return entry

    def declare_assignment_expression_target(self, name, type, pos):
        # should be declared in the parent scope instead
        return self.parent_scope.declare_var(name, type, pos)

    def declare_pyfunction(self, name, pos, allow_redefine=False):
        # Functions defined inside a comprehension belong to the outer scope.
        return self.outer_scope.declare_pyfunction(
            name, pos, allow_redefine)

    def declare_lambda_function(self, func_cname, pos):
        return self.outer_scope.declare_lambda_function(func_cname, pos)

    def add_lambda_def(self, def_node):
        return self.outer_scope.add_lambda_def(def_node)

    def lookup_assignment_expression_target(self, name):
        entry = self.lookup_here(name)
        if not entry:
            entry = self.parent_scope.lookup_assignment_expression_target(name)
        return entry


class ClosureScope(LocalScope):

    is_closure_scope = True

    def __init__(self, name, scope_name, outer_scope, parent_scope=None):
        LocalScope.__init__(self, name, outer_scope, parent_scope)
        self.closure_cname = "%s%s" % (Naming.closure_scope_prefix, scope_name)

#    def mangle_closure_cnames(self, scope_var):
#        for entry in self.entries.values() + self.temp_entries:
#            entry.in_closure = 1
#        LocalScope.mangle_closure_cnames(self, scope_var)

#    def mangle(self, prefix, name):
#        return "%s->%s" % (self.cur_scope_cname, name)
#        return "%s->%s" % (self.closure_cname, name)

    def declare_pyfunction(self, name, pos, allow_redefine=False):
        return LocalScope.declare_pyfunction(self,
            name, pos, allow_redefine, visibility='private')

    def declare_assignment_expression_target(self, name, type, pos):
        return self.declare_var(name, type, pos)


class GeneratorExpressionScope(ClosureScope):
    is_generator_expression_scope = True

    def declare_assignment_expression_target(self, name, type, pos):
        # Walrus targets live in the parent scope but are accessed
        # from inside the generator through a closure entry.
        entry = self.parent_scope.declare_var(name, type, pos)
        return self._create_inner_entry_for_closure(name, entry)

    def lookup_assignment_expression_target(self, name):
        entry = self.lookup_here(name)
        if not entry:
            entry = self.parent_scope.lookup_assignment_expression_target(name)
            if entry:
                return self._create_inner_entry_for_closure(name, entry)
        return entry


class StructOrUnionScope(Scope):
    #  Namespace of a C struct or union.

    def __init__(self, name="?"):
        Scope.__init__(self, name, outer_scope=None, parent_scope=None)

    def declare_var(self, name, type, pos,
                    cname=None, visibility='private',
                    api=False, in_pxd=False, is_cdef=False, pytyping_modifiers=None,
                    allow_pyobject=False, allow_memoryview=False, allow_refcounted=False):
        # Add an entry for an attribute.
        if not cname:
            cname = name
            if visibility == 'private':
                cname = c_safe_identifier(cname)
        if type.is_cfunction:
            # Function members are stored as function pointers.
            type = PyrexTypes.CPtrType(type)
        self._reject_pytyping_modifiers(pos, pytyping_modifiers)
        entry = self.declare(name, cname, type, pos, visibility)
        entry.is_variable = 1
        self.var_entries.append(entry)
        if type.is_pyobject:
            if not allow_pyobject:
                error(pos, "C struct/union member cannot be a Python object")
        elif type.is_memoryviewslice:
            if not allow_memoryview:
                # Memory views wrap their buffer owner as a Python object.
                error(pos, "C struct/union member cannot be a memory view")
        elif type.needs_refcounting:
            if not allow_refcounted:
                error(pos, "C struct/union member cannot be reference-counted type '%s'" % type)
        return entry

    def declare_cfunction(self, name, type, pos,
                          cname=None, visibility='private', api=0, in_pxd=0,
                          defining=0, modifiers=(), overridable=False):  # currently no utility code ...
        if overridable:
            error(pos, "C struct/union member cannot be declared 'cpdef'")
        return self.declare_var(name, type, pos,
                                cname=cname, visibility=visibility)


class ClassScope(Scope):
    #  Abstract base class for namespace of
    #  Python class or extension type.
    #
    #  class_name     string   Python name of the class
    #  scope_prefix   string   Additional prefix for names
    #                          declared in the class
    #  doc    string or None   Doc string

    scope_predefined_names = ['__module__', '__qualname__']

    def mangle_class_private_name(self, name):
        # a few utilitycode names need to specifically be ignored
        if name and name.lower().startswith("__pyx_"):
            return name
        if name and name.startswith('__') and not name.endswith('__'):
            # Apply Python-style private-name mangling ('__x' -> '_Class__x').
            name = EncodedString('_%s%s' % (self.class_name.lstrip('_'), name))
        return name

    def __init__(self, name, outer_scope):
        Scope.__init__(self, name, outer_scope, outer_scope)
        self.class_name = name
        self.doc = None

    def lookup(self, name):
        entry = Scope.lookup(self, name)
        if entry:
            return entry
        if name == "classmethod":
            # We don't want to use the builtin classmethod here 'cause it won't do the
            # right thing in this scope (as the class members aren't still functions).
            # Don't want to add a cfunction to this scope 'cause that would mess with
            # the type definition, so we just return the right entry.
+ entry = Entry( + "classmethod", + "__Pyx_Method_ClassMethod", + PyrexTypes.CFuncType( + py_object_type, + [PyrexTypes.CFuncTypeArg("", py_object_type, None)], 0, 0)) + entry.utility_code_definition = Code.UtilityCode.load_cached("ClassMethod", "CythonFunction.c") + self.use_entry_utility_code(entry) + entry.is_cfunction = 1 + return entry + + +class PyClassScope(ClassScope): + # Namespace of a Python class. + # + # class_obj_cname string C variable holding class object + + is_py_class_scope = 1 + + def declare_var(self, name, type, pos, + cname=None, visibility='private', + api=False, in_pxd=False, is_cdef=False, pytyping_modifiers=None): + name = self.mangle_class_private_name(name) + if type is unspecified_type: + type = py_object_type + # Add an entry for a class attribute. + entry = Scope.declare_var(self, name, type, pos, + cname=cname, visibility=visibility, + api=api, in_pxd=in_pxd, is_cdef=is_cdef, pytyping_modifiers=pytyping_modifiers) + entry.is_pyglobal = 1 + entry.is_pyclass_attr = 1 + return entry + + def declare_nonlocal(self, name, pos): + # Pull entry from outer scope into local scope + orig_entry = self.lookup_here(name) + if orig_entry and orig_entry.scope is self and not orig_entry.from_closure: + error(pos, "'%s' redeclared as nonlocal" % name) + orig_entry.already_declared_here() + else: + entry = self.lookup(name) + if entry is None: + error(pos, "no binding for nonlocal '%s' found" % name) + else: + # FIXME: this works, but it's unclear if it's the + # right thing to do + self.entries[name] = entry + + def declare_global(self, name, pos): + # Pull entry from global scope into local scope. + if self.lookup_here(name): + warning(pos, "'%s' redeclared ", 0) + else: + entry = self.global_scope().lookup_target(name) + self.entries[name] = entry + + def add_default_value(self, type): + return self.outer_scope.add_default_value(type) + + +class CClassScope(ClassScope): + # Namespace of an extension type. 
+ # + # parent_type PyExtensionType + # #typeobj_cname string or None + # #objstruct_cname string + # method_table_cname string + # getset_table_cname string + # has_pyobject_attrs boolean Any PyObject attributes? + # has_memoryview_attrs boolean Any memory view attributes? + # has_cpp_class_attrs boolean Any (non-pointer) C++ attributes? + # has_cyclic_pyobject_attrs boolean Any PyObject attributes that may need GC? + # property_entries [Entry] + # defined boolean Defined in .pxd file + # implemented boolean Defined in .pyx file + # inherited_var_entries [Entry] Adapted var entries from base class + + is_c_class_scope = 1 + is_closure_class_scope = False + + has_pyobject_attrs = False + has_memoryview_attrs = False + has_cpp_constructable_attrs = False + has_cyclic_pyobject_attrs = False + defined = False + implemented = False + + def __init__(self, name, outer_scope, visibility, parent_type): + ClassScope.__init__(self, name, outer_scope) + if visibility != 'extern': + self.method_table_cname = outer_scope.mangle(Naming.methtab_prefix, name) + self.getset_table_cname = outer_scope.mangle(Naming.gstab_prefix, name) + self.property_entries = [] + self.inherited_var_entries = [] + self.parent_type = parent_type + # Usually parent_type will be an extension type and so the typeptr_cname + # can be used to calculate the namespace_cname. Occasionally other types + # are used (e.g. numeric/complex types) and in these cases the typeptr + # isn't relevant. + if ((parent_type.is_builtin_type or parent_type.is_extension_type) + and parent_type.typeptr_cname): + self.namespace_cname = "(PyObject *)%s" % parent_type.typeptr_cname + + def needs_gc(self): + # If the type or any of its base types have Python-valued + # C attributes, then it needs to participate in GC. 
+ if self.has_cyclic_pyobject_attrs and not self.directives.get('no_gc', False): + return True + base_type = self.parent_type.base_type + if base_type and base_type.scope is not None: + return base_type.scope.needs_gc() + elif self.parent_type.is_builtin_type: + return not self.parent_type.is_gc_simple + return False + + def needs_trashcan(self): + # If the trashcan directive is explicitly set to False, + # unconditionally disable the trashcan. + directive = self.directives.get('trashcan') + if directive is False: + return False + # If the directive is set to True and the class has Python-valued + # C attributes, then it should use the trashcan in tp_dealloc. + if directive and self.has_cyclic_pyobject_attrs: + return True + # Use the trashcan if the base class uses it + base_type = self.parent_type.base_type + if base_type and base_type.scope is not None: + return base_type.scope.needs_trashcan() + return self.parent_type.builtin_trashcan + + def needs_tp_clear(self): + """ + Do we need to generate an implementation for the tp_clear slot? Can + be disabled to keep references for the __dealloc__ cleanup function. + """ + return self.needs_gc() and not self.directives.get('no_gc_clear', False) + + def may_have_finalize(self): + """ + This covers cases where we definitely have a __del__ function + and also cases where one of the base classes could have a __del__ + function but we don't know. 
+ """ + current_type_scope = self + while current_type_scope: + del_entry = current_type_scope.lookup_here("__del__") + if del_entry and del_entry.is_special: + return True + if (current_type_scope.parent_type.is_external or not current_type_scope.implemented or + current_type_scope.parent_type.multiple_bases): + # we don't know if we have __del__, so assume we do and call it + return True + current_base_type = current_type_scope.parent_type.base_type + current_type_scope = current_base_type.scope if current_base_type else None + return False + + def get_refcounted_entries(self, include_weakref=False, + include_gc_simple=True): + py_attrs = [] + py_buffers = [] + memoryview_slices = [] + + for entry in self.var_entries: + if entry.type.is_pyobject: + if include_weakref or (self.is_closure_class_scope or entry.name != "__weakref__"): + if include_gc_simple or not entry.type.is_gc_simple: + py_attrs.append(entry) + elif entry.type == PyrexTypes.c_py_buffer_type: + py_buffers.append(entry) + elif entry.type.is_memoryviewslice: + memoryview_slices.append(entry) + + have_entries = py_attrs or py_buffers or memoryview_slices + return have_entries, (py_attrs, py_buffers, memoryview_slices) + + def declare_var(self, name, type, pos, + cname=None, visibility='private', + api=False, in_pxd=False, is_cdef=False, pytyping_modifiers=None): + name = self.mangle_class_private_name(name) + + if pytyping_modifiers: + if "typing.ClassVar" in pytyping_modifiers: + is_cdef = 0 + if not type.is_pyobject: + if not type.equivalent_type: + warning(pos, "ClassVar[] requires the type to be a Python object type. Found '%s', using object instead." % type) + type = py_object_type + else: + type = type.equivalent_type + if "dataclasses.InitVar" in pytyping_modifiers and not self.is_c_dataclass_scope: + error(pos, "Use of cython.dataclasses.InitVar does not make sense outside a dataclass") + + if is_cdef: + # Add an entry for an attribute. 
+ if self.defined: + error(pos, + "C attributes cannot be added in implementation part of" + " extension type defined in a pxd") + if (not self.is_closure_class_scope and + get_slot_table(self.directives).get_special_method_signature(name)): + error(pos, + "The name '%s' is reserved for a special method." + % name) + if not cname: + cname = name + if visibility == 'private': + cname = c_safe_identifier(cname) + cname = punycodify_name(cname, Naming.unicode_structmember_prefix) + entry = self.declare(name, cname, type, pos, visibility) + entry.is_variable = 1 + self.var_entries.append(entry) + entry.pytyping_modifiers = pytyping_modifiers + if type.is_cpp_class and visibility != 'extern': + if self.directives['cpp_locals']: + entry.make_cpp_optional() + else: + type.check_nullary_constructor(pos) + if type.is_memoryviewslice: + self.has_memoryview_attrs = True + elif type.needs_cpp_construction: + self.use_utility_code(Code.UtilityCode("#include ")) + self.has_cpp_constructable_attrs = True + elif type.is_pyobject and (self.is_closure_class_scope or name != '__weakref__'): + self.has_pyobject_attrs = True + if (not type.is_builtin_type + or not type.scope or type.scope.needs_gc()): + self.has_cyclic_pyobject_attrs = True + if visibility not in ('private', 'public', 'readonly'): + error(pos, + "Attribute of extension type cannot be declared %s" % visibility) + if visibility in ('public', 'readonly'): + # If the field is an external typedef, we cannot be sure about the type, + # so do conversion ourself rather than rely on the CPython mechanism (through + # a property; made in AnalyseDeclarationsTransform). 
+ entry.needs_property = True + if not self.is_closure_class_scope and name == "__weakref__": + error(pos, "Special attribute __weakref__ cannot be exposed to Python") + if not (type.is_pyobject or type.can_coerce_to_pyobject(self)): + # we're not testing for coercion *from* Python here - that would fail later + error(pos, "C attribute of type '%s' cannot be accessed from Python" % type) + else: + entry.needs_property = False + return entry + else: + if type is unspecified_type: + type = py_object_type + # Add an entry for a class attribute. + entry = Scope.declare_var(self, name, type, pos, + cname=cname, visibility=visibility, + api=api, in_pxd=in_pxd, is_cdef=is_cdef, pytyping_modifiers=pytyping_modifiers) + entry.is_member = 1 + # xxx: is_pyglobal changes behaviour in so many places that I keep it in for now. + # is_member should be enough later on + entry.is_pyglobal = 1 + + return entry + + def declare_pyfunction(self, name, pos, allow_redefine=False): + # Add an entry for a method. + if name in richcmp_special_methods: + if self.lookup_here('__richcmp__'): + error(pos, "Cannot define both % and __richcmp__" % name) + elif name == '__richcmp__': + for n in richcmp_special_methods: + if self.lookup_here(n): + error(pos, "Cannot define both % and __richcmp__" % n) + if name == "__new__": + error(pos, "__new__ method of extension type will change semantics " + "in a future version of Pyrex and Cython. Use __cinit__ instead.") + entry = self.declare_var(name, py_object_type, pos, + visibility='extern') + special_sig = get_slot_table(self.directives).get_special_method_signature(name) + if special_sig: + # Special methods get put in the method table with a particular + # signature declared in advance. 
+ entry.signature = special_sig + entry.is_special = 1 + else: + entry.signature = pymethod_signature + entry.is_special = 0 + + self.pyfunc_entries.append(entry) + return entry + + def lookup_here(self, name): + if not self.is_closure_class_scope and name == "__new__": + name = EncodedString("__cinit__") + entry = ClassScope.lookup_here(self, name) + if entry and entry.is_builtin_cmethod: + if not self.parent_type.is_builtin_type: + # For subtypes of builtin types, we can only return + # optimised C methods if the type if final. + # Otherwise, subtypes may choose to override the + # method, but the optimisation would prevent the + # subtype method from being called. + if not self.parent_type.is_final_type: + return None + return entry + + def declare_cfunction(self, name, type, pos, + cname=None, visibility='private', api=0, in_pxd=0, + defining=0, modifiers=(), utility_code=None, overridable=False): + name = self.mangle_class_private_name(name) + if (get_slot_table(self.directives).get_special_method_signature(name) + and not self.parent_type.is_builtin_type): + error(pos, "Special methods must be declared with 'def', not 'cdef'") + args = type.args + if not type.is_static_method: + if not args: + error(pos, "C method has no self argument") + elif not self.parent_type.assignable_from(args[0].type): + error(pos, "Self argument (%s) of C method '%s' does not match parent type (%s)" % + (args[0].type, name, self.parent_type)) + entry = self.lookup_here(name) + if cname is None: + cname = punycodify_name(c_safe_identifier(name), Naming.unicode_vtabentry_prefix) + if entry: + if not entry.is_cfunction: + error(pos, "'%s' redeclared " % name) + entry.already_declared_here() + else: + if defining and entry.func_cname: + error(pos, "'%s' already defined" % name) + #print "CClassScope.declare_cfunction: checking signature" ### + if entry.is_final_cmethod and entry.is_inherited: + error(pos, "Overriding final methods is not allowed") + elif 
type.same_c_signature_as(entry.type, as_cmethod = 1) and type.nogil == entry.type.nogil: + # Fix with_gil vs nogil. + entry.type = entry.type.with_with_gil(type.with_gil) + elif type.compatible_signature_with(entry.type, as_cmethod = 1) and type.nogil == entry.type.nogil: + if (self.defined and not in_pxd + and not type.same_c_signature_as_resolved_type( + entry.type, as_cmethod=1, as_pxd_definition=1)): + # TODO(robertwb): Make this an error. + warning(pos, + "Compatible but non-identical C method '%s' not redeclared " + "in definition part of extension type '%s'. " + "This may cause incorrect vtables to be generated." % ( + name, self.class_name), 2) + warning(entry.pos, "Previous declaration is here", 2) + entry = self.add_cfunction(name, type, pos, cname, visibility='ignore', modifiers=modifiers) + else: + error(pos, "Signature not compatible with previous declaration") + error(entry.pos, "Previous declaration is here") + else: + if self.defined: + error(pos, + "C method '%s' not previously declared in definition part of" + " extension type '%s'" % (name, self.class_name)) + entry = self.add_cfunction(name, type, pos, cname, visibility, modifiers) + if defining: + entry.func_cname = self.mangle(Naming.func_prefix, name) + entry.utility_code = utility_code + type.entry = entry + + if u'inline' in modifiers: + entry.is_inline_cmethod = True + + if self.parent_type.is_final_type or entry.is_inline_cmethod or self.directives.get('final'): + entry.is_final_cmethod = True + entry.final_func_cname = entry.func_cname + if not type.is_fused: + entry.vtable_type = entry.type + entry.type = type + + return entry + + def add_cfunction(self, name, type, pos, cname, visibility, modifiers, inherited=False): + # Add a cfunction entry without giving it a func_cname. 
+ prev_entry = self.lookup_here(name) + entry = ClassScope.add_cfunction( + self, name, type, pos, cname, visibility, modifiers, inherited=inherited) + entry.is_cmethod = 1 + entry.prev_entry = prev_entry + return entry + + def declare_builtin_cfunction(self, name, type, cname, utility_code = None): + # overridden methods of builtin types still have their Python + # equivalent that must be accessible to support bound methods + name = EncodedString(name) + entry = self.declare_cfunction( + name, type, pos=None, cname=cname, visibility='extern', utility_code=utility_code) + var_entry = Entry(name, name, py_object_type) + var_entry.qualified_name = name + var_entry.is_variable = 1 + var_entry.is_builtin = 1 + var_entry.utility_code = utility_code + var_entry.scope = entry.scope + entry.as_variable = var_entry + return entry + + def declare_property(self, name, doc, pos, ctype=None, property_scope=None): + entry = self.lookup_here(name) + if entry is None: + entry = self.declare(name, name, py_object_type if ctype is None else ctype, pos, 'private') + entry.is_property = True + if ctype is not None: + entry.is_cproperty = True + entry.doc = doc + if property_scope is None: + entry.scope = PropertyScope(name, class_scope=self) + else: + entry.scope = property_scope + self.property_entries.append(entry) + return entry + + def declare_cproperty(self, name, type, cfunc_name, doc=None, pos=None, visibility='extern', + nogil=False, with_gil=False, exception_value=None, exception_check=False, + utility_code=None): + """Internal convenience method to declare a C property function in one go. 
+ """ + property_entry = self.declare_property(name, doc=doc, ctype=type, pos=pos) + cfunc_entry = property_entry.scope.declare_cfunction( + name=name, + type=PyrexTypes.CFuncType( + type, + [PyrexTypes.CFuncTypeArg("self", self.parent_type, pos=None)], + nogil=nogil, + with_gil=with_gil, + exception_value=exception_value, + exception_check=exception_check, + ), + cname=cfunc_name, + utility_code=utility_code, + visibility=visibility, + pos=pos, + ) + return property_entry, cfunc_entry + + def declare_inherited_c_attributes(self, base_scope): + # Declare entries for all the C attributes of an + # inherited type, with cnames modified appropriately + # to work with this type. + def adapt(cname): + return "%s.%s" % (Naming.obj_base_cname, base_entry.cname) + + entries = base_scope.inherited_var_entries + base_scope.var_entries + for base_entry in entries: + entry = self.declare( + base_entry.name, adapt(base_entry.cname), + base_entry.type, None, 'private') + entry.is_variable = 1 + entry.is_inherited = True + entry.annotation = base_entry.annotation + self.inherited_var_entries.append(entry) + + # If the class defined in a pxd, specific entries have not been added. 
+ # Ensure now that the parent (base) scope has specific entries + # Iterate over a copy as get_all_specialized_function_types() will mutate + for base_entry in base_scope.cfunc_entries[:]: + if base_entry.type.is_fused: + base_entry.type.get_all_specialized_function_types() + + for base_entry in base_scope.cfunc_entries: + cname = base_entry.cname + var_entry = base_entry.as_variable + is_builtin = var_entry and var_entry.is_builtin + if not is_builtin: + cname = adapt(cname) + entry = self.add_cfunction( + base_entry.name, base_entry.type, base_entry.pos, cname, + base_entry.visibility, base_entry.func_modifiers, inherited=True) + entry.is_inherited = 1 + if base_entry.is_final_cmethod: + entry.is_final_cmethod = True + entry.is_inline_cmethod = base_entry.is_inline_cmethod + if (self.parent_scope == base_scope.parent_scope or + entry.is_inline_cmethod): + entry.final_func_cname = base_entry.final_func_cname + if is_builtin: + entry.is_builtin_cmethod = True + entry.as_variable = var_entry + if base_entry.utility_code: + entry.utility_code = base_entry.utility_code + + +class CppClassScope(Scope): + # Namespace of a C++ class. + + is_cpp_class_scope = 1 + + default_constructor = None + type = None + + def __init__(self, name, outer_scope, templates=None): + Scope.__init__(self, name, outer_scope, None) + self.directives = outer_scope.directives + self.inherited_var_entries = [] + if templates is not None: + for T in templates: + template_entry = self.declare( + T, T, PyrexTypes.TemplatePlaceholderType(T), None, 'extern') + template_entry.is_type = 1 + + def declare_var(self, name, type, pos, + cname=None, visibility='extern', + api=False, in_pxd=False, is_cdef=False, defining=False, pytyping_modifiers=None): + # Add an entry for an attribute. + if not cname: + cname = name + self._reject_pytyping_modifiers(pos, pytyping_modifiers) + entry = self.lookup_here(name) + if defining and entry is not None: + if entry.type.same_as(type): + # Fix with_gil vs nogil. 
+ entry.type = entry.type.with_with_gil(type.with_gil) + elif type.is_cfunction and type.compatible_signature_with(entry.type): + entry.type = type + else: + error(pos, "Function signature does not match previous declaration") + else: + entry = self.declare(name, cname, type, pos, visibility) + entry.is_variable = 1 + if type.is_cfunction and self.type: + if not self.type.get_fused_types(): + entry.func_cname = "%s::%s" % (self.type.empty_declaration_code(), cname) + if name != "this" and (defining or name != ""): + self.var_entries.append(entry) + return entry + + def declare_cfunction(self, name, type, pos, + cname=None, visibility='extern', api=0, in_pxd=0, + defining=0, modifiers=(), utility_code=None, overridable=False): + class_name = self.name.split('::')[-1] + if name in (class_name, '__init__') and cname is None: + cname = "%s__init__%s" % (Naming.func_prefix, class_name) + name = EncodedString('') + type.return_type = PyrexTypes.CVoidType() + # This is called by the actual constructor, but need to support + # arguments that cannot by called by value. 
+ type.original_args = type.args + def maybe_ref(arg): + if arg.type.is_cpp_class and not arg.type.is_reference: + return PyrexTypes.CFuncTypeArg( + arg.name, PyrexTypes.c_ref_type(arg.type), arg.pos) + else: + return arg + type.args = [maybe_ref(arg) for arg in type.args] + elif name == '__dealloc__' and cname is None: + cname = "%s__dealloc__%s" % (Naming.func_prefix, class_name) + name = EncodedString('') + type.return_type = PyrexTypes.CVoidType() + if name in ('', '') and type.nogil: + for base in self.type.base_classes: + base_entry = base.scope.lookup(name) + if base_entry and not base_entry.type.nogil: + error(pos, "Constructor cannot be called without GIL unless all base constructors can also be called without GIL") + error(base_entry.pos, "Base constructor defined here.") + prev_entry = self.lookup_here(name) + entry = self.declare_var(name, type, pos, + defining=defining, + cname=cname, visibility=visibility) + if prev_entry and not defining: + entry.overloaded_alternatives = prev_entry.all_alternatives() + entry.utility_code = utility_code + type.entry = entry + return entry + + def declare_inherited_cpp_attributes(self, base_class): + base_scope = base_class.scope + template_type = base_class + while getattr(template_type, 'template_type', None): + template_type = template_type.template_type + if getattr(template_type, 'templates', None): + base_templates = [T.name for T in template_type.templates] + else: + base_templates = () + # Declare entries for all the C++ attributes of an + # inherited type, with cnames modified appropriately + # to work with this type. + for base_entry in base_scope.inherited_var_entries + base_scope.var_entries: + #constructor/destructor is not inherited + if base_entry.name in ("", ""): + continue + #print base_entry.name, self.entries + if base_entry.name in self.entries: + base_entry.name # FIXME: is there anything to do in this case? 
+ entry = self.declare(base_entry.name, base_entry.cname, + base_entry.type, None, 'extern') + entry.is_variable = 1 + entry.is_inherited = 1 + self.inherited_var_entries.append(entry) + for base_entry in base_scope.cfunc_entries: + entry = self.declare_cfunction(base_entry.name, base_entry.type, + base_entry.pos, base_entry.cname, + base_entry.visibility, api=0, + modifiers=base_entry.func_modifiers, + utility_code=base_entry.utility_code) + entry.is_inherited = 1 + for base_entry in base_scope.type_entries: + if base_entry.name not in base_templates: + entry = self.declare_type(base_entry.name, base_entry.type, + base_entry.pos, base_entry.cname, + base_entry.visibility, defining=False) + entry.is_inherited = 1 + + def specialize(self, values, type_entry): + scope = CppClassScope(self.name, self.outer_scope) + scope.type = type_entry + for entry in self.entries.values(): + if entry.is_type: + scope.declare_type(entry.name, + entry.type.specialize(values), + entry.pos, + entry.cname, + template=1) + elif entry.type.is_cfunction: + for e in entry.all_alternatives(): + scope.declare_cfunction(e.name, + e.type.specialize(values), + e.pos, + e.cname, + utility_code=e.utility_code) + else: + scope.declare_var(entry.name, + entry.type.specialize(values), + entry.pos, + entry.cname, + entry.visibility) + + return scope + + +class CppScopedEnumScope(Scope): + # Namespace of a ScopedEnum + + def __init__(self, name, outer_scope): + Scope.__init__(self, name, outer_scope, None) + + def declare_var(self, name, type, pos, + cname=None, visibility='extern', pytyping_modifiers=None): + # Add an entry for an attribute. + if not cname: + cname = name + self._reject_pytyping_modifiers(pos, pytyping_modifiers) + entry = self.declare(name, cname, type, pos, visibility) + entry.is_variable = True + return entry + + +class PropertyScope(Scope): + # Scope holding the __get__, __set__ and __del__ methods for + # a property of an extension type. 
+ # + # parent_type PyExtensionType The type to which the property belongs + + is_property_scope = 1 + + def __init__(self, name, class_scope): + # outer scope is None for some internal properties + outer_scope = class_scope.global_scope() if class_scope.outer_scope else None + Scope.__init__(self, name, outer_scope, parent_scope=class_scope) + self.parent_type = class_scope.parent_type + self.directives = class_scope.directives + + def declare_cfunction(self, name, type, pos, *args, **kwargs): + """Declare a C property function. + """ + if type.return_type.is_void: + error(pos, "C property method cannot return 'void'") + + if type.args and type.args[0].type is py_object_type: + # Set 'self' argument type to extension type. + type.args[0].type = self.parent_scope.parent_type + elif len(type.args) != 1: + error(pos, "C property method must have a single (self) argument") + elif not (type.args[0].type.is_pyobject or type.args[0].type is self.parent_scope.parent_type): + error(pos, "C property method must have a single (object) argument") + + entry = Scope.declare_cfunction(self, name, type, pos, *args, **kwargs) + entry.is_cproperty = True + return entry + + def declare_pyfunction(self, name, pos, allow_redefine=False): + # Add an entry for a method. 
+ signature = get_property_accessor_signature(name) + if signature: + entry = self.declare(name, name, py_object_type, pos, 'private') + entry.is_special = 1 + entry.signature = signature + return entry + else: + error(pos, "Only __get__, __set__ and __del__ methods allowed " + "in a property declaration") + return None + + +class CConstOrVolatileScope(Scope): + + def __init__(self, base_type_scope, is_const=0, is_volatile=0): + Scope.__init__( + self, + 'cv_' + base_type_scope.name, + base_type_scope.outer_scope, + base_type_scope.parent_scope) + self.base_type_scope = base_type_scope + self.is_const = is_const + self.is_volatile = is_volatile + + def lookup_here(self, name): + entry = self.base_type_scope.lookup_here(name) + if entry is not None: + entry = copy.copy(entry) + entry.type = PyrexTypes.c_const_or_volatile_type( + entry.type, self.is_const, self.is_volatile) + return entry + + +class TemplateScope(Scope): + def __init__(self, name, outer_scope): + Scope.__init__(self, name, outer_scope, None) + self.directives = outer_scope.directives diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/TreeFragment.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/TreeFragment.py new file mode 100644 index 0000000000000000000000000000000000000000..521bdda27b949e98134b580979603ea6684fede2 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/TreeFragment.py @@ -0,0 +1,280 @@ +# +# TreeFragments - parsing of strings to trees +# + +""" +Support for parsing strings into code trees. +""" + +from __future__ import absolute_import + +import re +from io import StringIO + +from .Scanning import PyrexScanner, StringSourceDescriptor +from .Symtab import ModuleScope +from . import PyrexTypes +from .Visitor import VisitorTransform +from .Nodes import Node, StatListNode +from .ExprNodes import NameNode +from .StringEncoding import _unicode +from . 
import Parsing +from . import Main +from . import UtilNodes + + +class StringParseContext(Main.Context): + def __init__(self, name, include_directories=None, compiler_directives=None, cpp=False): + if include_directories is None: + include_directories = [] + if compiler_directives is None: + compiler_directives = {} + Main.Context.__init__(self, include_directories, compiler_directives, cpp=cpp, language_level='3str') + self.module_name = name + + def find_module(self, module_name, from_module=None, pos=None, need_pxd=1, absolute_fallback=True, relative_import=False): + if module_name not in (self.module_name, 'cython'): + raise AssertionError("Not yet supporting any cimports/includes from string code snippets") + return ModuleScope(module_name, parent_module=None, context=self) + + +def parse_from_strings(name, code, pxds=None, level=None, initial_pos=None, + context=None, allow_struct_enum_decorator=False, + in_utility_code=False): + """ + Utility method to parse a (unicode) string of code. This is mostly + used for internal Cython compiler purposes (creating code snippets + that transforms should emit, as well as unit testing). + + code - a unicode string containing Cython (module-level) code + name - a descriptive name for the code source (to use in error messages etc.) + in_utility_code - used to suppress some messages from utility code. False by default + because some generated code snippets like properties and dataclasses + probably want to see those messages. + + RETURNS + + The tree, i.e. a ModuleNode. The ModuleNode's scope attribute is + set to the scope used when parsing. + """ + if context is None: + context = StringParseContext(name) + # Since source files carry an encoding, it makes sense in this context + # to use a unicode string so that code fragments don't have to bother + # with encoding. This means that test code passed in should not have an + # encoding header. 
+ assert isinstance(code, _unicode), "unicode code snippets only please" + encoding = "UTF-8" + + module_name = name + if initial_pos is None: + initial_pos = (name, 1, 0) + code_source = StringSourceDescriptor(name, code) + if in_utility_code: + code_source.in_utility_code = True + + scope = context.find_module(module_name, pos=initial_pos, need_pxd=False) + + buf = StringIO(code) + + scanner = PyrexScanner(buf, code_source, source_encoding = encoding, + scope = scope, context = context, initial_pos = initial_pos) + ctx = Parsing.Ctx(allow_struct_enum_decorator=allow_struct_enum_decorator) + + if level is None: + tree = Parsing.p_module(scanner, 0, module_name, ctx=ctx) + tree.scope = scope + tree.is_pxd = False + else: + tree = Parsing.p_code(scanner, level=level, ctx=ctx) + + tree.scope = scope + return tree + + +class TreeCopier(VisitorTransform): + def visit_Node(self, node): + if node is None: + return node + else: + c = node.clone_node() + self.visitchildren(c) + return c + + +class ApplyPositionAndCopy(TreeCopier): + def __init__(self, pos): + super(ApplyPositionAndCopy, self).__init__() + self.pos = pos + + def visit_Node(self, node): + copy = super(ApplyPositionAndCopy, self).visit_Node(node) + copy.pos = self.pos + return copy + + +class TemplateTransform(VisitorTransform): + """ + Makes a copy of a template tree while doing substitutions. + + A dictionary "substitutions" should be passed in when calling + the transform; mapping names to replacement nodes. Then replacement + happens like this: + - If an ExprStatNode contains a single NameNode, whose name is + a key in the substitutions dictionary, the ExprStatNode is + replaced with a copy of the tree given in the dictionary. + It is the responsibility of the caller that the replacement + node is a valid statement. + - If a single NameNode is otherwise encountered, it is replaced + if its name is listed in the substitutions dictionary in the + same way. 
It is the responsibility of the caller to make sure + that the replacement nodes is a valid expression. + + Also a list "temps" should be passed. Any names listed will + be transformed into anonymous, temporary names. + + Currently supported for tempnames is: + NameNode + (various function and class definition nodes etc. should be added to this) + + Each replacement node gets the position of the substituted node + recursively applied to every member node. + """ + + temp_name_counter = 0 + + def __call__(self, node, substitutions, temps, pos): + self.substitutions = substitutions + self.pos = pos + tempmap = {} + temphandles = [] + for temp in temps: + TemplateTransform.temp_name_counter += 1 + handle = UtilNodes.TempHandle(PyrexTypes.py_object_type) + tempmap[temp] = handle + temphandles.append(handle) + self.tempmap = tempmap + result = super(TemplateTransform, self).__call__(node) + if temps: + result = UtilNodes.TempsBlockNode(self.get_pos(node), + temps=temphandles, + body=result) + return result + + def get_pos(self, node): + if self.pos: + return self.pos + else: + return node.pos + + def visit_Node(self, node): + if node is None: + return None + else: + c = node.clone_node() + if self.pos is not None: + c.pos = self.pos + self.visitchildren(c) + return c + + def try_substitution(self, node, key): + sub = self.substitutions.get(key) + if sub is not None: + pos = self.pos + if pos is None: pos = node.pos + return ApplyPositionAndCopy(pos)(sub) + else: + return self.visit_Node(node) # make copy as usual + + def visit_NameNode(self, node): + temphandle = self.tempmap.get(node.name) + if temphandle: + # Replace name with temporary + return temphandle.ref(self.get_pos(node)) + else: + return self.try_substitution(node, node.name) + + def visit_ExprStatNode(self, node): + # If an expression-as-statement consists of only a replaceable + # NameNode, we replace the entire statement, not only the NameNode + if isinstance(node.expr, NameNode): + return 
self.try_substitution(node, node.expr.name) + else: + return self.visit_Node(node) + + +def copy_code_tree(node): + return TreeCopier()(node) + + +_match_indent = re.compile(u"^ *").match + + +def strip_common_indent(lines): + """Strips empty lines and common indentation from the list of strings given in lines""" + # TODO: Facilitate textwrap.indent instead + lines = [x for x in lines if x.strip() != u""] + if lines: + minindent = min([len(_match_indent(x).group(0)) for x in lines]) + lines = [x[minindent:] for x in lines] + return lines + + +class TreeFragment(object): + def __init__(self, code, name=None, pxds=None, temps=None, pipeline=None, level=None, initial_pos=None): + if pxds is None: + pxds = {} + if temps is None: + temps = [] + if pipeline is None: + pipeline = [] + if not name: + name = "(tree fragment)" + + if isinstance(code, _unicode): + def fmt(x): return u"\n".join(strip_common_indent(x.split(u"\n"))) + + fmt_code = fmt(code) + fmt_pxds = {} + for key, value in pxds.items(): + fmt_pxds[key] = fmt(value) + mod = t = parse_from_strings(name, fmt_code, fmt_pxds, level=level, initial_pos=initial_pos) + if level is None: + t = t.body # Make sure a StatListNode is at the top + if not isinstance(t, StatListNode): + t = StatListNode(pos=mod.pos, stats=[t]) + for transform in pipeline: + if transform is None: + continue + t = transform(t) + self.root = t + elif isinstance(code, Node): + if pxds: + raise NotImplementedError() + self.root = code + else: + raise ValueError("Unrecognized code format (accepts unicode and Node)") + self.temps = temps + + def copy(self): + return copy_code_tree(self.root) + + def substitute(self, nodes=None, temps=None, pos = None): + if nodes is None: + nodes = {} + if temps is None: + temps = [] + return TemplateTransform()(self.root, + substitutions = nodes, + temps = self.temps + temps, pos = pos) + + +class SetPosTransform(VisitorTransform): + def __init__(self, pos): + super(SetPosTransform, self).__init__() + self.pos = 
pos + + def visit_Node(self, node): + node.pos = self.pos + self.visitchildren(node) + return node diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/TypeInference.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/TypeInference.py new file mode 100644 index 0000000000000000000000000000000000000000..bb68db8976a7f86f2b5b5f9452b5520137514d10 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/TypeInference.py @@ -0,0 +1,596 @@ +from __future__ import absolute_import + +from .Errors import error, message +from . import ExprNodes +from . import Nodes +from . import Builtin +from . import PyrexTypes +from .. import Utils +from .PyrexTypes import py_object_type, unspecified_type +from .Visitor import CythonTransform, EnvTransform + +try: + reduce +except NameError: + from functools import reduce + + +class TypedExprNode(ExprNodes.ExprNode): + # Used for declaring assignments of a specified type without a known entry. + subexprs = [] + + def __init__(self, type, pos=None): + super(TypedExprNode, self).__init__(pos, type=type) + +object_expr = TypedExprNode(py_object_type) + + +class MarkParallelAssignments(EnvTransform): + # Collects assignments inside parallel blocks prange, with parallel. + # Perhaps it's better to move it to ControlFlowAnalysis. + + # tells us whether we're in a normal loop + in_loop = False + + parallel_errors = False + + def __init__(self, context): + # Track the parallel block scopes (with parallel, for i in prange()) + self.parallel_block_stack = [] + super(MarkParallelAssignments, self).__init__(context) + + def mark_assignment(self, lhs, rhs, inplace_op=None): + if isinstance(lhs, (ExprNodes.NameNode, Nodes.PyArgDeclNode)): + if lhs.entry is None: + # TODO: This shouldn't happen... 
+ return + + if self.parallel_block_stack: + parallel_node = self.parallel_block_stack[-1] + previous_assignment = parallel_node.assignments.get(lhs.entry) + + # If there was a previous assignment to the variable, keep the + # previous assignment position + if previous_assignment: + pos, previous_inplace_op = previous_assignment + + if (inplace_op and previous_inplace_op and + inplace_op != previous_inplace_op): + # x += y; x *= y + t = (inplace_op, previous_inplace_op) + error(lhs.pos, + "Reduction operator '%s' is inconsistent " + "with previous reduction operator '%s'" % t) + else: + pos = lhs.pos + + parallel_node.assignments[lhs.entry] = (pos, inplace_op) + parallel_node.assigned_nodes.append(lhs) + + elif isinstance(lhs, ExprNodes.SequenceNode): + for i, arg in enumerate(lhs.args): + if not rhs or arg.is_starred: + item_node = None + else: + item_node = rhs.inferable_item_node(i) + self.mark_assignment(arg, item_node) + else: + # Could use this info to infer cdef class attributes... + pass + + def visit_WithTargetAssignmentStatNode(self, node): + self.mark_assignment(node.lhs, node.with_node.enter_call) + self.visitchildren(node) + return node + + def visit_SingleAssignmentNode(self, node): + self.mark_assignment(node.lhs, node.rhs) + self.visitchildren(node) + return node + + def visit_CascadedAssignmentNode(self, node): + for lhs in node.lhs_list: + self.mark_assignment(lhs, node.rhs) + self.visitchildren(node) + return node + + def visit_InPlaceAssignmentNode(self, node): + self.mark_assignment(node.lhs, node.create_binop_node(), node.operator) + self.visitchildren(node) + return node + + def visit_ForInStatNode(self, node): + # TODO: Remove redundancy with range optimization... 
+ is_special = False + sequence = node.iterator.sequence + target = node.target + iterator_scope = node.iterator.expr_scope or self.current_env() + if isinstance(sequence, ExprNodes.SimpleCallNode): + function = sequence.function + if sequence.self is None and function.is_name: + entry = iterator_scope.lookup(function.name) + if not entry or entry.is_builtin: + if function.name == 'reversed' and len(sequence.args) == 1: + sequence = sequence.args[0] + elif function.name == 'enumerate' and len(sequence.args) == 1: + if target.is_sequence_constructor and len(target.args) == 2: + iterator = sequence.args[0] + if iterator.is_name: + iterator_type = iterator.infer_type(iterator_scope) + if iterator_type.is_builtin_type: + # assume that builtin types have a length within Py_ssize_t + self.mark_assignment( + target.args[0], + ExprNodes.IntNode(target.pos, value='PY_SSIZE_T_MAX', + type=PyrexTypes.c_py_ssize_t_type)) + target = target.args[1] + sequence = sequence.args[0] + if isinstance(sequence, ExprNodes.SimpleCallNode): + function = sequence.function + if sequence.self is None and function.is_name: + entry = iterator_scope.lookup(function.name) + if not entry or entry.is_builtin: + if function.name in ('range', 'xrange'): + is_special = True + for arg in sequence.args[:2]: + self.mark_assignment(target, arg) + if len(sequence.args) > 2: + self.mark_assignment( + target, + ExprNodes.binop_node(node.pos, + '+', + sequence.args[0], + sequence.args[2])) + if not is_special: + # A for-loop basically translates to subsequent calls to + # __getitem__(), so using an IndexNode here allows us to + # naturally infer the base type of pointers, C arrays, + # Python strings, etc., while correctly falling back to an + # object type when the base type cannot be handled. 
+ self.mark_assignment(target, ExprNodes.IndexNode( + node.pos, + base=sequence, + index=ExprNodes.IntNode(target.pos, value='PY_SSIZE_T_MAX', + type=PyrexTypes.c_py_ssize_t_type))) + + self.visitchildren(node) + return node + + def visit_ForFromStatNode(self, node): + self.mark_assignment(node.target, node.bound1) + if node.step is not None: + self.mark_assignment(node.target, + ExprNodes.binop_node(node.pos, + '+', + node.bound1, + node.step)) + self.visitchildren(node) + return node + + def visit_WhileStatNode(self, node): + self.visitchildren(node) + return node + + def visit_ExceptClauseNode(self, node): + if node.target is not None: + self.mark_assignment(node.target, object_expr) + self.visitchildren(node) + return node + + def visit_FromCImportStatNode(self, node): + return node # Can't be assigned to... + + def visit_FromImportStatNode(self, node): + for name, target in node.items: + if name != "*": + self.mark_assignment(target, object_expr) + self.visitchildren(node) + return node + + def visit_DefNode(self, node): + # use fake expressions with the right result type + if node.star_arg: + self.mark_assignment( + node.star_arg, TypedExprNode(Builtin.tuple_type, node.pos)) + if node.starstar_arg: + self.mark_assignment( + node.starstar_arg, TypedExprNode(Builtin.dict_type, node.pos)) + EnvTransform.visit_FuncDefNode(self, node) + return node + + def visit_DelStatNode(self, node): + for arg in node.args: + self.mark_assignment(arg, arg) + self.visitchildren(node) + return node + + def visit_ParallelStatNode(self, node): + if self.parallel_block_stack: + node.parent = self.parallel_block_stack[-1] + else: + node.parent = None + + nested = False + if node.is_prange: + if not node.parent: + node.is_parallel = True + else: + node.is_parallel = (node.parent.is_prange or not + node.parent.is_parallel) + nested = node.parent.is_prange + else: + node.is_parallel = True + # Note: nested with parallel() blocks are handled by + # ParallelRangeTransform! 
+ # nested = node.parent + nested = node.parent and node.parent.is_prange + + self.parallel_block_stack.append(node) + + nested = nested or len(self.parallel_block_stack) > 2 + if not self.parallel_errors and nested and not node.is_prange: + error(node.pos, "Only prange() may be nested") + self.parallel_errors = True + + if node.is_prange: + self.visitchildren(node, attrs=('body', 'target', 'args')) + + self.parallel_block_stack.pop() + if node.else_clause: + node.else_clause = self.visit(node.else_clause) + else: + self.visitchildren(node) + self.parallel_block_stack.pop() + + self.parallel_errors = False + return node + + def visit_YieldExprNode(self, node): + if self.parallel_block_stack: + error(node.pos, "'%s' not allowed in parallel sections" % node.expr_keyword) + return node + + def visit_ReturnStatNode(self, node): + node.in_parallel = bool(self.parallel_block_stack) + return node + + +class MarkOverflowingArithmetic(CythonTransform): + + # It may be possible to integrate this with the above for + # performance improvements (though likely not worth it). 
+ + might_overflow = False + + def __call__(self, root): + self.env_stack = [] + self.env = root.scope + return super(MarkOverflowingArithmetic, self).__call__(root) + + def visit_safe_node(self, node): + self.might_overflow, saved = False, self.might_overflow + self.visitchildren(node) + self.might_overflow = saved + return node + + def visit_neutral_node(self, node): + self.visitchildren(node) + return node + + def visit_dangerous_node(self, node): + self.might_overflow, saved = True, self.might_overflow + self.visitchildren(node) + self.might_overflow = saved + return node + + def visit_FuncDefNode(self, node): + self.env_stack.append(self.env) + self.env = node.local_scope + self.visit_safe_node(node) + self.env = self.env_stack.pop() + return node + + def visit_NameNode(self, node): + if self.might_overflow: + entry = node.entry or self.env.lookup(node.name) + if entry: + entry.might_overflow = True + return node + + def visit_BinopNode(self, node): + if node.operator in '&|^': + return self.visit_neutral_node(node) + else: + return self.visit_dangerous_node(node) + + def visit_SimpleCallNode(self, node): + if node.function.is_name and node.function.name == 'abs': + # Overflows for minimum value of fixed size ints. 
+ return self.visit_dangerous_node(node) + else: + return self.visit_neutral_node(node) + + visit_UnopNode = visit_neutral_node + + visit_UnaryMinusNode = visit_dangerous_node + + visit_InPlaceAssignmentNode = visit_dangerous_node + + visit_Node = visit_safe_node + + def visit_assignment(self, lhs, rhs): + if (isinstance(rhs, ExprNodes.IntNode) + and isinstance(lhs, ExprNodes.NameNode) + and Utils.long_literal(rhs.value)): + entry = lhs.entry or self.env.lookup(lhs.name) + if entry: + entry.might_overflow = True + + def visit_SingleAssignmentNode(self, node): + self.visit_assignment(node.lhs, node.rhs) + self.visitchildren(node) + return node + + def visit_CascadedAssignmentNode(self, node): + for lhs in node.lhs_list: + self.visit_assignment(lhs, node.rhs) + self.visitchildren(node) + return node + +class PyObjectTypeInferer(object): + """ + If it's not declared, it's a PyObject. + """ + def infer_types(self, scope): + """ + Given a dict of entries, map all unspecified types to a specified type. + """ + for name, entry in scope.entries.items(): + if entry.type is unspecified_type: + entry.type = py_object_type + +class SimpleAssignmentTypeInferer(object): + """ + Very basic type inference. + + Note: in order to support cross-closure type inference, this must be + applies to nested scopes in top-down order. 
+ """ + def set_entry_type(self, entry, entry_type, scope): + for e in entry.all_entries(): + e.type = entry_type + if e.type.is_memoryviewslice: + # memoryview slices crash if they don't get initialized + e.init = e.type.default_value + if e.type.is_cpp_class: + if scope.directives['cpp_locals']: + e.make_cpp_optional() + else: + e.type.check_nullary_constructor(entry.pos) + + def infer_types(self, scope): + enabled = scope.directives['infer_types'] + verbose = scope.directives['infer_types.verbose'] + + if enabled == True: + spanning_type = aggressive_spanning_type + elif enabled is None: # safe mode + spanning_type = safe_spanning_type + else: + for entry in scope.entries.values(): + if entry.type is unspecified_type: + self.set_entry_type(entry, py_object_type, scope) + return + + # Set of assignments + assignments = set() + assmts_resolved = set() + dependencies = {} + assmt_to_names = {} + + for name, entry in scope.entries.items(): + for assmt in entry.cf_assignments: + names = assmt.type_dependencies() + assmt_to_names[assmt] = names + assmts = set() + for node in names: + assmts.update(node.cf_state) + dependencies[assmt] = assmts + if entry.type is unspecified_type: + assignments.update(entry.cf_assignments) + else: + assmts_resolved.update(entry.cf_assignments) + + def infer_name_node_type(node): + types = [assmt.inferred_type for assmt in node.cf_state] + if not types: + node_type = py_object_type + else: + entry = node.entry + node_type = spanning_type( + types, entry.might_overflow, scope) + node.inferred_type = node_type + + def infer_name_node_type_partial(node): + types = [assmt.inferred_type for assmt in node.cf_state + if assmt.inferred_type is not None] + if not types: + return + entry = node.entry + return spanning_type(types, entry.might_overflow, scope) + + def inferred_types(entry): + has_none = False + has_pyobjects = False + types = [] + for assmt in entry.cf_assignments: + if assmt.rhs.is_none: + has_none = True + else: + rhs_type = 
assmt.inferred_type + if rhs_type and rhs_type.is_pyobject: + has_pyobjects = True + types.append(rhs_type) + # Ignore None assignments as long as there are concrete Python type assignments. + # but include them if None is the only assigned Python object. + if has_none and not has_pyobjects: + types.append(py_object_type) + return types + + def resolve_assignments(assignments): + resolved = set() + for assmt in assignments: + deps = dependencies[assmt] + # All assignments are resolved + if assmts_resolved.issuperset(deps): + for node in assmt_to_names[assmt]: + infer_name_node_type(node) + # Resolve assmt + inferred_type = assmt.infer_type() + assmts_resolved.add(assmt) + resolved.add(assmt) + assignments.difference_update(resolved) + return resolved + + def partial_infer(assmt): + partial_types = [] + for node in assmt_to_names[assmt]: + partial_type = infer_name_node_type_partial(node) + if partial_type is None: + return False + partial_types.append((node, partial_type)) + for node, partial_type in partial_types: + node.inferred_type = partial_type + assmt.infer_type() + return True + + partial_assmts = set() + def resolve_partial(assignments): + # try to handle circular references + partials = set() + for assmt in assignments: + if assmt in partial_assmts: + continue + if partial_infer(assmt): + partials.add(assmt) + assmts_resolved.add(assmt) + partial_assmts.update(partials) + return partials + + # Infer assignments + while True: + if not resolve_assignments(assignments): + if not resolve_partial(assignments): + break + inferred = set() + # First pass + for entry in scope.entries.values(): + if entry.type is not unspecified_type: + continue + entry_type = py_object_type + if assmts_resolved.issuperset(entry.cf_assignments): + types = inferred_types(entry) + if types and all(types): + entry_type = spanning_type( + types, entry.might_overflow, scope) + inferred.add(entry) + self.set_entry_type(entry, entry_type, scope) + + def reinfer(): + dirty = False + for 
entry in inferred: + for assmt in entry.cf_assignments: + assmt.infer_type() + types = inferred_types(entry) + new_type = spanning_type(types, entry.might_overflow, scope) + if new_type != entry.type: + self.set_entry_type(entry, new_type, scope) + dirty = True + return dirty + + # types propagation + while reinfer(): + pass + + if verbose: + for entry in inferred: + message(entry.pos, "inferred '%s' to be of type '%s'" % ( + entry.name, entry.type)) + + +def find_spanning_type(type1, type2): + if type1 is type2: + result_type = type1 + elif type1 is PyrexTypes.c_bint_type or type2 is PyrexTypes.c_bint_type: + # type inference can break the coercion back to a Python bool + # if it returns an arbitrary int type here + return py_object_type + else: + result_type = PyrexTypes.spanning_type(type1, type2) + if result_type in (PyrexTypes.c_double_type, PyrexTypes.c_float_type, + Builtin.float_type): + # Python's float type is just a C double, so it's safe to + # use the C type instead + return PyrexTypes.c_double_type + return result_type + +def simply_type(result_type): + result_type = PyrexTypes.remove_cv_ref(result_type, remove_fakeref=True) + if result_type.is_array: + result_type = PyrexTypes.c_ptr_type(result_type.base_type) + return result_type + +def aggressive_spanning_type(types, might_overflow, scope): + return simply_type(reduce(find_spanning_type, types)) + +def safe_spanning_type(types, might_overflow, scope): + result_type = simply_type(reduce(find_spanning_type, types)) + if result_type.is_pyobject: + # In theory, any specific Python type is always safe to + # infer. However, inferring str can cause some existing code + # to break, since we are also now much more strict about + # coercion from str to char *. See trac #553. 
+ if result_type.name == 'str': + return py_object_type + else: + return result_type + elif (result_type is PyrexTypes.c_double_type or + result_type is PyrexTypes.c_float_type): + # Python's float type is just a C double, so it's safe to use + # the C type instead. Similarly if given a C float, it leads to + # a small loss of precision vs Python but is otherwise the same + return result_type + elif result_type is PyrexTypes.c_bint_type: + # find_spanning_type() only returns 'bint' for clean boolean + # operations without other int types, so this is safe, too + return result_type + elif result_type.is_pythran_expr: + return result_type + elif result_type.is_ptr: + # Any pointer except (signed|unsigned|) char* can't implicitly + # become a PyObject, and inferring char* is now accepted, too. + return result_type + elif result_type.is_cpp_class: + # These can't implicitly become Python objects either. + return result_type + elif result_type.is_struct: + # Though we have struct -> object for some structs, this is uncommonly + # used, won't arise in pure Python, and there shouldn't be side + # effects, so I'm declaring this safe. 
+ return result_type + elif result_type.is_memoryviewslice: + return result_type + elif result_type is PyrexTypes.soft_complex_type: + return result_type + elif result_type == PyrexTypes.c_double_complex_type: + return result_type + elif (result_type.is_int or result_type.is_enum) and not might_overflow: + return result_type + elif (not result_type.can_coerce_to_pyobject(scope) + and not result_type.is_error): + return result_type + return py_object_type + + +def get_type_inferer(): + return SimpleAssignmentTypeInferer() diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/Visitor.pxd b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/Visitor.pxd new file mode 100644 index 0000000000000000000000000000000000000000..9c5aac6dcc7fad0d09670aa8398b58cb2ce2d995 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/Visitor.pxd @@ -0,0 +1,55 @@ +# cython: language_level=3str + +cimport cython + +cdef class TreeVisitor: + cdef public list access_path + cdef dict dispatch_table + + cpdef visit(self, obj) + cdef _visit(self, obj) + cdef find_handler(self, obj) + cdef _visitchild(self, child, parent, attrname, idx) + cdef dict _visitchildren(self, parent, attrs, exclude) + cpdef visitchildren(self, parent, attrs=*, exclude=*) + cdef _raise_compiler_error(self, child, e) + +cdef class VisitorTransform(TreeVisitor): + cdef dict _process_children(self, parent, attrs=*, exclude=*) + cpdef visitchildren(self, parent, attrs=*, exclude=*) + cdef list _flatten_list(self, list orig_list) + cpdef visitchild(self, parent, str attr, idx=*) + +cdef class CythonTransform(VisitorTransform): + cdef public context + cdef public current_directives + +cdef class ScopeTrackingTransform(CythonTransform): + cdef public scope_type + cdef public scope_node + cdef visit_scope(self, node, scope_type) + +cdef class EnvTransform(CythonTransform): + cdef public list env_stack + +cdef class 
MethodDispatcherTransform(EnvTransform): + @cython.final + cdef _visit_binop_node(self, node) + @cython.final + cdef _find_handler(self, match_name, bint has_kwargs) + @cython.final + cdef _delegate_to_assigned_value(self, node, function, arg_list, kwargs) + @cython.final + cdef _dispatch_to_handler(self, node, function, arg_list, kwargs) + @cython.final + cdef _dispatch_to_method_handler(self, attr_name, self_arg, + is_unbound_method, type_name, + node, function, arg_list, kwargs) + +cdef class RecursiveNodeReplacer(VisitorTransform): + cdef public orig_node + cdef public new_node + +cdef class NodeFinder(TreeVisitor): + cdef node + cdef public bint found diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/__pycache__/Buffer.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/__pycache__/Buffer.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4f98148415a581963b9902aca37f8204362e8fd3 Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/__pycache__/Buffer.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/__pycache__/FusedNode.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/__pycache__/FusedNode.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bde8f8edbd73b5e1f0b85995306cd162818f3e8d Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/__pycache__/FusedNode.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/__pycache__/Future.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/__pycache__/Future.cpython-311.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..91a6a37c61a88fe2c26d2ddf371e8077edcaaf7a Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/__pycache__/Future.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/__pycache__/Main.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/__pycache__/Main.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..43b45719b99b1fa20239c71f599cfbfce7e29d62 Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/__pycache__/Main.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/__pycache__/MemoryView.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/__pycache__/MemoryView.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..10b40f986cb1c71c1c65b62e12bbecf2423d426a Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/__pycache__/MemoryView.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/__pycache__/TreeFragment.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/__pycache__/TreeFragment.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e6a286ffc460ae13391cc78567524631a9246e97 Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/__pycache__/TreeFragment.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/__pycache__/TypeInference.cpython-311.pyc 
b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/__pycache__/TypeInference.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7fc0395058187976ad73e570ccdd9e882a5c466d Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/__pycache__/TypeInference.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/__pycache__/UtilNodes.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/__pycache__/UtilNodes.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0bce7747916d2d1d518eeae4d11f87c280b74ff3 Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/__pycache__/UtilNodes.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/__pycache__/Visitor.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/__pycache__/Visitor.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2a6eac3e0d6d18614c3ab4a764194cc33fa3b88b Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/__pycache__/Visitor.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Plex/Lexicons.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Plex/Lexicons.py new file mode 100644 index 0000000000000000000000000000000000000000..11084bda0c78c61e0f0a69fc2118893674a2fdd2 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Plex/Lexicons.py @@ -0,0 +1,179 @@ +""" +Python Lexical Analyser + +Lexical Analyser Specification +""" +from __future__ import absolute_import + +from . import Actions +from . import DFA +from . 
import Errors +from . import Machines +from . import Regexps + +# debug_flags for Lexicon constructor +DUMP_NFA = 1 +DUMP_DFA = 2 + + +class State(object): + """ + This class is used as part of a Plex.Lexicon specification to + introduce a user-defined state. + + Constructor: + + State(name, token_specifications) + """ + + name = None + tokens = None + + def __init__(self, name, tokens): + self.name = name + self.tokens = tokens + + +class Lexicon(object): + """ + Lexicon(specification) builds a lexical analyser from the given + |specification|. The specification consists of a list of + specification items. Each specification item may be either: + + 1) A token definition, which is a tuple: + + (pattern, action) + + The |pattern| is a regular axpression built using the + constructors defined in the Plex module. + + The |action| is the action to be performed when this pattern + is recognised (see below). + + 2) A state definition: + + State(name, tokens) + + where |name| is a character string naming the state, + and |tokens| is a list of token definitions as + above. The meaning and usage of states is described + below. + + Actions + ------- + + The |action| in a token specification may be one of three things: + + 1) A function, which is called as follows: + + function(scanner, text) + + where |scanner| is the relevant Scanner instance, and |text| + is the matched text. If the function returns anything + other than None, that value is returned as the value of the + token. If it returns None, scanning continues as if the IGNORE + action were specified (see below). + + 2) One of the following special actions: + + IGNORE means that the recognised characters will be treated as + white space and ignored. Scanning will continue until + the next non-ignored token is recognised before returning. + + TEXT causes the scanned text itself to be returned as the + value of the token. + + 3) Any other value, which is returned as the value of the token. 
+ + States + ------ + + At any given time, the scanner is in one of a number of states. + Associated with each state is a set of possible tokens. When scanning, + only tokens associated with the current state are recognised. + + There is a default state, whose name is the empty string. Token + definitions which are not inside any State definition belong to + the default state. + + The initial state of the scanner is the default state. The state can + be changed in one of two ways: + + 1) Using Begin(state_name) as the action of a token. + + 2) Calling the begin(state_name) method of the Scanner. + + To change back to the default state, use '' as the state name. + """ + + machine = None # Machine + tables = None # StateTableMachine + + def __init__(self, specifications, debug=None, debug_flags=7): + if not isinstance(specifications, list): + raise Errors.InvalidScanner("Scanner definition is not a list") + + nfa = Machines.Machine() + default_initial_state = nfa.new_initial_state('') + token_number = 1 + + for spec in specifications: + if isinstance(spec, State): + user_initial_state = nfa.new_initial_state(spec.name) + for token in spec.tokens: + self.add_token_to_machine( + nfa, user_initial_state, token, token_number) + token_number += 1 + elif isinstance(spec, tuple): + self.add_token_to_machine( + nfa, default_initial_state, spec, token_number) + token_number += 1 + else: + raise Errors.InvalidToken( + token_number, + "Expected a token definition (tuple) or State instance") + + if debug and (debug_flags & 1): + debug.write("\n============= NFA ===========\n") + nfa.dump(debug) + + dfa = DFA.nfa_to_dfa(nfa, debug=(debug_flags & 3) == 3 and debug) + + if debug and (debug_flags & 2): + debug.write("\n============= DFA ===========\n") + dfa.dump(debug) + + self.machine = dfa + + def add_token_to_machine(self, machine, initial_state, token_spec, token_number): + try: + (re, action_spec) = self.parse_token_definition(token_spec) + if isinstance(action_spec, 
Actions.Action): + action = action_spec + else: + try: + action_spec.__call__ + except AttributeError: + action = Actions.Return(action_spec) + else: + action = Actions.Call(action_spec) + final_state = machine.new_state() + re.build_machine(machine, initial_state, final_state, + match_bol=1, nocase=0) + final_state.set_action(action, priority=-token_number) + except Errors.PlexError as e: + raise e.__class__("Token number %d: %s" % (token_number, e)) + + def parse_token_definition(self, token_spec): + if not isinstance(token_spec, tuple): + raise Errors.InvalidToken("Token definition is not a tuple") + if len(token_spec) != 2: + raise Errors.InvalidToken("Wrong number of items in token definition") + + pattern, action = token_spec + if not isinstance(pattern, Regexps.RE): + raise Errors.InvalidToken("Pattern is not an RE instance") + return (pattern, action) + + def get_initial_state(self, name): + return self.machine.get_initial_state(name) diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Plex/__pycache__/DFA.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Plex/__pycache__/DFA.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cc8ae942de0960cdedbef2457632ed7ee4936efb Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Plex/__pycache__/DFA.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Plex/__pycache__/Machines.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Plex/__pycache__/Machines.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e2ba149c0f6fbd492a76f4a1058627e8517b7f7a Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Plex/__pycache__/Machines.cpython-311.pyc differ diff --git 
a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Plex/__pycache__/Scanners.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Plex/__pycache__/Scanners.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ed5e7eb5f8ad64170c1aefec80b69a20beba1b08 Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Plex/__pycache__/Scanners.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Utility/Builtins.c b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Utility/Builtins.c new file mode 100644 index 0000000000000000000000000000000000000000..063d5e4bfd92bf1d24f9ed244ea0713590b88cde --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Utility/Builtins.c @@ -0,0 +1,609 @@ +/* + * Special implementations of built-in functions and methods. + * + * Optional optimisations for builtins are in Optimize.c. + * + * General object operations and protocols are in ObjectHandling.c. + */ + +//////////////////// Globals.proto //////////////////// + +static PyObject* __Pyx_Globals(void); /*proto*/ + +//////////////////// Globals //////////////////// +//@substitute: naming +//@requires: ObjectHandling.c::GetAttr + +// This is a stub implementation until we have something more complete. +// Currently, we only handle the most common case of a read-only dict +// of Python names. Supporting cdef names in the module and write +// access requires a rewrite as a dedicated class. 
+ +static PyObject* __Pyx_Globals(void) { + return __Pyx_NewRef($moddict_cname); +} + +//////////////////// PyExecGlobals.proto //////////////////// + +static PyObject* __Pyx_PyExecGlobals(PyObject*); + +//////////////////// PyExecGlobals //////////////////// +//@substitute: naming +//@requires: PyExec + +static PyObject* __Pyx_PyExecGlobals(PyObject* code) { + return __Pyx_PyExec2(code, $moddict_cname); +} + +//////////////////// PyExec.proto //////////////////// + +static PyObject* __Pyx_PyExec3(PyObject*, PyObject*, PyObject*); +static CYTHON_INLINE PyObject* __Pyx_PyExec2(PyObject*, PyObject*); + +//////////////////// PyExec //////////////////// +//@substitute: naming + +static CYTHON_INLINE PyObject* __Pyx_PyExec2(PyObject* o, PyObject* globals) { + return __Pyx_PyExec3(o, globals, NULL); +} + +static PyObject* __Pyx_PyExec3(PyObject* o, PyObject* globals, PyObject* locals) { + PyObject* result; + PyObject* s = 0; + char *code = 0; + + if (!globals || globals == Py_None) { + globals = $moddict_cname; + } else if (unlikely(!PyDict_Check(globals))) { + __Pyx_TypeName globals_type_name = + __Pyx_PyType_GetName(Py_TYPE(globals)); + PyErr_Format(PyExc_TypeError, + "exec() arg 2 must be a dict, not " __Pyx_FMT_TYPENAME, + globals_type_name); + __Pyx_DECREF_TypeName(globals_type_name); + goto bad; + } + if (!locals || locals == Py_None) { + locals = globals; + } + + if (__Pyx_PyDict_GetItemStr(globals, PYIDENT("__builtins__")) == NULL) { + if (unlikely(PyDict_SetItem(globals, PYIDENT("__builtins__"), PyEval_GetBuiltins()) < 0)) + goto bad; + } + + if (PyCode_Check(o)) { + if (unlikely(__Pyx_PyCode_HasFreeVars((PyCodeObject *)o))) { + PyErr_SetString(PyExc_TypeError, + "code object passed to exec() may not contain free variables"); + goto bad; + } + #if PY_VERSION_HEX < 0x030200B1 || (CYTHON_COMPILING_IN_PYPY && PYPY_VERSION_NUM < 0x07030400) + result = PyEval_EvalCode((PyCodeObject *)o, globals, locals); + #else + result = PyEval_EvalCode(o, globals, locals); + 
#endif + } else { + PyCompilerFlags cf; + cf.cf_flags = 0; +#if PY_VERSION_HEX >= 0x030800A3 + cf.cf_feature_version = PY_MINOR_VERSION; +#endif + if (PyUnicode_Check(o)) { + cf.cf_flags = PyCF_SOURCE_IS_UTF8; + s = PyUnicode_AsUTF8String(o); + if (unlikely(!s)) goto bad; + o = s; + #if PY_MAJOR_VERSION >= 3 + } else if (unlikely(!PyBytes_Check(o))) { + #else + } else if (unlikely(!PyString_Check(o))) { + #endif + __Pyx_TypeName o_type_name = __Pyx_PyType_GetName(Py_TYPE(o)); + PyErr_Format(PyExc_TypeError, + "exec: arg 1 must be string, bytes or code object, got " __Pyx_FMT_TYPENAME, + o_type_name); + __Pyx_DECREF_TypeName(o_type_name); + goto bad; + } + #if PY_MAJOR_VERSION >= 3 + code = PyBytes_AS_STRING(o); + #else + code = PyString_AS_STRING(o); + #endif + if (PyEval_MergeCompilerFlags(&cf)) { + result = PyRun_StringFlags(code, Py_file_input, globals, locals, &cf); + } else { + result = PyRun_String(code, Py_file_input, globals, locals); + } + Py_XDECREF(s); + } + + return result; +bad: + Py_XDECREF(s); + return 0; +} + +//////////////////// GetAttr3.proto //////////////////// + +static CYTHON_INLINE PyObject *__Pyx_GetAttr3(PyObject *, PyObject *, PyObject *); /*proto*/ + +//////////////////// GetAttr3 //////////////////// +//@requires: ObjectHandling.c::PyObjectGetAttrStr +//@requires: Exceptions.c::PyThreadStateGet +//@requires: Exceptions.c::PyErrFetchRestore +//@requires: Exceptions.c::PyErrExceptionMatches + +#if __PYX_LIMITED_VERSION_HEX < 0x030d00A1 +static PyObject *__Pyx_GetAttr3Default(PyObject *d) { + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + if (unlikely(!__Pyx_PyErr_ExceptionMatches(PyExc_AttributeError))) + return NULL; + __Pyx_PyErr_Clear(); + Py_INCREF(d); + return d; +} +#endif + +static CYTHON_INLINE PyObject *__Pyx_GetAttr3(PyObject *o, PyObject *n, PyObject *d) { + PyObject *r; +#if __PYX_LIMITED_VERSION_HEX >= 0x030d00A1 + int res = PyObject_GetOptionalAttr(o, n, &r); + // On failure (res == -1), r is set to NULL. 
+ return (res != 0) ? r : __Pyx_NewRef(d); +#else + #if CYTHON_USE_TYPE_SLOTS + if (likely(PyString_Check(n))) { + r = __Pyx_PyObject_GetAttrStrNoError(o, n); + if (unlikely(!r) && likely(!PyErr_Occurred())) { + r = __Pyx_NewRef(d); + } + return r; + } + #endif + r = PyObject_GetAttr(o, n); + return (likely(r)) ? r : __Pyx_GetAttr3Default(d); +#endif +} + +//////////////////// HasAttr.proto //////////////////// + +static CYTHON_INLINE int __Pyx_HasAttr(PyObject *, PyObject *); /*proto*/ + +//////////////////// HasAttr //////////////////// +//@requires: ObjectHandling.c::GetAttr + +static CYTHON_INLINE int __Pyx_HasAttr(PyObject *o, PyObject *n) { + PyObject *r; + if (unlikely(!__Pyx_PyBaseString_Check(n))) { + PyErr_SetString(PyExc_TypeError, + "hasattr(): attribute name must be string"); + return -1; + } + r = __Pyx_GetAttr(o, n); + if (!r) { + PyErr_Clear(); + return 0; + } else { + Py_DECREF(r); + return 1; + } +} + +//////////////////// Intern.proto //////////////////// + +static PyObject* __Pyx_Intern(PyObject* s); /* proto */ + +//////////////////// Intern //////////////////// +//@requires: ObjectHandling.c::RaiseUnexpectedTypeError + +static PyObject* __Pyx_Intern(PyObject* s) { + if (unlikely(!PyString_CheckExact(s))) { + __Pyx_RaiseUnexpectedTypeError("str", s); + return NULL; + } + Py_INCREF(s); + #if PY_MAJOR_VERSION >= 3 + PyUnicode_InternInPlace(&s); + #else + PyString_InternInPlace(&s); + #endif + return s; +} + +//////////////////// abs_longlong.proto //////////////////// + +static CYTHON_INLINE PY_LONG_LONG __Pyx_abs_longlong(PY_LONG_LONG x) { +#if defined (__cplusplus) && __cplusplus >= 201103L + return std::abs(x); +#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L + return llabs(x); +#elif defined (_MSC_VER) + // abs() is defined for long, but 64-bits type on MSVC is long long. 
+ // Use MS-specific _abs64() instead, which returns the original (negative) value for abs(-MAX-1) + return _abs64(x); +#elif defined (__GNUC__) + // gcc or clang on 64 bit windows. + return __builtin_llabs(x); +#else + if (sizeof(PY_LONG_LONG) <= sizeof(Py_ssize_t)) + return __Pyx_sst_abs(x); + return (x<0) ? -x : x; +#endif +} + + +//////////////////// py_abs.proto //////////////////// + +#if CYTHON_USE_PYLONG_INTERNALS +static PyObject *__Pyx_PyLong_AbsNeg(PyObject *num);/*proto*/ + +#define __Pyx_PyNumber_Absolute(x) \ + ((likely(PyLong_CheckExact(x))) ? \ + (likely(__Pyx_PyLong_IsNonNeg(x)) ? (Py_INCREF(x), (x)) : __Pyx_PyLong_AbsNeg(x)) : \ + PyNumber_Absolute(x)) + +#else +#define __Pyx_PyNumber_Absolute(x) PyNumber_Absolute(x) +#endif + +//////////////////// py_abs //////////////////// + +#if CYTHON_USE_PYLONG_INTERNALS +static PyObject *__Pyx_PyLong_AbsNeg(PyObject *n) { +#if PY_VERSION_HEX >= 0x030C00A7 + if (likely(__Pyx_PyLong_IsCompact(n))) { + return PyLong_FromSize_t(__Pyx_PyLong_CompactValueUnsigned(n)); + } +#else + if (likely(Py_SIZE(n) == -1)) { + // digits are unsigned + return PyLong_FromUnsignedLong(__Pyx_PyLong_Digits(n)[0]); + } +#endif +#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030d0000 + { + PyObject *copy = _PyLong_Copy((PyLongObject*)n); + if (likely(copy)) { + #if PY_VERSION_HEX >= 0x030C00A7 + // clear the sign bits to set the sign from SIGN_NEGATIVE (2) to positive (0) + ((PyLongObject*)copy)->long_value.lv_tag = ((PyLongObject*)copy)->long_value.lv_tag & ~_PyLong_SIGN_MASK; + #else + // negate the size to swap the sign + __Pyx_SET_SIZE(copy, -Py_SIZE(copy)); + #endif + } + return copy; + } +#else + return PyNumber_Negative(n); +#endif +} +#endif + + +//////////////////// pow2.proto //////////////////// + +#define __Pyx_PyNumber_Power2(a, b) PyNumber_Power(a, b, Py_None) + + +//////////////////// int_pyucs4.proto //////////////////// + +static CYTHON_INLINE int __Pyx_int_from_UCS4(Py_UCS4 uchar); + +//////////////////// 
int_pyucs4 //////////////////// + +static int __Pyx_int_from_UCS4(Py_UCS4 uchar) { + int digit = Py_UNICODE_TODIGIT(uchar); + if (unlikely(digit < 0)) { + PyErr_Format(PyExc_ValueError, + "invalid literal for int() with base 10: '%c'", + (int) uchar); + return -1; + } + return digit; +} + + +//////////////////// float_pyucs4.proto //////////////////// + +static CYTHON_INLINE double __Pyx_double_from_UCS4(Py_UCS4 uchar); + +//////////////////// float_pyucs4 //////////////////// + +static double __Pyx_double_from_UCS4(Py_UCS4 uchar) { + double digit = Py_UNICODE_TONUMERIC(uchar); + if (unlikely(digit < 0.0)) { + PyErr_Format(PyExc_ValueError, + "could not convert string to float: '%c'", + (int) uchar); + return -1.0; + } + return digit; +} + + +//////////////////// object_ord.proto //////////////////// +//@requires: TypeConversion.c::UnicodeAsUCS4 + +#if PY_MAJOR_VERSION >= 3 +#define __Pyx_PyObject_Ord(c) \ + (likely(PyUnicode_Check(c)) ? (long)__Pyx_PyUnicode_AsPy_UCS4(c) : __Pyx__PyObject_Ord(c)) +#else +#define __Pyx_PyObject_Ord(c) __Pyx__PyObject_Ord(c) +#endif +static long __Pyx__PyObject_Ord(PyObject* c); /*proto*/ + +//////////////////// object_ord //////////////////// + +static long __Pyx__PyObject_Ord(PyObject* c) { + Py_ssize_t size; + if (PyBytes_Check(c)) { + size = PyBytes_GET_SIZE(c); + if (likely(size == 1)) { + return (unsigned char) PyBytes_AS_STRING(c)[0]; + } +#if PY_MAJOR_VERSION < 3 + } else if (PyUnicode_Check(c)) { + return (long)__Pyx_PyUnicode_AsPy_UCS4(c); +#endif +#if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE)) + } else if (PyByteArray_Check(c)) { + size = PyByteArray_GET_SIZE(c); + if (likely(size == 1)) { + return (unsigned char) PyByteArray_AS_STRING(c)[0]; + } +#endif + } else { + // FIXME: support character buffers - but CPython doesn't support them either + __Pyx_TypeName c_type_name = __Pyx_PyType_GetName(Py_TYPE(c)); + PyErr_Format(PyExc_TypeError, + "ord() expected string of 
length 1, but " __Pyx_FMT_TYPENAME " found", + c_type_name); + __Pyx_DECREF_TypeName(c_type_name); + return (long)(Py_UCS4)-1; + } + PyErr_Format(PyExc_TypeError, + "ord() expected a character, but string of length %zd found", size); + return (long)(Py_UCS4)-1; +} + + +//////////////////// py_dict_keys.proto //////////////////// + +static CYTHON_INLINE PyObject* __Pyx_PyDict_Keys(PyObject* d); /*proto*/ + +//////////////////// py_dict_keys //////////////////// + +static CYTHON_INLINE PyObject* __Pyx_PyDict_Keys(PyObject* d) { + if (PY_MAJOR_VERSION >= 3) + return CALL_UNBOUND_METHOD(PyDict_Type, "keys", d); + else + return PyDict_Keys(d); +} + +//////////////////// py_dict_values.proto //////////////////// + +static CYTHON_INLINE PyObject* __Pyx_PyDict_Values(PyObject* d); /*proto*/ + +//////////////////// py_dict_values //////////////////// + +static CYTHON_INLINE PyObject* __Pyx_PyDict_Values(PyObject* d) { + if (PY_MAJOR_VERSION >= 3) + return CALL_UNBOUND_METHOD(PyDict_Type, "values", d); + else + return PyDict_Values(d); +} + +//////////////////// py_dict_items.proto //////////////////// + +static CYTHON_INLINE PyObject* __Pyx_PyDict_Items(PyObject* d); /*proto*/ + +//////////////////// py_dict_items //////////////////// + +static CYTHON_INLINE PyObject* __Pyx_PyDict_Items(PyObject* d) { + if (PY_MAJOR_VERSION >= 3) + return CALL_UNBOUND_METHOD(PyDict_Type, "items", d); + else + return PyDict_Items(d); +} + +//////////////////// py_dict_iterkeys.proto //////////////////// + +static CYTHON_INLINE PyObject* __Pyx_PyDict_IterKeys(PyObject* d); /*proto*/ + +//////////////////// py_dict_iterkeys //////////////////// + +static CYTHON_INLINE PyObject* __Pyx_PyDict_IterKeys(PyObject* d) { + if (PY_MAJOR_VERSION >= 3) + return CALL_UNBOUND_METHOD(PyDict_Type, "keys", d); + else + return CALL_UNBOUND_METHOD(PyDict_Type, "iterkeys", d); +} + +//////////////////// py_dict_itervalues.proto //////////////////// + +static CYTHON_INLINE PyObject* 
__Pyx_PyDict_IterValues(PyObject* d); /*proto*/ + +//////////////////// py_dict_itervalues //////////////////// + +static CYTHON_INLINE PyObject* __Pyx_PyDict_IterValues(PyObject* d) { + if (PY_MAJOR_VERSION >= 3) + return CALL_UNBOUND_METHOD(PyDict_Type, "values", d); + else + return CALL_UNBOUND_METHOD(PyDict_Type, "itervalues", d); +} + +//////////////////// py_dict_iteritems.proto //////////////////// + +static CYTHON_INLINE PyObject* __Pyx_PyDict_IterItems(PyObject* d); /*proto*/ + +//////////////////// py_dict_iteritems //////////////////// + +static CYTHON_INLINE PyObject* __Pyx_PyDict_IterItems(PyObject* d) { + if (PY_MAJOR_VERSION >= 3) + return CALL_UNBOUND_METHOD(PyDict_Type, "items", d); + else + return CALL_UNBOUND_METHOD(PyDict_Type, "iteritems", d); +} + +//////////////////// py_dict_viewkeys.proto //////////////////// + +static CYTHON_INLINE PyObject* __Pyx_PyDict_ViewKeys(PyObject* d); /*proto*/ + +//////////////////// py_dict_viewkeys //////////////////// + +static CYTHON_INLINE PyObject* __Pyx_PyDict_ViewKeys(PyObject* d) { + if (PY_MAJOR_VERSION >= 3) + return CALL_UNBOUND_METHOD(PyDict_Type, "keys", d); + else + return CALL_UNBOUND_METHOD(PyDict_Type, "viewkeys", d); +} + +//////////////////// py_dict_viewvalues.proto //////////////////// + +static CYTHON_INLINE PyObject* __Pyx_PyDict_ViewValues(PyObject* d); /*proto*/ + +//////////////////// py_dict_viewvalues //////////////////// + +static CYTHON_INLINE PyObject* __Pyx_PyDict_ViewValues(PyObject* d) { + if (PY_MAJOR_VERSION >= 3) + return CALL_UNBOUND_METHOD(PyDict_Type, "values", d); + else + return CALL_UNBOUND_METHOD(PyDict_Type, "viewvalues", d); +} + +//////////////////// py_dict_viewitems.proto //////////////////// + +static CYTHON_INLINE PyObject* __Pyx_PyDict_ViewItems(PyObject* d); /*proto*/ + +//////////////////// py_dict_viewitems //////////////////// + +static CYTHON_INLINE PyObject* __Pyx_PyDict_ViewItems(PyObject* d) { + if (PY_MAJOR_VERSION >= 3) + return 
CALL_UNBOUND_METHOD(PyDict_Type, "items", d); + else + return CALL_UNBOUND_METHOD(PyDict_Type, "viewitems", d); +} + + +//////////////////// pyfrozenset_new.proto //////////////////// + +static CYTHON_INLINE PyObject* __Pyx_PyFrozenSet_New(PyObject* it); + +//////////////////// pyfrozenset_new //////////////////// +//@substitute: naming + +static CYTHON_INLINE PyObject* __Pyx_PyFrozenSet_New(PyObject* it) { + if (it) { + PyObject* result; +#if CYTHON_COMPILING_IN_PYPY + // PyPy currently lacks PyFrozenSet_CheckExact() and PyFrozenSet_New() + PyObject* args; + args = PyTuple_Pack(1, it); + if (unlikely(!args)) + return NULL; + result = PyObject_Call((PyObject*)&PyFrozenSet_Type, args, NULL); + Py_DECREF(args); + return result; +#else + if (PyFrozenSet_CheckExact(it)) { + Py_INCREF(it); + return it; + } + result = PyFrozenSet_New(it); + if (unlikely(!result)) + return NULL; + if ((PY_VERSION_HEX >= 0x031000A1) || likely(PySet_GET_SIZE(result))) + return result; + // empty frozenset is a singleton (on Python <3.10) + // seems wasteful, but CPython does the same + Py_DECREF(result); +#endif + } +#if CYTHON_USE_TYPE_SLOTS + return PyFrozenSet_Type.tp_new(&PyFrozenSet_Type, $empty_tuple, NULL); +#else + return PyObject_Call((PyObject*)&PyFrozenSet_Type, $empty_tuple, NULL); +#endif +} + + +//////////////////// PySet_Update.proto //////////////////// + +static CYTHON_INLINE int __Pyx_PySet_Update(PyObject* set, PyObject* it); /*proto*/ + +//////////////////// PySet_Update //////////////////// + +static CYTHON_INLINE int __Pyx_PySet_Update(PyObject* set, PyObject* it) { + PyObject *retval; + #if CYTHON_USE_TYPE_SLOTS && !CYTHON_COMPILING_IN_PYPY + if (PyAnySet_Check(it)) { + if (PySet_GET_SIZE(it) == 0) + return 0; + // fast and safe case: CPython will update our result set and return it + retval = PySet_Type.tp_as_number->nb_inplace_or(set, it); + if (likely(retval == set)) { + Py_DECREF(retval); + return 0; + } + if (unlikely(!retval)) + return -1; + // unusual result, 
fall through to set.update() call below + Py_DECREF(retval); + } + #endif + retval = CALL_UNBOUND_METHOD(PySet_Type, "update", set, it); + if (unlikely(!retval)) return -1; + Py_DECREF(retval); + return 0; +} + +///////////////// memoryview_get_from_buffer.proto //////////////////// + +// buffer is in limited api from Py3.11 +#if !CYTHON_COMPILING_IN_LIMITED_API || CYTHON_LIMITED_API >= 0x030b0000 +#define __Pyx_PyMemoryView_Get_{{name}}(o) PyMemoryView_GET_BUFFER(o)->{{name}} +#else +{{py: +out_types = dict( + ndim='int', readonly='int', + len='Py_ssize_t', itemsize='Py_ssize_t') +}} // can't get format like this unfortunately. It's unicode via getattr +{{py: out_type = out_types[name]}} +static {{out_type}} __Pyx_PyMemoryView_Get_{{name}}(PyObject *obj); /* proto */ +#endif + +////////////// memoryview_get_from_buffer ///////////////////////// + +#if !CYTHON_COMPILING_IN_LIMITED_API || CYTHON_LIMITED_API >= 0x030b0000 +#else +{{py: +out_types = dict( + ndim='int', readonly='int', + len='Py_ssize_t', itemsize='Py_ssize_t') +}} +{{py: out_type = out_types[name]}} +static {{out_type}} __Pyx_PyMemoryView_Get_{{name}}(PyObject *obj) { + {{out_type}} result; + PyObject *attr = PyObject_GetAttr(obj, PYIDENT("{{name}}")); + if (!attr) { + goto bad; + } +{{if out_type == 'int'}} + // I'm not worrying about overflow here because + // ultimately it comes from a C struct that's an int + result = PyLong_AsLong(attr); +{{elif out_type == 'Py_ssize_t'}} + result = PyLong_AsSsize_t(attr); +{{endif}} + Py_DECREF(attr); + return result; + + bad: + Py_XDECREF(attr); + return -1; +} +#endif diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Utility/CommonStructures.c b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Utility/CommonStructures.c new file mode 100644 index 0000000000000000000000000000000000000000..35875d0ec2473634905d067c875d59091550d4dd --- /dev/null +++ 
b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Utility/CommonStructures.c @@ -0,0 +1,139 @@ +/////////////// FetchSharedCythonModule.proto /////// + +static PyObject *__Pyx_FetchSharedCythonABIModule(void); + +/////////////// FetchSharedCythonModule //////////// + +static PyObject *__Pyx_FetchSharedCythonABIModule(void) { + return __Pyx_PyImport_AddModuleRef((char*) __PYX_ABI_MODULE_NAME); +} + +/////////////// FetchCommonType.proto /////////////// + +#if !CYTHON_USE_TYPE_SPECS +static PyTypeObject* __Pyx_FetchCommonType(PyTypeObject* type); +#else +static PyTypeObject* __Pyx_FetchCommonTypeFromSpec(PyObject *module, PyType_Spec *spec, PyObject *bases); +#endif + +/////////////// FetchCommonType /////////////// +//@requires:ExtensionTypes.c::FixUpExtensionType +//@requires: FetchSharedCythonModule +//@requires:StringTools.c::IncludeStringH + +static int __Pyx_VerifyCachedType(PyObject *cached_type, + const char *name, + Py_ssize_t basicsize, + Py_ssize_t expected_basicsize) { + if (!PyType_Check(cached_type)) { + PyErr_Format(PyExc_TypeError, + "Shared Cython type %.200s is not a type object", name); + return -1; + } + if (basicsize != expected_basicsize) { + PyErr_Format(PyExc_TypeError, + "Shared Cython type %.200s has the wrong size, try recompiling", + name); + return -1; + } + return 0; +} + +#if !CYTHON_USE_TYPE_SPECS +static PyTypeObject* __Pyx_FetchCommonType(PyTypeObject* type) { + PyObject* abi_module; + const char* object_name; + PyTypeObject *cached_type = NULL; + + abi_module = __Pyx_FetchSharedCythonABIModule(); + if (!abi_module) return NULL; + // get the final part of the object name (after the last dot) + object_name = strrchr(type->tp_name, '.'); + object_name = object_name ? 
object_name+1 : type->tp_name; + cached_type = (PyTypeObject*) PyObject_GetAttrString(abi_module, object_name); + if (cached_type) { + if (__Pyx_VerifyCachedType( + (PyObject *)cached_type, + object_name, + cached_type->tp_basicsize, + type->tp_basicsize) < 0) { + goto bad; + } + goto done; + } + + if (!PyErr_ExceptionMatches(PyExc_AttributeError)) goto bad; + PyErr_Clear(); + if (PyType_Ready(type) < 0) goto bad; + if (PyObject_SetAttrString(abi_module, object_name, (PyObject *)type) < 0) + goto bad; + Py_INCREF(type); + cached_type = type; + +done: + Py_DECREF(abi_module); + // NOTE: always returns owned reference, or NULL on error + return cached_type; + +bad: + Py_XDECREF(cached_type); + cached_type = NULL; + goto done; +} +#else + +static PyTypeObject *__Pyx_FetchCommonTypeFromSpec(PyObject *module, PyType_Spec *spec, PyObject *bases) { + PyObject *abi_module, *cached_type = NULL; + // get the final part of the object name (after the last dot) + const char* object_name = strrchr(spec->name, '.'); + object_name = object_name ? object_name+1 : spec->name; + + abi_module = __Pyx_FetchSharedCythonABIModule(); + if (!abi_module) return NULL; + + cached_type = PyObject_GetAttrString(abi_module, object_name); + if (cached_type) { + Py_ssize_t basicsize; +#if CYTHON_COMPILING_IN_LIMITED_API + PyObject *py_basicsize; + py_basicsize = PyObject_GetAttrString(cached_type, "__basicsize__"); + if (unlikely(!py_basicsize)) goto bad; + basicsize = PyLong_AsSsize_t(py_basicsize); + Py_DECREF(py_basicsize); + py_basicsize = 0; + if (unlikely(basicsize == (Py_ssize_t)-1) && PyErr_Occurred()) goto bad; +#else + basicsize = likely(PyType_Check(cached_type)) ? 
((PyTypeObject*) cached_type)->tp_basicsize : -1; +#endif + if (__Pyx_VerifyCachedType( + cached_type, + object_name, + basicsize, + spec->basicsize) < 0) { + goto bad; + } + goto done; + } + + if (!PyErr_ExceptionMatches(PyExc_AttributeError)) goto bad; + PyErr_Clear(); + // We pass the ABI module reference to avoid keeping the user module alive by foreign type usages. + CYTHON_UNUSED_VAR(module); + cached_type = __Pyx_PyType_FromModuleAndSpec(abi_module, spec, bases); + if (unlikely(!cached_type)) goto bad; + if (unlikely(__Pyx_fix_up_extension_type_from_spec(spec, (PyTypeObject *) cached_type) < 0)) goto bad; + if (PyObject_SetAttrString(abi_module, object_name, cached_type) < 0) goto bad; + +done: + Py_DECREF(abi_module); + // NOTE: always returns owned reference, or NULL on error + assert(cached_type == NULL || PyType_Check(cached_type)); + return (PyTypeObject *) cached_type; + +bad: + Py_XDECREF(cached_type); + cached_type = NULL; + goto done; +} +#endif + diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Utility/Coroutine.c b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Utility/Coroutine.c new file mode 100644 index 0000000000000000000000000000000000000000..c7ec8ee9ba08cdd6568bbba113f593ef7ad8026a --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Utility/Coroutine.c @@ -0,0 +1,2635 @@ +//////////////////// GeneratorYieldFrom.proto //////////////////// + +static CYTHON_INLINE PyObject* __Pyx_Generator_Yield_From(__pyx_CoroutineObject *gen, PyObject *source); + +//////////////////// GeneratorYieldFrom //////////////////// +//@requires: Generator + +#if CYTHON_USE_TYPE_SLOTS +static void __Pyx_PyIter_CheckErrorAndDecref(PyObject *source) { + __Pyx_TypeName source_type_name = __Pyx_PyType_GetName(Py_TYPE(source)); + PyErr_Format(PyExc_TypeError, + "iter() returned non-iterator of type '" __Pyx_FMT_TYPENAME "'", source_type_name); + 
__Pyx_DECREF_TypeName(source_type_name); + Py_DECREF(source); +} +#endif + +static CYTHON_INLINE PyObject* __Pyx_Generator_Yield_From(__pyx_CoroutineObject *gen, PyObject *source) { + PyObject *source_gen, *retval; +#ifdef __Pyx_Coroutine_USED + if (__Pyx_Coroutine_Check(source)) { + // TODO: this should only happen for types.coroutine()ed generators, but we can't determine that here + Py_INCREF(source); + source_gen = source; + retval = __Pyx_Generator_Next(source); + } else +#endif + { +#if CYTHON_USE_TYPE_SLOTS + if (likely(Py_TYPE(source)->tp_iter)) { + source_gen = Py_TYPE(source)->tp_iter(source); + if (unlikely(!source_gen)) + return NULL; + if (unlikely(!PyIter_Check(source_gen))) { + __Pyx_PyIter_CheckErrorAndDecref(source_gen); + return NULL; + } + } else + // CPython also allows non-iterable sequences to be iterated over +#endif + { + source_gen = PyObject_GetIter(source); + if (unlikely(!source_gen)) + return NULL; + } + // source_gen is now the iterator, make the first next() call + retval = __Pyx_PyObject_GetIterNextFunc(source_gen)(source_gen); + } + if (likely(retval)) { + gen->yieldfrom = source_gen; + return retval; + } + Py_DECREF(source_gen); + return NULL; +} + + +//////////////////// CoroutineYieldFrom.proto //////////////////// + +static CYTHON_INLINE PyObject* __Pyx_Coroutine_Yield_From(__pyx_CoroutineObject *gen, PyObject *source); + +//////////////////// CoroutineYieldFrom //////////////////// +//@requires: Coroutine +//@requires: GetAwaitIter + +static PyObject* __Pyx__Coroutine_Yield_From_Generic(__pyx_CoroutineObject *gen, PyObject *source) { + PyObject *retval; + PyObject *source_gen = __Pyx__Coroutine_GetAwaitableIter(source); + if (unlikely(!source_gen)) { + return NULL; + } + // source_gen is now the iterator, make the first next() call + if (__Pyx_Coroutine_Check(source_gen)) { + retval = __Pyx_Generator_Next(source_gen); + } else { + retval = __Pyx_PyObject_GetIterNextFunc(source_gen)(source_gen); + } + if (retval) { + 
gen->yieldfrom = source_gen; + return retval; + } + Py_DECREF(source_gen); + return NULL; +} + +static CYTHON_INLINE PyObject* __Pyx_Coroutine_Yield_From(__pyx_CoroutineObject *gen, PyObject *source) { + PyObject *retval; + if (__Pyx_Coroutine_Check(source)) { + if (unlikely(((__pyx_CoroutineObject*)source)->yieldfrom)) { + PyErr_SetString( + PyExc_RuntimeError, + "coroutine is being awaited already"); + return NULL; + } + retval = __Pyx_Generator_Next(source); +#ifdef __Pyx_AsyncGen_USED + // inlined "__pyx_PyAsyncGenASend" handling to avoid the series of generic calls + } else if (__pyx_PyAsyncGenASend_CheckExact(source)) { + retval = __Pyx_async_gen_asend_iternext(source); +#endif + } else { + return __Pyx__Coroutine_Yield_From_Generic(gen, source); + } + if (retval) { + Py_INCREF(source); + gen->yieldfrom = source; + } + return retval; +} + + +//////////////////// GetAwaitIter.proto //////////////////// + +static CYTHON_INLINE PyObject *__Pyx_Coroutine_GetAwaitableIter(PyObject *o); /*proto*/ +static PyObject *__Pyx__Coroutine_GetAwaitableIter(PyObject *o); /*proto*/ + +//////////////////// GetAwaitIter //////////////////// +//@requires: ObjectHandling.c::PyObjectGetMethod +//@requires: ObjectHandling.c::PyObjectCallNoArg +//@requires: ObjectHandling.c::PyObjectCallOneArg + +static CYTHON_INLINE PyObject *__Pyx_Coroutine_GetAwaitableIter(PyObject *o) { +#ifdef __Pyx_Coroutine_USED + if (__Pyx_Coroutine_Check(o)) { + return __Pyx_NewRef(o); + } +#endif + return __Pyx__Coroutine_GetAwaitableIter(o); +} + + +static void __Pyx_Coroutine_AwaitableIterError(PyObject *source) { +#if PY_VERSION_HEX >= 0x030600B3 && PY_VERSION_HEX < 0x030d0000 || defined(_PyErr_FormatFromCause) + __Pyx_TypeName source_type_name = __Pyx_PyType_GetName(Py_TYPE(source)); + _PyErr_FormatFromCause(PyExc_TypeError, + "'async for' received an invalid object from __anext__: " __Pyx_FMT_TYPENAME, source_type_name); + __Pyx_DECREF_TypeName(source_type_name); +#elif PY_MAJOR_VERSION >= 3 + 
PyObject *exc, *val, *val2, *tb; + __Pyx_TypeName source_type_name = __Pyx_PyType_GetName(Py_TYPE(source)); + assert(PyErr_Occurred()); + PyErr_Fetch(&exc, &val, &tb); + PyErr_NormalizeException(&exc, &val, &tb); + if (tb != NULL) { + PyException_SetTraceback(val, tb); + Py_DECREF(tb); + } + Py_DECREF(exc); + assert(!PyErr_Occurred()); + PyErr_Format(PyExc_TypeError, + "'async for' received an invalid object from __anext__: " __Pyx_FMT_TYPENAME, source_type_name); + __Pyx_DECREF_TypeName(source_type_name); + + PyErr_Fetch(&exc, &val2, &tb); + PyErr_NormalizeException(&exc, &val2, &tb); + Py_INCREF(val); + PyException_SetCause(val2, val); + PyException_SetContext(val2, val); + PyErr_Restore(exc, val2, tb); +#else + // since Py2 does not have exception chaining, it's better to avoid shadowing exceptions there + CYTHON_UNUSED_VAR(source); +#endif +} + +// adapted from genobject.c in Py3.5 +static PyObject *__Pyx__Coroutine_GetAwaitableIter(PyObject *obj) { + PyObject *res; +#if CYTHON_USE_ASYNC_SLOTS + __Pyx_PyAsyncMethodsStruct* am = __Pyx_PyType_AsAsync(obj); + if (likely(am && am->am_await)) { + res = (*am->am_await)(obj); + } else +#endif +#if PY_VERSION_HEX >= 0x030500B2 || defined(PyCoro_CheckExact) + if (PyCoro_CheckExact(obj)) { + return __Pyx_NewRef(obj); + } else +#endif +#if CYTHON_COMPILING_IN_CPYTHON && defined(CO_ITERABLE_COROUTINE) +#if PY_VERSION_HEX >= 0x030C00A6 + if (PyGen_CheckExact(obj) && (PyGen_GetCode((PyGenObject*)obj)->co_flags & CO_ITERABLE_COROUTINE)) { +#else + if (PyGen_CheckExact(obj) && ((PyGenObject*)obj)->gi_code && ((PyCodeObject *)((PyGenObject*)obj)->gi_code)->co_flags & CO_ITERABLE_COROUTINE) { +#endif + // Python generator marked with "@types.coroutine" decorator + return __Pyx_NewRef(obj); + } else +#endif + { + PyObject *method = NULL; + int is_method = __Pyx_PyObject_GetMethod(obj, PYIDENT("__await__"), &method); + if (likely(is_method)) { + res = __Pyx_PyObject_CallOneArg(method, obj); + } else if (likely(method)) { + res = 
__Pyx_PyObject_CallNoArg(method); + } else + goto slot_error; + Py_DECREF(method); + } + if (unlikely(!res)) { + // surprisingly, CPython replaces the exception here... + __Pyx_Coroutine_AwaitableIterError(obj); + goto bad; + } + if (unlikely(!PyIter_Check(res))) { + __Pyx_TypeName res_type_name = __Pyx_PyType_GetName(Py_TYPE(res)); + PyErr_Format(PyExc_TypeError, + "__await__() returned non-iterator of type '" __Pyx_FMT_TYPENAME "'", res_type_name); + __Pyx_DECREF_TypeName(res_type_name); + Py_CLEAR(res); + } else { + int is_coroutine = 0; + #ifdef __Pyx_Coroutine_USED + is_coroutine |= __Pyx_Coroutine_Check(res); + #endif + #if PY_VERSION_HEX >= 0x030500B2 || defined(PyCoro_CheckExact) + is_coroutine |= PyCoro_CheckExact(res); + #endif + if (unlikely(is_coroutine)) { + /* __await__ must return an *iterator*, not + a coroutine or another awaitable (see PEP 492) */ + PyErr_SetString(PyExc_TypeError, + "__await__() returned a coroutine"); + Py_CLEAR(res); + } + } + return res; +slot_error: + { + __Pyx_TypeName obj_type_name = __Pyx_PyType_GetName(Py_TYPE(obj)); + PyErr_Format(PyExc_TypeError, + "object " __Pyx_FMT_TYPENAME " can't be used in 'await' expression", obj_type_name); + __Pyx_DECREF_TypeName(obj_type_name); + } +bad: + return NULL; +} + + +//////////////////// AsyncIter.proto //////////////////// + +static CYTHON_INLINE PyObject *__Pyx_Coroutine_GetAsyncIter(PyObject *o); /*proto*/ +static CYTHON_INLINE PyObject *__Pyx_Coroutine_AsyncIterNext(PyObject *o); /*proto*/ + +//////////////////// AsyncIter //////////////////// +//@requires: GetAwaitIter +//@requires: ObjectHandling.c::PyObjectCallMethod0 + +static PyObject *__Pyx_Coroutine_GetAsyncIter_Generic(PyObject *obj) { + __Pyx_TypeName obj_type_name; +#if PY_VERSION_HEX < 0x030500B1 + { + PyObject *iter = __Pyx_PyObject_CallMethod0(obj, PYIDENT("__aiter__")); + if (likely(iter)) + return iter; + // FIXME: for the sake of a nicely conforming exception message, assume any AttributeError meant '__aiter__' + 
if (!PyErr_ExceptionMatches(PyExc_AttributeError)) + return NULL; + } +#else + // avoid C warning about 'unused function' + (void)&__Pyx_PyObject_CallMethod0; +#endif + + obj_type_name = __Pyx_PyType_GetName(Py_TYPE(obj)); + PyErr_Format(PyExc_TypeError, + "'async for' requires an object with __aiter__ method, got " __Pyx_FMT_TYPENAME, obj_type_name); + __Pyx_DECREF_TypeName(obj_type_name); + return NULL; +} + + +static CYTHON_INLINE PyObject *__Pyx_Coroutine_GetAsyncIter(PyObject *obj) { +#ifdef __Pyx_AsyncGen_USED + if (__Pyx_AsyncGen_CheckExact(obj)) { + return __Pyx_NewRef(obj); + } +#endif +#if CYTHON_USE_ASYNC_SLOTS + { + __Pyx_PyAsyncMethodsStruct* am = __Pyx_PyType_AsAsync(obj); + if (likely(am && am->am_aiter)) { + return (*am->am_aiter)(obj); + } + } +#endif + return __Pyx_Coroutine_GetAsyncIter_Generic(obj); +} + + +static PyObject *__Pyx__Coroutine_AsyncIterNext(PyObject *obj) { +#if PY_VERSION_HEX < 0x030500B1 + { + PyObject *value = __Pyx_PyObject_CallMethod0(obj, PYIDENT("__anext__")); + if (likely(value)) + return value; + } + // FIXME: for the sake of a nicely conforming exception message, assume any AttributeError meant '__anext__' + if (PyErr_ExceptionMatches(PyExc_AttributeError)) +#endif + { + __Pyx_TypeName obj_type_name = __Pyx_PyType_GetName(Py_TYPE(obj)); + PyErr_Format(PyExc_TypeError, + "'async for' requires an object with __anext__ method, got " __Pyx_FMT_TYPENAME, obj_type_name); + __Pyx_DECREF_TypeName(obj_type_name); + } + return NULL; +} + + +static CYTHON_INLINE PyObject *__Pyx_Coroutine_AsyncIterNext(PyObject *obj) { +#ifdef __Pyx_AsyncGen_USED + if (__Pyx_AsyncGen_CheckExact(obj)) { + return __Pyx_async_gen_anext(obj); + } +#endif +#if CYTHON_USE_ASYNC_SLOTS + { + __Pyx_PyAsyncMethodsStruct* am = __Pyx_PyType_AsAsync(obj); + if (likely(am && am->am_anext)) { + return (*am->am_anext)(obj); + } + } +#endif + return __Pyx__Coroutine_AsyncIterNext(obj); +} + + +//////////////////// pep479.proto //////////////////// + +static void 
__Pyx_Generator_Replace_StopIteration(int in_async_gen); /*proto*/ + +//////////////////// pep479 //////////////////// +//@requires: Exceptions.c::GetException + +static void __Pyx_Generator_Replace_StopIteration(int in_async_gen) { + PyObject *exc, *val, *tb, *cur_exc; + __Pyx_PyThreadState_declare + #ifdef __Pyx_StopAsyncIteration_USED + int is_async_stopiteration = 0; + #endif + CYTHON_MAYBE_UNUSED_VAR(in_async_gen); + + cur_exc = PyErr_Occurred(); + if (likely(!__Pyx_PyErr_GivenExceptionMatches(cur_exc, PyExc_StopIteration))) { + #ifdef __Pyx_StopAsyncIteration_USED + if (in_async_gen && unlikely(__Pyx_PyErr_GivenExceptionMatches(cur_exc, __Pyx_PyExc_StopAsyncIteration))) { + is_async_stopiteration = 1; + } else + #endif + return; + } + + __Pyx_PyThreadState_assign + // Chain exceptions by moving Stop(Async)Iteration to exc_info before creating the RuntimeError. + // In Py2.x, no chaining happens, but the exception still stays visible in exc_info. + __Pyx_GetException(&exc, &val, &tb); + Py_XDECREF(exc); + Py_XDECREF(val); + Py_XDECREF(tb); + PyErr_SetString(PyExc_RuntimeError, + #ifdef __Pyx_StopAsyncIteration_USED + is_async_stopiteration ? "async generator raised StopAsyncIteration" : + in_async_gen ? "async generator raised StopIteration" : + #endif + "generator raised StopIteration"); +} + + +//////////////////// CoroutineBase.proto //////////////////// +//@substitute: naming + +struct __pyx_CoroutineObject; +typedef PyObject *(*__pyx_coroutine_body_t)(struct __pyx_CoroutineObject *, PyThreadState *, PyObject *); + +#if CYTHON_USE_EXC_INFO_STACK +// See https://bugs.python.org/issue25612 +#define __Pyx_ExcInfoStruct _PyErr_StackItem +#else +// Minimal replacement struct for Py<3.7, without the Py3.7 exception state stack. 
+typedef struct { + PyObject *exc_type; + PyObject *exc_value; + PyObject *exc_traceback; +} __Pyx_ExcInfoStruct; +#endif + +typedef struct __pyx_CoroutineObject { + PyObject_HEAD + __pyx_coroutine_body_t body; + PyObject *closure; + __Pyx_ExcInfoStruct gi_exc_state; + PyObject *gi_weakreflist; + PyObject *classobj; + PyObject *yieldfrom; + PyObject *gi_name; + PyObject *gi_qualname; + PyObject *gi_modulename; + PyObject *gi_code; + PyObject *gi_frame; + int resume_label; + // using T_BOOL for property below requires char value + char is_running; +} __pyx_CoroutineObject; + +static __pyx_CoroutineObject *__Pyx__Coroutine_New( + PyTypeObject *type, __pyx_coroutine_body_t body, PyObject *code, PyObject *closure, + PyObject *name, PyObject *qualname, PyObject *module_name); /*proto*/ + +static __pyx_CoroutineObject *__Pyx__Coroutine_NewInit( + __pyx_CoroutineObject *gen, __pyx_coroutine_body_t body, PyObject *code, PyObject *closure, + PyObject *name, PyObject *qualname, PyObject *module_name); /*proto*/ + +static CYTHON_INLINE void __Pyx_Coroutine_ExceptionClear(__Pyx_ExcInfoStruct *self); +static int __Pyx_Coroutine_clear(PyObject *self); /*proto*/ +static PyObject *__Pyx_Coroutine_Send(PyObject *self, PyObject *value); /*proto*/ +static PyObject *__Pyx_Coroutine_Close(PyObject *self); /*proto*/ +static PyObject *__Pyx_Coroutine_Throw(PyObject *gen, PyObject *args); /*proto*/ + +// macros for exception state swapping instead of inline functions to make use of the local thread state context +#if CYTHON_USE_EXC_INFO_STACK +#define __Pyx_Coroutine_SwapException(self) +#define __Pyx_Coroutine_ResetAndClearException(self) __Pyx_Coroutine_ExceptionClear(&(self)->gi_exc_state) +#else +#define __Pyx_Coroutine_SwapException(self) { \ + __Pyx_ExceptionSwap(&(self)->gi_exc_state.exc_type, &(self)->gi_exc_state.exc_value, &(self)->gi_exc_state.exc_traceback); \ + __Pyx_Coroutine_ResetFrameBackpointer(&(self)->gi_exc_state); \ + } +#define 
__Pyx_Coroutine_ResetAndClearException(self) { \ + __Pyx_ExceptionReset((self)->gi_exc_state.exc_type, (self)->gi_exc_state.exc_value, (self)->gi_exc_state.exc_traceback); \ + (self)->gi_exc_state.exc_type = (self)->gi_exc_state.exc_value = (self)->gi_exc_state.exc_traceback = NULL; \ + } +#endif + +#if CYTHON_FAST_THREAD_STATE +#define __Pyx_PyGen_FetchStopIterationValue(pvalue) \ + __Pyx_PyGen__FetchStopIterationValue($local_tstate_cname, pvalue) +#else +#define __Pyx_PyGen_FetchStopIterationValue(pvalue) \ + __Pyx_PyGen__FetchStopIterationValue(__Pyx_PyThreadState_Current, pvalue) +#endif +static int __Pyx_PyGen__FetchStopIterationValue(PyThreadState *tstate, PyObject **pvalue); /*proto*/ +static CYTHON_INLINE void __Pyx_Coroutine_ResetFrameBackpointer(__Pyx_ExcInfoStruct *exc_state); /*proto*/ + + +//////////////////// Coroutine.proto //////////////////// + +#define __Pyx_Coroutine_USED +#define __Pyx_Coroutine_CheckExact(obj) __Pyx_IS_TYPE(obj, __pyx_CoroutineType) +// __Pyx_Coroutine_Check(obj): see override for IterableCoroutine below +#define __Pyx_Coroutine_Check(obj) __Pyx_Coroutine_CheckExact(obj) +#define __Pyx_CoroutineAwait_CheckExact(obj) __Pyx_IS_TYPE(obj, __pyx_CoroutineAwaitType) + +#define __Pyx_Coroutine_New(body, code, closure, name, qualname, module_name) \ + __Pyx__Coroutine_New(__pyx_CoroutineType, body, code, closure, name, qualname, module_name) + +static int __pyx_Coroutine_init(PyObject *module); /*proto*/ +static PyObject *__Pyx__Coroutine_await(PyObject *coroutine); /*proto*/ + +typedef struct { + PyObject_HEAD + PyObject *coroutine; +} __pyx_CoroutineAwaitObject; + +static PyObject *__Pyx_CoroutineAwait_Close(__pyx_CoroutineAwaitObject *self, PyObject *arg); /*proto*/ +static PyObject *__Pyx_CoroutineAwait_Throw(__pyx_CoroutineAwaitObject *self, PyObject *args); /*proto*/ + + +//////////////////// Generator.proto //////////////////// + +#define __Pyx_Generator_USED +#define __Pyx_Generator_CheckExact(obj) __Pyx_IS_TYPE(obj, 
__pyx_GeneratorType) + +#define __Pyx_Generator_New(body, code, closure, name, qualname, module_name) \ + __Pyx__Coroutine_New(__pyx_GeneratorType, body, code, closure, name, qualname, module_name) + +static PyObject *__Pyx_Generator_Next(PyObject *self); +static int __pyx_Generator_init(PyObject *module); /*proto*/ + + +//////////////////// AsyncGen //////////////////// +//@requires: AsyncGen.c::AsyncGenerator +// -> empty, only delegates to separate file + + +//////////////////// CoroutineBase //////////////////// +//@substitute: naming +//@requires: Exceptions.c::PyErrFetchRestore +//@requires: Exceptions.c::PyThreadStateGet +//@requires: Exceptions.c::SwapException +//@requires: Exceptions.c::RaiseException +//@requires: Exceptions.c::SaveResetException +//@requires: ObjectHandling.c::PyObjectCallMethod1 +//@requires: ObjectHandling.c::PyObjectCallNoArg +//@requires: ObjectHandling.c::PyObjectCallOneArg +//@requires: ObjectHandling.c::PyObjectFastCall +//@requires: ObjectHandling.c::PyObjectGetAttrStr +//@requires: ObjectHandling.c::PyObjectGetAttrStrNoError +//@requires: CommonStructures.c::FetchCommonType +//@requires: ModuleSetupCode.c::IncludeStructmemberH + +#include +#if PY_VERSION_HEX >= 0x030b00a6 + #ifndef Py_BUILD_CORE + #define Py_BUILD_CORE 1 + #endif + #include "internal/pycore_frame.h" +#endif + +#define __Pyx_Coroutine_Undelegate(gen) Py_CLEAR((gen)->yieldfrom) + +// If StopIteration exception is set, fetches its 'value' +// attribute if any, otherwise sets pvalue to None. +// +// Returns 0 if no exception or StopIteration is set. +// If any other exception is set, returns -1 and leaves +// pvalue unchanged. 
+static int __Pyx_PyGen__FetchStopIterationValue(PyThreadState *$local_tstate_cname, PyObject **pvalue) { + PyObject *et, *ev, *tb; + PyObject *value = NULL; + CYTHON_UNUSED_VAR($local_tstate_cname); + + __Pyx_ErrFetch(&et, &ev, &tb); + + if (!et) { + Py_XDECREF(tb); + Py_XDECREF(ev); + Py_INCREF(Py_None); + *pvalue = Py_None; + return 0; + } + + // most common case: plain StopIteration without or with separate argument + if (likely(et == PyExc_StopIteration)) { + if (!ev) { + Py_INCREF(Py_None); + value = Py_None; + } +#if PY_VERSION_HEX >= 0x030300A0 + else if (likely(__Pyx_IS_TYPE(ev, (PyTypeObject*)PyExc_StopIteration))) { + value = ((PyStopIterationObject *)ev)->value; + Py_INCREF(value); + Py_DECREF(ev); + } +#endif + // PyErr_SetObject() and friends put the value directly into ev + else if (unlikely(PyTuple_Check(ev))) { + // if it's a tuple, it is interpreted as separate constructor arguments (surprise!) + if (PyTuple_GET_SIZE(ev) >= 1) { +#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + value = PyTuple_GET_ITEM(ev, 0); + Py_INCREF(value); +#else + value = PySequence_ITEM(ev, 0); +#endif + } else { + Py_INCREF(Py_None); + value = Py_None; + } + Py_DECREF(ev); + } + else if (!__Pyx_TypeCheck(ev, (PyTypeObject*)PyExc_StopIteration)) { + // 'steal' reference to ev + value = ev; + } + if (likely(value)) { + Py_XDECREF(tb); + Py_DECREF(et); + *pvalue = value; + return 0; + } + } else if (!__Pyx_PyErr_GivenExceptionMatches(et, PyExc_StopIteration)) { + __Pyx_ErrRestore(et, ev, tb); + return -1; + } + + // otherwise: normalise and check what that gives us + PyErr_NormalizeException(&et, &ev, &tb); + if (unlikely(!PyObject_TypeCheck(ev, (PyTypeObject*)PyExc_StopIteration))) { + // looks like normalisation failed - raise the new exception + __Pyx_ErrRestore(et, ev, tb); + return -1; + } + Py_XDECREF(tb); + Py_DECREF(et); +#if PY_VERSION_HEX >= 0x030300A0 + value = ((PyStopIterationObject *)ev)->value; + Py_INCREF(value); + Py_DECREF(ev); +#else + { + 
PyObject* args = __Pyx_PyObject_GetAttrStr(ev, PYIDENT("args")); + Py_DECREF(ev); + if (likely(args)) { + value = PySequence_GetItem(args, 0); + Py_DECREF(args); + } + if (unlikely(!value)) { + __Pyx_ErrRestore(NULL, NULL, NULL); + Py_INCREF(Py_None); + value = Py_None; + } + } +#endif + *pvalue = value; + return 0; +} + +static CYTHON_INLINE +void __Pyx_Coroutine_ExceptionClear(__Pyx_ExcInfoStruct *exc_state) { +#if PY_VERSION_HEX >= 0x030B00a4 + Py_CLEAR(exc_state->exc_value); +#else + PyObject *t, *v, *tb; + t = exc_state->exc_type; + v = exc_state->exc_value; + tb = exc_state->exc_traceback; + + exc_state->exc_type = NULL; + exc_state->exc_value = NULL; + exc_state->exc_traceback = NULL; + + Py_XDECREF(t); + Py_XDECREF(v); + Py_XDECREF(tb); +#endif +} + +#define __Pyx_Coroutine_AlreadyRunningError(gen) (__Pyx__Coroutine_AlreadyRunningError(gen), (PyObject*)NULL) +static void __Pyx__Coroutine_AlreadyRunningError(__pyx_CoroutineObject *gen) { + const char *msg; + CYTHON_MAYBE_UNUSED_VAR(gen); + if ((0)) { + #ifdef __Pyx_Coroutine_USED + } else if (__Pyx_Coroutine_Check((PyObject*)gen)) { + msg = "coroutine already executing"; + #endif + #ifdef __Pyx_AsyncGen_USED + } else if (__Pyx_AsyncGen_CheckExact((PyObject*)gen)) { + msg = "async generator already executing"; + #endif + } else { + msg = "generator already executing"; + } + PyErr_SetString(PyExc_ValueError, msg); +} + +#define __Pyx_Coroutine_NotStartedError(gen) (__Pyx__Coroutine_NotStartedError(gen), (PyObject*)NULL) +static void __Pyx__Coroutine_NotStartedError(PyObject *gen) { + const char *msg; + CYTHON_MAYBE_UNUSED_VAR(gen); + if ((0)) { + #ifdef __Pyx_Coroutine_USED + } else if (__Pyx_Coroutine_Check(gen)) { + msg = "can't send non-None value to a just-started coroutine"; + #endif + #ifdef __Pyx_AsyncGen_USED + } else if (__Pyx_AsyncGen_CheckExact(gen)) { + msg = "can't send non-None value to a just-started async generator"; + #endif + } else { + msg = "can't send non-None value to a just-started 
generator"; + } + PyErr_SetString(PyExc_TypeError, msg); +} + +#define __Pyx_Coroutine_AlreadyTerminatedError(gen, value, closing) (__Pyx__Coroutine_AlreadyTerminatedError(gen, value, closing), (PyObject*)NULL) +static void __Pyx__Coroutine_AlreadyTerminatedError(PyObject *gen, PyObject *value, int closing) { + CYTHON_MAYBE_UNUSED_VAR(gen); + CYTHON_MAYBE_UNUSED_VAR(closing); + #ifdef __Pyx_Coroutine_USED + if (!closing && __Pyx_Coroutine_Check(gen)) { + // `self` is an exhausted coroutine: raise an error, + // except when called from gen_close(), which should + // always be a silent method. + PyErr_SetString(PyExc_RuntimeError, "cannot reuse already awaited coroutine"); + } else + #endif + if (value) { + // `gen` is an exhausted generator: + // only set exception if called from send(). + #ifdef __Pyx_AsyncGen_USED + if (__Pyx_AsyncGen_CheckExact(gen)) + PyErr_SetNone(__Pyx_PyExc_StopAsyncIteration); + else + #endif + PyErr_SetNone(PyExc_StopIteration); + } +} + +static +PyObject *__Pyx_Coroutine_SendEx(__pyx_CoroutineObject *self, PyObject *value, int closing) { + __Pyx_PyThreadState_declare + PyThreadState *tstate; + __Pyx_ExcInfoStruct *exc_state; + PyObject *retval; + + assert(!self->is_running); + + if (unlikely(self->resume_label == 0)) { + if (unlikely(value && value != Py_None)) { + return __Pyx_Coroutine_NotStartedError((PyObject*)self); + } + } + + if (unlikely(self->resume_label == -1)) { + return __Pyx_Coroutine_AlreadyTerminatedError((PyObject*)self, value, closing); + } + +#if CYTHON_FAST_THREAD_STATE + __Pyx_PyThreadState_assign + tstate = $local_tstate_cname; +#else + tstate = __Pyx_PyThreadState_Current; +#endif + + // Traceback/Frame rules pre-Py3.7: + // - on entry, save external exception state in self->gi_exc_state, restore it on exit + // - on exit, keep internally generated exceptions in self->gi_exc_state, clear everything else + // - on entry, set "f_back" pointer of internal exception traceback to (current) outer call frame + // - on exit, 
clear "f_back" of internal exception traceback + // - do not touch external frames and tracebacks + + // Traceback/Frame rules for Py3.7+ (CYTHON_USE_EXC_INFO_STACK): + // - on entry, push internal exception state in self->gi_exc_state on the exception stack + // - on exit, keep internally generated exceptions in self->gi_exc_state, clear everything else + // - on entry, set "f_back" pointer of internal exception traceback to (current) outer call frame + // - on exit, clear "f_back" of internal exception traceback + // - do not touch external frames and tracebacks + + exc_state = &self->gi_exc_state; + if (exc_state->exc_value) { + #if CYTHON_COMPILING_IN_PYPY + // FIXME: what to do in PyPy? + #else + // Generators always return to their most recent caller, not + // necessarily their creator. + PyObject *exc_tb; + #if PY_VERSION_HEX >= 0x030B00a4 && !CYTHON_COMPILING_IN_CPYTHON + // owned reference! + exc_tb = PyException_GetTraceback(exc_state->exc_value); + #elif PY_VERSION_HEX >= 0x030B00a4 + exc_tb = ((PyBaseExceptionObject*) exc_state->exc_value)->traceback; + #else + exc_tb = exc_state->exc_traceback; + #endif + if (exc_tb) { + PyTracebackObject *tb = (PyTracebackObject *) exc_tb; + PyFrameObject *f = tb->tb_frame; + + assert(f->f_back == NULL); + #if PY_VERSION_HEX >= 0x030B00A1 + // PyThreadState_GetFrame returns NULL if there isn't a current frame + // which is a valid state so no need to check + f->f_back = PyThreadState_GetFrame(tstate); + #else + Py_XINCREF(tstate->frame); + f->f_back = tstate->frame; + #endif + #if PY_VERSION_HEX >= 0x030B00a4 && !CYTHON_COMPILING_IN_CPYTHON + Py_DECREF(exc_tb); + #endif + } + #endif + } + +#if CYTHON_USE_EXC_INFO_STACK + // See https://bugs.python.org/issue25612 + exc_state->previous_item = tstate->exc_info; + tstate->exc_info = exc_state; +#else + if (exc_state->exc_type) { + // We were in an except handler when we left, + // restore the exception state which was put aside. 
+ __Pyx_ExceptionSwap(&exc_state->exc_type, &exc_state->exc_value, &exc_state->exc_traceback); + // self->exc_* now holds the exception state of the caller + } else { + // save away the exception state of the caller + __Pyx_Coroutine_ExceptionClear(exc_state); + __Pyx_ExceptionSave(&exc_state->exc_type, &exc_state->exc_value, &exc_state->exc_traceback); + } +#endif + + self->is_running = 1; + retval = self->body(self, tstate, value); + self->is_running = 0; + +#if CYTHON_USE_EXC_INFO_STACK + // See https://bugs.python.org/issue25612 + exc_state = &self->gi_exc_state; + tstate->exc_info = exc_state->previous_item; + exc_state->previous_item = NULL; + // Cut off the exception frame chain so that we can reconnect it on re-entry above. + __Pyx_Coroutine_ResetFrameBackpointer(exc_state); +#endif + + return retval; +} + +static CYTHON_INLINE void __Pyx_Coroutine_ResetFrameBackpointer(__Pyx_ExcInfoStruct *exc_state) { + // Don't keep the reference to f_back any longer than necessary. It + // may keep a chain of frames alive or it could create a reference + // cycle. +#if CYTHON_COMPILING_IN_PYPY + // FIXME: what to do in PyPy? + CYTHON_UNUSED_VAR(exc_state); +#else + PyObject *exc_tb; + + #if PY_VERSION_HEX >= 0x030B00a4 + if (!exc_state->exc_value) return; + // owned reference! 
    exc_tb = PyException_GetTraceback(exc_state->exc_value);
    #else
    exc_tb = exc_state->exc_traceback;
    #endif

    if (likely(exc_tb)) {
        PyTracebackObject *tb = (PyTracebackObject *) exc_tb;
        PyFrameObject *f = tb->tb_frame;
        // Drop the back-pointer that SendEx() installed on entry, so a paused
        // generator's saved traceback does not keep the caller's frame chain alive.
        Py_CLEAR(f->f_back);
        #if PY_VERSION_HEX >= 0x030B00a4
        // balance the owned reference returned by PyException_GetTraceback()
        Py_DECREF(exc_tb);
        #endif
    }
#endif
}

// Normalise the result of a generator/coroutine method call: a NULL return
// with no exception pending is converted into StopIteration (or
// StopAsyncIteration for async generators), as the iterator protocol requires.
static CYTHON_INLINE
PyObject *__Pyx_Coroutine_MethodReturn(PyObject* gen, PyObject *retval) {
    CYTHON_MAYBE_UNUSED_VAR(gen);
    if (unlikely(!retval)) {
        __Pyx_PyThreadState_declare
        __Pyx_PyThreadState_assign
        if (!__Pyx_PyErr_Occurred()) {
            // method call must not terminate with NULL without setting an exception
            PyObject *exc = PyExc_StopIteration;
            #ifdef __Pyx_AsyncGen_USED
            if (__Pyx_AsyncGen_CheckExact(gen))
                exc = __Pyx_PyExc_StopAsyncIteration;
            #endif
            __Pyx_PyErr_SetNone(exc);
        }
    }
    return retval;
}

// Version-compatibility wrapper around CPython's generator "send": calls the
// private _PyGen_Send() where it exists, and on newer CPython emulates its
// StopIteration-raising behaviour on top of the public PyIter_Send().
#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x03030000 && (defined(__linux__) || PY_VERSION_HEX >= 0x030600B3)
static CYTHON_INLINE
PyObject *__Pyx_PyGen_Send(PyGenObject *gen, PyObject *arg) {
#if PY_VERSION_HEX <= 0x030A00A1
    return _PyGen_Send(gen, arg);
#else
    PyObject *result;
    // PyIter_Send() asserts non-NULL arg
    if (PyIter_Send((PyObject*)gen, arg ?
arg : Py_None, &result) == PYGEN_RETURN) { + if (PyAsyncGen_CheckExact(gen)) { + assert(result == Py_None); + PyErr_SetNone(PyExc_StopAsyncIteration); + } + else if (result == Py_None) { + PyErr_SetNone(PyExc_StopIteration); + } + else { +#if PY_VERSION_HEX < 0x030d00A1 + _PyGen_SetStopIterationValue(result); +#else + if (!PyTuple_Check(result) && !PyExceptionInstance_Check(result)) { + // delay instantiation if possible + PyErr_SetObject(PyExc_StopIteration, result); + } else { + PyObject *exc = __Pyx_PyObject_CallOneArg(PyExc_StopIteration, result); + if (likely(exc != NULL)) { + PyErr_SetObject(PyExc_StopIteration, exc); + Py_DECREF(exc); + } + } +#endif + } + Py_DECREF(result); + result = NULL; + } + return result; +#endif +} +#endif + +static CYTHON_INLINE +PyObject *__Pyx_Coroutine_FinishDelegation(__pyx_CoroutineObject *gen) { + PyObject *ret; + PyObject *val = NULL; + __Pyx_Coroutine_Undelegate(gen); + __Pyx_PyGen__FetchStopIterationValue(__Pyx_PyThreadState_Current, &val); + // val == NULL on failure => pass on exception + ret = __Pyx_Coroutine_SendEx(gen, val, 0); + Py_XDECREF(val); + return ret; +} + +static PyObject *__Pyx_Coroutine_Send(PyObject *self, PyObject *value) { + PyObject *retval; + __pyx_CoroutineObject *gen = (__pyx_CoroutineObject*) self; + PyObject *yf = gen->yieldfrom; + if (unlikely(gen->is_running)) + return __Pyx_Coroutine_AlreadyRunningError(gen); + if (yf) { + PyObject *ret; + // FIXME: does this really need an INCREF() ? 
+ //Py_INCREF(yf); + gen->is_running = 1; + #ifdef __Pyx_Generator_USED + if (__Pyx_Generator_CheckExact(yf)) { + ret = __Pyx_Coroutine_Send(yf, value); + } else + #endif + #ifdef __Pyx_Coroutine_USED + if (__Pyx_Coroutine_Check(yf)) { + ret = __Pyx_Coroutine_Send(yf, value); + } else + #endif + #ifdef __Pyx_AsyncGen_USED + if (__pyx_PyAsyncGenASend_CheckExact(yf)) { + ret = __Pyx_async_gen_asend_send(yf, value); + } else + #endif + #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x03030000 && (defined(__linux__) || PY_VERSION_HEX >= 0x030600B3) + // _PyGen_Send() is not exported before Py3.6 + if (PyGen_CheckExact(yf)) { + ret = __Pyx_PyGen_Send((PyGenObject*)yf, value == Py_None ? NULL : value); + } else + #endif + #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x03050000 && defined(PyCoro_CheckExact) && (defined(__linux__) || PY_VERSION_HEX >= 0x030600B3) + // _PyGen_Send() is not exported before Py3.6 + if (PyCoro_CheckExact(yf)) { + ret = __Pyx_PyGen_Send((PyGenObject*)yf, value == Py_None ? NULL : value); + } else + #endif + { + if (value == Py_None) + ret = __Pyx_PyObject_GetIterNextFunc(yf)(yf); + else + ret = __Pyx_PyObject_CallMethod1(yf, PYIDENT("send"), value); + } + gen->is_running = 0; + //Py_DECREF(yf); + if (likely(ret)) { + return ret; + } + retval = __Pyx_Coroutine_FinishDelegation(gen); + } else { + retval = __Pyx_Coroutine_SendEx(gen, value, 0); + } + return __Pyx_Coroutine_MethodReturn(self, retval); +} + +// This helper function is used by gen_close and gen_throw to +// close a subiterator being delegated to by yield-from. 
static int __Pyx_Coroutine_CloseIter(__pyx_CoroutineObject *gen, PyObject *yf) {
    PyObject *retval = NULL;
    int err = 0;

    // Dispatch on the concrete sub-iterator type; Cython's own generator and
    // coroutine types are closed directly, everything else via its close() method.
    #ifdef __Pyx_Generator_USED
    if (__Pyx_Generator_CheckExact(yf)) {
        retval = __Pyx_Coroutine_Close(yf);
        if (!retval)
            return -1;
    } else
    #endif
    #ifdef __Pyx_Coroutine_USED
    if (__Pyx_Coroutine_Check(yf)) {
        retval = __Pyx_Coroutine_Close(yf);
        if (!retval)
            return -1;
    } else
    if (__Pyx_CoroutineAwait_CheckExact(yf)) {
        retval = __Pyx_CoroutineAwait_Close((__pyx_CoroutineAwaitObject*)yf, NULL);
        if (!retval)
            return -1;
    } else
    #endif
    #ifdef __Pyx_AsyncGen_USED
    if (__pyx_PyAsyncGenASend_CheckExact(yf)) {
        retval = __Pyx_async_gen_asend_close(yf, NULL);
        // cannot fail
    } else
    if (__pyx_PyAsyncGenAThrow_CheckExact(yf)) {
        retval = __Pyx_async_gen_athrow_close(yf, NULL);
        // cannot fail
    } else
    #endif
    {
        // Generic fallback: look up and call yf.close(); the generator is
        // marked as running around the call to block re-entrancy.
        PyObject *meth;
        gen->is_running = 1;
        meth = __Pyx_PyObject_GetAttrStrNoError(yf, PYIDENT("close"));
        if (unlikely(!meth)) {
            // a missing close() is fine; any other lookup error is unraisable here
            if (unlikely(PyErr_Occurred())) {
                PyErr_WriteUnraisable(yf);
            }
        } else {
            retval = __Pyx_PyObject_CallNoArg(meth);
            Py_DECREF(meth);
            if (unlikely(!retval))
                err = -1;
        }
        gen->is_running = 0;
    }
    Py_XDECREF(retval);
    return err;
}

// tp_iternext implementation: advance the generator, delegating to a
// "yield from" sub-iterator when one is active.
static PyObject *__Pyx_Generator_Next(PyObject *self) {
    __pyx_CoroutineObject *gen = (__pyx_CoroutineObject*) self;
    PyObject *yf = gen->yieldfrom;
    if (unlikely(gen->is_running))
        return __Pyx_Coroutine_AlreadyRunningError(gen);
    if (yf) {
        PyObject *ret;
        // FIXME: does this really need an INCREF() ?
        //Py_INCREF(yf);
        // YieldFrom code ensures that yf is an iterator
        gen->is_running = 1;
        #ifdef __Pyx_Generator_USED
        if (__Pyx_Generator_CheckExact(yf)) {
            ret = __Pyx_Generator_Next(yf);
        } else
        #endif
        #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x03030000 && (defined(__linux__) || PY_VERSION_HEX >= 0x030600B3)
        // _PyGen_Send() is not exported before Py3.6
        if (PyGen_CheckExact(yf)) {
            ret = __Pyx_PyGen_Send((PyGenObject*)yf, NULL);
        } else
        #endif
        #ifdef __Pyx_Coroutine_USED
        if (__Pyx_Coroutine_Check(yf)) {
            ret = __Pyx_Coroutine_Send(yf, Py_None);
        } else
        #endif
        // generic fallback: call the sub-iterator's tp_iternext slot
        ret = __Pyx_PyObject_GetIterNextFunc(yf)(yf);
        gen->is_running = 0;
        //Py_DECREF(yf);
        if (likely(ret)) {
            return ret;
        }
        // sub-iterator finished (or failed): stop delegating and resume the
        // generator body with the StopIteration value (or pass the error on)
        return __Pyx_Coroutine_FinishDelegation(gen);
    }
    return __Pyx_Coroutine_SendEx(gen, Py_None, 0);
}

// Python-visible close() method wrapper (the METH_NOARGS argument is unused).
static PyObject *__Pyx_Coroutine_Close_Method(PyObject *self, PyObject *arg) {
    CYTHON_UNUSED_VAR(arg);
    return __Pyx_Coroutine_Close(self);
}

// Implements generator.close(): close any delegated sub-iterator, then throw
// GeneratorExit into the generator body. Returns None on success, NULL on error.
static PyObject *__Pyx_Coroutine_Close(PyObject *self) {
    __pyx_CoroutineObject *gen = (__pyx_CoroutineObject *) self;
    PyObject *retval, *raised_exception;
    PyObject *yf = gen->yieldfrom;
    int err = 0;

    if (unlikely(gen->is_running))
        return __Pyx_Coroutine_AlreadyRunningError(gen);

    if (yf) {
        // hold yf alive across the nested close, then end the delegation
        Py_INCREF(yf);
        err = __Pyx_Coroutine_CloseIter(gen, yf);
        __Pyx_Coroutine_Undelegate(gen);
        Py_DECREF(yf);
    }
    if (err == 0)
        PyErr_SetNone(PyExc_GeneratorExit);
    retval = __Pyx_Coroutine_SendEx(gen, NULL, 1);
    if (unlikely(retval)) {
        // the body yielded a value instead of exiting: that is an error
        const char *msg;
        Py_DECREF(retval);
        if ((0)) {
        #ifdef __Pyx_Coroutine_USED
        } else if (__Pyx_Coroutine_Check(self)) {
            msg = "coroutine ignored GeneratorExit";
        #endif
        #ifdef __Pyx_AsyncGen_USED
        } else if (__Pyx_AsyncGen_CheckExact(self)) {
#if PY_VERSION_HEX < 0x03060000
            msg = "async generator ignored GeneratorExit - might require Python 3.6+ finalisation (PEP 525)";
#else
            msg = "async generator ignored GeneratorExit";
+#endif + #endif + } else { + msg = "generator ignored GeneratorExit"; + } + PyErr_SetString(PyExc_RuntimeError, msg); + return NULL; + } + raised_exception = PyErr_Occurred(); + if (likely(!raised_exception || __Pyx_PyErr_GivenExceptionMatches2(raised_exception, PyExc_GeneratorExit, PyExc_StopIteration))) { + // ignore these errors + if (raised_exception) PyErr_Clear(); + Py_INCREF(Py_None); + return Py_None; + } + return NULL; +} + +static PyObject *__Pyx__Coroutine_Throw(PyObject *self, PyObject *typ, PyObject *val, PyObject *tb, + PyObject *args, int close_on_genexit) { + __pyx_CoroutineObject *gen = (__pyx_CoroutineObject *) self; + PyObject *yf = gen->yieldfrom; + + if (unlikely(gen->is_running)) + return __Pyx_Coroutine_AlreadyRunningError(gen); + + if (yf) { + PyObject *ret; + Py_INCREF(yf); + if (__Pyx_PyErr_GivenExceptionMatches(typ, PyExc_GeneratorExit) && close_on_genexit) { + // Asynchronous generators *should not* be closed right away. + // We have to allow some awaits to work it through, hence the + // `close_on_genexit` parameter here. 
+ int err = __Pyx_Coroutine_CloseIter(gen, yf); + Py_DECREF(yf); + __Pyx_Coroutine_Undelegate(gen); + if (err < 0) + return __Pyx_Coroutine_MethodReturn(self, __Pyx_Coroutine_SendEx(gen, NULL, 0)); + goto throw_here; + } + gen->is_running = 1; + if (0 + #ifdef __Pyx_Generator_USED + || __Pyx_Generator_CheckExact(yf) + #endif + #ifdef __Pyx_Coroutine_USED + || __Pyx_Coroutine_Check(yf) + #endif + ) { + ret = __Pyx__Coroutine_Throw(yf, typ, val, tb, args, close_on_genexit); + #ifdef __Pyx_Coroutine_USED + } else if (__Pyx_CoroutineAwait_CheckExact(yf)) { + ret = __Pyx__Coroutine_Throw(((__pyx_CoroutineAwaitObject*)yf)->coroutine, typ, val, tb, args, close_on_genexit); + #endif + } else { + PyObject *meth = __Pyx_PyObject_GetAttrStrNoError(yf, PYIDENT("throw")); + if (unlikely(!meth)) { + Py_DECREF(yf); + if (unlikely(PyErr_Occurred())) { + gen->is_running = 0; + return NULL; + } + __Pyx_Coroutine_Undelegate(gen); + gen->is_running = 0; + goto throw_here; + } + if (likely(args)) { + ret = __Pyx_PyObject_Call(meth, args, NULL); + } else { + // "tb" or even "val" might be NULL, but that also correctly terminates the argument list + PyObject *cargs[4] = {NULL, typ, val, tb}; + ret = __Pyx_PyObject_FastCall(meth, cargs+1, 3 | __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET); + } + Py_DECREF(meth); + } + gen->is_running = 0; + Py_DECREF(yf); + if (!ret) { + ret = __Pyx_Coroutine_FinishDelegation(gen); + } + return __Pyx_Coroutine_MethodReturn(self, ret); + } +throw_here: + __Pyx_Raise(typ, val, tb, NULL); + return __Pyx_Coroutine_MethodReturn(self, __Pyx_Coroutine_SendEx(gen, NULL, 0)); +} + +static PyObject *__Pyx_Coroutine_Throw(PyObject *self, PyObject *args) { + PyObject *typ; + PyObject *val = NULL; + PyObject *tb = NULL; + + if (unlikely(!PyArg_UnpackTuple(args, (char *)"throw", 1, 3, &typ, &val, &tb))) + return NULL; + + return __Pyx__Coroutine_Throw(self, typ, val, tb, args, 1); +} + +static CYTHON_INLINE int __Pyx_Coroutine_traverse_excstate(__Pyx_ExcInfoStruct *exc_state, 
                                                                visitproc visit, void *arg) {
// GC traversal of the saved exception state; pre-3.11 keeps three separate
// slots (type/value/traceback), 3.11+ only the exception value.
#if PY_VERSION_HEX >= 0x030B00a4
    Py_VISIT(exc_state->exc_value);
#else
    Py_VISIT(exc_state->exc_type);
    Py_VISIT(exc_state->exc_value);
    Py_VISIT(exc_state->exc_traceback);
#endif
    return 0;
}

// tp_traverse: report all PyObject references owned by the generator to the GC.
static int __Pyx_Coroutine_traverse(__pyx_CoroutineObject *gen, visitproc visit, void *arg) {
    Py_VISIT(gen->closure);
    Py_VISIT(gen->classobj);
    Py_VISIT(gen->yieldfrom);
    return __Pyx_Coroutine_traverse_excstate(&gen->gi_exc_state, visit, arg);
}

// tp_clear: drop all owned references so reference cycles can be collected.
static int __Pyx_Coroutine_clear(PyObject *self) {
    __pyx_CoroutineObject *gen = (__pyx_CoroutineObject *) self;

    Py_CLEAR(gen->closure);
    Py_CLEAR(gen->classobj);
    Py_CLEAR(gen->yieldfrom);
    __Pyx_Coroutine_ExceptionClear(&gen->gi_exc_state);
#ifdef __Pyx_AsyncGen_USED
    if (__Pyx_AsyncGen_CheckExact(self)) {
        Py_CLEAR(((__pyx_PyAsyncGenObject*)gen)->ag_finalizer);
    }
#endif
    Py_CLEAR(gen->gi_code);
    Py_CLEAR(gen->gi_frame);
    Py_CLEAR(gen->gi_name);
    Py_CLEAR(gen->gi_qualname);
    Py_CLEAR(gen->gi_modulename);
    return 0;
}

// tp_dealloc: if the generator is still unstarted/suspended, run finalisation
// (which may resurrect the object) before actually freeing it.
static void __Pyx_Coroutine_dealloc(PyObject *self) {
    __pyx_CoroutineObject *gen = (__pyx_CoroutineObject *) self;

    PyObject_GC_UnTrack(gen);
    if (gen->gi_weakreflist != NULL)
        PyObject_ClearWeakRefs(self);

    if (gen->resume_label >= 0) {
        // Generator is paused or unstarted, so we need to close
        // re-track while running finaliser code, which may trigger the GC
        PyObject_GC_Track(self);
#if PY_VERSION_HEX >= 0x030400a1 && CYTHON_USE_TP_FINALIZE
        if (unlikely(PyObject_CallFinalizerFromDealloc(self)))
#else
        Py_TYPE(gen)->tp_del(self);
        if (unlikely(Py_REFCNT(self) > 0))
#endif
        {
            // resurrected. :(
            return;
        }
        PyObject_GC_UnTrack(self);
    }

#ifdef __Pyx_AsyncGen_USED
    if (__Pyx_AsyncGen_CheckExact(self)) {
        /* We have to handle this case for asynchronous generators
           right here, because this code has to be between UNTRACK
           and GC_Del.
*/ + Py_CLEAR(((__pyx_PyAsyncGenObject*)self)->ag_finalizer); + } +#endif + __Pyx_Coroutine_clear(self); + __Pyx_PyHeapTypeObject_GC_Del(gen); +} + +static void __Pyx_Coroutine_del(PyObject *self) { + PyObject *error_type, *error_value, *error_traceback; + __pyx_CoroutineObject *gen = (__pyx_CoroutineObject *) self; + __Pyx_PyThreadState_declare + + if (gen->resume_label < 0) { + // already terminated => nothing to clean up + return; + } + +#if !CYTHON_USE_TP_FINALIZE + // Temporarily resurrect the object. + assert(self->ob_refcnt == 0); + __Pyx_SET_REFCNT(self, 1); +#endif + + __Pyx_PyThreadState_assign + + // Save the current exception, if any. + __Pyx_ErrFetch(&error_type, &error_value, &error_traceback); + +#ifdef __Pyx_AsyncGen_USED + if (__Pyx_AsyncGen_CheckExact(self)) { + __pyx_PyAsyncGenObject *agen = (__pyx_PyAsyncGenObject*)self; + PyObject *finalizer = agen->ag_finalizer; + if (finalizer && !agen->ag_closed) { + PyObject *res = __Pyx_PyObject_CallOneArg(finalizer, self); + if (unlikely(!res)) { + PyErr_WriteUnraisable(self); + } else { + Py_DECREF(res); + } + // Restore the saved exception. 
+ __Pyx_ErrRestore(error_type, error_value, error_traceback); + return; + } + } +#endif + + if (unlikely(gen->resume_label == 0 && !error_value)) { +#ifdef __Pyx_Coroutine_USED +#ifdef __Pyx_Generator_USED + // only warn about (async) coroutines + if (!__Pyx_Generator_CheckExact(self)) +#endif + { + // untrack dead object as we are executing Python code (which might trigger GC) + PyObject_GC_UnTrack(self); +#if PY_MAJOR_VERSION >= 3 /* PY_VERSION_HEX >= 0x03030000*/ || defined(PyErr_WarnFormat) + if (unlikely(PyErr_WarnFormat(PyExc_RuntimeWarning, 1, "coroutine '%.50S' was never awaited", gen->gi_qualname) < 0)) + PyErr_WriteUnraisable(self); +#else + {PyObject *msg; + char *cmsg; + #if CYTHON_COMPILING_IN_PYPY + msg = NULL; + cmsg = (char*) "coroutine was never awaited"; + #else + char *cname; + PyObject *qualname; + qualname = gen->gi_qualname; + cname = PyString_AS_STRING(qualname); + msg = PyString_FromFormat("coroutine '%.50s' was never awaited", cname); + + if (unlikely(!msg)) { + PyErr_Clear(); + cmsg = (char*) "coroutine was never awaited"; + } else { + cmsg = PyString_AS_STRING(msg); + } + #endif + if (unlikely(PyErr_WarnEx(PyExc_RuntimeWarning, cmsg, 1) < 0)) + PyErr_WriteUnraisable(self); + Py_XDECREF(msg);} +#endif + PyObject_GC_Track(self); + } +#endif /*__Pyx_Coroutine_USED*/ + } else { + PyObject *res = __Pyx_Coroutine_Close(self); + if (unlikely(!res)) { + if (PyErr_Occurred()) + PyErr_WriteUnraisable(self); + } else { + Py_DECREF(res); + } + } + + // Restore the saved exception. + __Pyx_ErrRestore(error_type, error_value, error_traceback); + +#if !CYTHON_USE_TP_FINALIZE + // Undo the temporary resurrection; can't use DECREF here, it would + // cause a recursive call. + assert(Py_REFCNT(self) > 0); + if (likely(--self->ob_refcnt == 0)) { + // this is the normal path out + return; + } + + // close() resurrected it! Make it look like the original Py_DECREF + // never happened. 
    {
        // _Py_NewReference() resets the refcount to 1; restore the real count
        Py_ssize_t refcnt = Py_REFCNT(self);
        _Py_NewReference(self);
        __Pyx_SET_REFCNT(self, refcnt);
    }
#if CYTHON_COMPILING_IN_CPYTHON
    assert(PyType_IS_GC(Py_TYPE(self)) &&
           _Py_AS_GC(self)->gc.gc_refs != _PyGC_REFS_UNTRACKED);

    // If Py_REF_DEBUG, _Py_NewReference bumped _Py_RefTotal, so
    // we need to undo that.
    _Py_DEC_REFTOTAL;
#endif
    // If Py_TRACE_REFS, _Py_NewReference re-added self to the object
    // chain, so no more to do there.
    // If COUNT_ALLOCS, the original decref bumped tp_frees, and
    // _Py_NewReference bumped tp_allocs: both of those need to be
    // undone.
#ifdef COUNT_ALLOCS
    --Py_TYPE(self)->tp_frees;
    --Py_TYPE(self)->tp_allocs;
#endif
#endif
}

// __name__ getter: returns a new reference to gi_name (None during GC teardown).
static PyObject *
__Pyx_Coroutine_get_name(__pyx_CoroutineObject *self, void *context)
{
    PyObject *name = self->gi_name;
    CYTHON_UNUSED_VAR(context);
    // avoid NULL pointer dereference during garbage collection
    if (unlikely(!name)) name = Py_None;
    Py_INCREF(name);
    return name;
}

// __name__ setter: rejects deletion and non-string values, then replaces gi_name.
static int
__Pyx_Coroutine_set_name(__pyx_CoroutineObject *self, PyObject *value, void *context)
{
    CYTHON_UNUSED_VAR(context);
#if PY_MAJOR_VERSION >= 3
    if (unlikely(value == NULL || !PyUnicode_Check(value)))
#else
    if (unlikely(value == NULL || !PyString_Check(value)))
#endif
    {
        PyErr_SetString(PyExc_TypeError,
                        "__name__ must be set to a string object");
        return -1;
    }
    Py_INCREF(value);
    __Pyx_Py_XDECREF_SET(self->gi_name, value);
    return 0;
}

// __qualname__ getter: returns a new reference to gi_qualname (None during GC teardown).
static PyObject *
__Pyx_Coroutine_get_qualname(__pyx_CoroutineObject *self, void *context)
{
    PyObject *name = self->gi_qualname;
    CYTHON_UNUSED_VAR(context);
    // avoid NULL pointer dereference during garbage collection
    if (unlikely(!name)) name = Py_None;
    Py_INCREF(name);
    return name;
}

// __qualname__ setter: same validation as set_name, replacing gi_qualname.
static int
__Pyx_Coroutine_set_qualname(__pyx_CoroutineObject *self, PyObject *value, void *context)
{
    CYTHON_UNUSED_VAR(context);
#if PY_MAJOR_VERSION >= 3
    if (unlikely(value == NULL || !PyUnicode_Check(value)))
+#else + if (unlikely(value == NULL || !PyString_Check(value))) +#endif + { + PyErr_SetString(PyExc_TypeError, + "__qualname__ must be set to a string object"); + return -1; + } + Py_INCREF(value); + __Pyx_Py_XDECREF_SET(self->gi_qualname, value); + return 0; +} + +static PyObject * +__Pyx_Coroutine_get_frame(__pyx_CoroutineObject *self, void *context) +{ + PyObject *frame = self->gi_frame; + CYTHON_UNUSED_VAR(context); + if (!frame) { + if (unlikely(!self->gi_code)) { + // Avoid doing something stupid, e.g. during garbage collection. + Py_RETURN_NONE; + } + frame = (PyObject *) PyFrame_New( + PyThreadState_Get(), /*PyThreadState *tstate,*/ + (PyCodeObject*) self->gi_code, /*PyCodeObject *code,*/ + $moddict_cname, /*PyObject *globals,*/ + 0 /*PyObject *locals*/ + ); + if (unlikely(!frame)) + return NULL; + // keep the frame cached once it's created + self->gi_frame = frame; + } + Py_INCREF(frame); + return frame; +} + +static __pyx_CoroutineObject *__Pyx__Coroutine_New( + PyTypeObject* type, __pyx_coroutine_body_t body, PyObject *code, PyObject *closure, + PyObject *name, PyObject *qualname, PyObject *module_name) { + __pyx_CoroutineObject *gen = PyObject_GC_New(__pyx_CoroutineObject, type); + if (unlikely(!gen)) + return NULL; + return __Pyx__Coroutine_NewInit(gen, body, code, closure, name, qualname, module_name); +} + +static __pyx_CoroutineObject *__Pyx__Coroutine_NewInit( + __pyx_CoroutineObject *gen, __pyx_coroutine_body_t body, PyObject *code, PyObject *closure, + PyObject *name, PyObject *qualname, PyObject *module_name) { + gen->body = body; + gen->closure = closure; + Py_XINCREF(closure); + gen->is_running = 0; + gen->resume_label = 0; + gen->classobj = NULL; + gen->yieldfrom = NULL; + #if PY_VERSION_HEX >= 0x030B00a4 + gen->gi_exc_state.exc_value = NULL; + #else + gen->gi_exc_state.exc_type = NULL; + gen->gi_exc_state.exc_value = NULL; + gen->gi_exc_state.exc_traceback = NULL; + #endif +#if CYTHON_USE_EXC_INFO_STACK + gen->gi_exc_state.previous_item = 
NULL; +#endif + gen->gi_weakreflist = NULL; + Py_XINCREF(qualname); + gen->gi_qualname = qualname; + Py_XINCREF(name); + gen->gi_name = name; + Py_XINCREF(module_name); + gen->gi_modulename = module_name; + Py_XINCREF(code); + gen->gi_code = code; + gen->gi_frame = NULL; + + PyObject_GC_Track(gen); + return gen; +} + + +//////////////////// Coroutine //////////////////// +//@requires: CoroutineBase +//@requires: PatchGeneratorABC +//@requires: ObjectHandling.c::PyObject_GenericGetAttrNoDict + +static void __Pyx_CoroutineAwait_dealloc(PyObject *self) { + PyObject_GC_UnTrack(self); + Py_CLEAR(((__pyx_CoroutineAwaitObject*)self)->coroutine); + __Pyx_PyHeapTypeObject_GC_Del(self); +} + +static int __Pyx_CoroutineAwait_traverse(__pyx_CoroutineAwaitObject *self, visitproc visit, void *arg) { + Py_VISIT(self->coroutine); + return 0; +} + +static int __Pyx_CoroutineAwait_clear(__pyx_CoroutineAwaitObject *self) { + Py_CLEAR(self->coroutine); + return 0; +} + +static PyObject *__Pyx_CoroutineAwait_Next(__pyx_CoroutineAwaitObject *self) { + return __Pyx_Generator_Next(self->coroutine); +} + +static PyObject *__Pyx_CoroutineAwait_Send(__pyx_CoroutineAwaitObject *self, PyObject *value) { + return __Pyx_Coroutine_Send(self->coroutine, value); +} + +static PyObject *__Pyx_CoroutineAwait_Throw(__pyx_CoroutineAwaitObject *self, PyObject *args) { + return __Pyx_Coroutine_Throw(self->coroutine, args); +} + +static PyObject *__Pyx_CoroutineAwait_Close(__pyx_CoroutineAwaitObject *self, PyObject *arg) { + CYTHON_UNUSED_VAR(arg); + return __Pyx_Coroutine_Close(self->coroutine); +} + +static PyObject *__Pyx_CoroutineAwait_self(PyObject *self) { + Py_INCREF(self); + return self; +} + +#if !CYTHON_COMPILING_IN_PYPY +static PyObject *__Pyx_CoroutineAwait_no_new(PyTypeObject *type, PyObject *args, PyObject *kwargs) { + CYTHON_UNUSED_VAR(type); + CYTHON_UNUSED_VAR(args); + CYTHON_UNUSED_VAR(kwargs); + PyErr_SetString(PyExc_TypeError, "cannot instantiate type, use 'await coroutine' instead"); + 
return NULL; +} +#endif + +// In earlier versions of Python an object with no __dict__ and not __slots__ is assumed +// to be pickleable by default. Coroutine-wrappers have significant state so shouldn't be. +// Therefore provide a default implementation. +// Something similar applies to heaptypes (i.e. with type_specs) with protocols 0 and 1 +// even in more recent versions. +// We are applying this to all Python versions (hence the commented out version guard) +// to make the behaviour explicit. +// #if PY_VERSION_HEX < 0x03060000 || CYTHON_USE_TYPE_SPECS +static PyObject *__Pyx_CoroutineAwait_reduce_ex(__pyx_CoroutineAwaitObject *self, PyObject *arg) { + CYTHON_UNUSED_VAR(arg); + PyErr_Format(PyExc_TypeError, "cannot pickle '%.200s' object", + Py_TYPE(self)->tp_name); + return NULL; +} +// #endif + +static PyMethodDef __pyx_CoroutineAwait_methods[] = { + {"send", (PyCFunction) __Pyx_CoroutineAwait_Send, METH_O, + (char*) PyDoc_STR("send(arg) -> send 'arg' into coroutine,\nreturn next yielded value or raise StopIteration.")}, + {"throw", (PyCFunction) __Pyx_CoroutineAwait_Throw, METH_VARARGS, + (char*) PyDoc_STR("throw(typ[,val[,tb]]) -> raise exception in coroutine,\nreturn next yielded value or raise StopIteration.")}, + {"close", (PyCFunction) __Pyx_CoroutineAwait_Close, METH_NOARGS, + (char*) PyDoc_STR("close() -> raise GeneratorExit inside coroutine.")}, +// only needed with type-specs or version<3.6, but included in all versions for clarity +// #if PY_VERSION_HEX < 0x03060000 || CYTHON_USE_TYPE_SPECS + {"__reduce_ex__", (PyCFunction) __Pyx_CoroutineAwait_reduce_ex, METH_O, 0}, + {"__reduce__", (PyCFunction) __Pyx_CoroutineAwait_reduce_ex, METH_NOARGS, 0}, +// #endif + {0, 0, 0, 0} +}; + +#if CYTHON_USE_TYPE_SPECS +static PyType_Slot __pyx_CoroutineAwaitType_slots[] = { + {Py_tp_dealloc, (void *)__Pyx_CoroutineAwait_dealloc}, + {Py_tp_traverse, (void *)__Pyx_CoroutineAwait_traverse}, + {Py_tp_clear, (void *)__Pyx_CoroutineAwait_clear}, +#if 
!CYTHON_COMPILING_IN_PYPY + {Py_tp_new, (void *)__Pyx_CoroutineAwait_no_new}, +#endif + {Py_tp_methods, (void *)__pyx_CoroutineAwait_methods}, + {Py_tp_iter, (void *)__Pyx_CoroutineAwait_self}, + {Py_tp_iternext, (void *)__Pyx_CoroutineAwait_Next}, + {0, 0}, +}; + +static PyType_Spec __pyx_CoroutineAwaitType_spec = { + __PYX_TYPE_MODULE_PREFIX "coroutine_wrapper", + sizeof(__pyx_CoroutineAwaitObject), + 0, + Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC, /*tp_flags*/ + __pyx_CoroutineAwaitType_slots +}; +#else /* CYTHON_USE_TYPE_SPECS */ + +static PyTypeObject __pyx_CoroutineAwaitType_type = { + PyVarObject_HEAD_INIT(0, 0) + __PYX_TYPE_MODULE_PREFIX "coroutine_wrapper", /*tp_name*/ + sizeof(__pyx_CoroutineAwaitObject), /*tp_basicsize*/ + 0, /*tp_itemsize*/ + (destructor) __Pyx_CoroutineAwait_dealloc,/*tp_dealloc*/ + 0, /*tp_print*/ + 0, /*tp_getattr*/ + 0, /*tp_setattr*/ + 0, /*tp_as_async resp. tp_compare*/ + 0, /*tp_repr*/ + 0, /*tp_as_number*/ + 0, /*tp_as_sequence*/ + 0, /*tp_as_mapping*/ + 0, /*tp_hash*/ + 0, /*tp_call*/ + 0, /*tp_str*/ + 0, /*tp_getattro*/ + 0, /*tp_setattro*/ + 0, /*tp_as_buffer*/ + Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC, /*tp_flags*/ + PyDoc_STR("A wrapper object implementing __await__ for coroutines."), /*tp_doc*/ + (traverseproc) __Pyx_CoroutineAwait_traverse, /*tp_traverse*/ + (inquiry) __Pyx_CoroutineAwait_clear, /*tp_clear*/ + 0, /*tp_richcompare*/ + 0, /*tp_weaklistoffset*/ + __Pyx_CoroutineAwait_self, /*tp_iter*/ + (iternextfunc) __Pyx_CoroutineAwait_Next, /*tp_iternext*/ + __pyx_CoroutineAwait_methods, /*tp_methods*/ + 0 , /*tp_members*/ + 0 , /*tp_getset*/ + 0, /*tp_base*/ + 0, /*tp_dict*/ + 0, /*tp_descr_get*/ + 0, /*tp_descr_set*/ + 0, /*tp_dictoffset*/ + 0, /*tp_init*/ + 0, /*tp_alloc*/ +#if !CYTHON_COMPILING_IN_PYPY + __Pyx_CoroutineAwait_no_new, /*tp_new*/ +#else + 0, /*tp_new*/ +#endif + 0, /*tp_free*/ + 0, /*tp_is_gc*/ + 0, /*tp_bases*/ + 0, /*tp_mro*/ + 0, /*tp_cache*/ + 0, /*tp_subclasses*/ + 0, /*tp_weaklist*/ + 0, /*tp_del*/ 
+ 0, /*tp_version_tag*/ +#if PY_VERSION_HEX >= 0x030400a1 + 0, /*tp_finalize*/ +#endif +#if PY_VERSION_HEX >= 0x030800b1 && (!CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800) + 0, /*tp_vectorcall*/ +#endif +#if __PYX_NEED_TP_PRINT_SLOT + 0, /*tp_print*/ +#endif +#if PY_VERSION_HEX >= 0x030C0000 + 0, /*tp_watched*/ +#endif +#if PY_VERSION_HEX >= 0x030d00A4 + 0, /*tp_versions_used*/ +#endif +#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000 + 0, /*tp_pypy_flags*/ +#endif +}; +#endif /* CYTHON_USE_TYPE_SPECS */ + +#if PY_VERSION_HEX < 0x030500B1 || defined(__Pyx_IterableCoroutine_USED) || CYTHON_USE_ASYNC_SLOTS +static CYTHON_INLINE PyObject *__Pyx__Coroutine_await(PyObject *coroutine) { + __pyx_CoroutineAwaitObject *await = PyObject_GC_New(__pyx_CoroutineAwaitObject, __pyx_CoroutineAwaitType); + if (unlikely(!await)) return NULL; + Py_INCREF(coroutine); + await->coroutine = coroutine; + PyObject_GC_Track(await); + return (PyObject*)await; +} +#endif + +#if PY_VERSION_HEX < 0x030500B1 +static PyObject *__Pyx_Coroutine_await_method(PyObject *coroutine, PyObject *arg) { + CYTHON_UNUSED_VAR(arg); + return __Pyx__Coroutine_await(coroutine); +} +#endif + +#if defined(__Pyx_IterableCoroutine_USED) || CYTHON_USE_ASYNC_SLOTS +static PyObject *__Pyx_Coroutine_await(PyObject *coroutine) { + if (unlikely(!coroutine || !__Pyx_Coroutine_Check(coroutine))) { + PyErr_SetString(PyExc_TypeError, "invalid input, expected coroutine"); + return NULL; + } + return __Pyx__Coroutine_await(coroutine); +} +#endif + +#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3 && PY_VERSION_HEX < 0x030500B1 +static PyObject *__Pyx_Coroutine_compare(PyObject *obj, PyObject *other, int op) { + PyObject* result; + switch (op) { + case Py_EQ: result = (other == obj) ? Py_True : Py_False; break; + case Py_NE: result = (other != obj) ? 
Py_True : Py_False; break; + default: + result = Py_NotImplemented; + } + Py_INCREF(result); + return result; +} +#endif + +static PyMethodDef __pyx_Coroutine_methods[] = { + {"send", (PyCFunction) __Pyx_Coroutine_Send, METH_O, + (char*) PyDoc_STR("send(arg) -> send 'arg' into coroutine,\nreturn next iterated value or raise StopIteration.")}, + {"throw", (PyCFunction) __Pyx_Coroutine_Throw, METH_VARARGS, + (char*) PyDoc_STR("throw(typ[,val[,tb]]) -> raise exception in coroutine,\nreturn next iterated value or raise StopIteration.")}, + {"close", (PyCFunction) __Pyx_Coroutine_Close_Method, METH_NOARGS, + (char*) PyDoc_STR("close() -> raise GeneratorExit inside coroutine.")}, +#if PY_VERSION_HEX < 0x030500B1 + {"__await__", (PyCFunction) __Pyx_Coroutine_await_method, METH_NOARGS, + (char*) PyDoc_STR("__await__() -> return an iterator to be used in await expression.")}, +#endif + {0, 0, 0, 0} +}; + +static PyMemberDef __pyx_Coroutine_memberlist[] = { + {(char *) "cr_running", T_BOOL, offsetof(__pyx_CoroutineObject, is_running), READONLY, NULL}, + {(char*) "cr_await", T_OBJECT, offsetof(__pyx_CoroutineObject, yieldfrom), READONLY, + (char*) PyDoc_STR("object being awaited, or None")}, + {(char*) "cr_code", T_OBJECT, offsetof(__pyx_CoroutineObject, gi_code), READONLY, NULL}, + {(char *) "__module__", T_OBJECT, offsetof(__pyx_CoroutineObject, gi_modulename), 0, 0}, +#if CYTHON_USE_TYPE_SPECS + {(char *) "__weaklistoffset__", T_PYSSIZET, offsetof(__pyx_CoroutineObject, gi_weakreflist), READONLY, 0}, +#endif + {0, 0, 0, 0, 0} +}; + +static PyGetSetDef __pyx_Coroutine_getsets[] = { + {(char *) "__name__", (getter)__Pyx_Coroutine_get_name, (setter)__Pyx_Coroutine_set_name, + (char*) PyDoc_STR("name of the coroutine"), 0}, + {(char *) "__qualname__", (getter)__Pyx_Coroutine_get_qualname, (setter)__Pyx_Coroutine_set_qualname, + (char*) PyDoc_STR("qualified name of the coroutine"), 0}, + {(char *) "cr_frame", (getter)__Pyx_Coroutine_get_frame, NULL, + (char*) PyDoc_STR("Frame 
of the coroutine"), 0}, + {0, 0, 0, 0, 0} +}; + +#if CYTHON_USE_TYPE_SPECS +static PyType_Slot __pyx_CoroutineType_slots[] = { + {Py_tp_dealloc, (void *)__Pyx_Coroutine_dealloc}, + {Py_am_await, (void *)&__Pyx_Coroutine_await}, + {Py_tp_traverse, (void *)__Pyx_Coroutine_traverse}, + {Py_tp_methods, (void *)__pyx_Coroutine_methods}, + {Py_tp_members, (void *)__pyx_Coroutine_memberlist}, + {Py_tp_getset, (void *)__pyx_Coroutine_getsets}, + {Py_tp_getattro, (void *) __Pyx_PyObject_GenericGetAttrNoDict}, +#if CYTHON_USE_TP_FINALIZE + {Py_tp_finalize, (void *)__Pyx_Coroutine_del}, +#endif + {0, 0}, +}; + +static PyType_Spec __pyx_CoroutineType_spec = { + __PYX_TYPE_MODULE_PREFIX "coroutine", + sizeof(__pyx_CoroutineObject), + 0, + Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC | Py_TPFLAGS_HAVE_FINALIZE, /*tp_flags*/ + __pyx_CoroutineType_slots +}; +#else /* CYTHON_USE_TYPE_SPECS */ + +#if CYTHON_USE_ASYNC_SLOTS +static __Pyx_PyAsyncMethodsStruct __pyx_Coroutine_as_async = { + __Pyx_Coroutine_await, /*am_await*/ + 0, /*am_aiter*/ + 0, /*am_anext*/ +#if PY_VERSION_HEX >= 0x030A00A3 + 0, /*am_send*/ +#endif +}; +#endif + +static PyTypeObject __pyx_CoroutineType_type = { + PyVarObject_HEAD_INIT(0, 0) + __PYX_TYPE_MODULE_PREFIX "coroutine", /*tp_name*/ + sizeof(__pyx_CoroutineObject), /*tp_basicsize*/ + 0, /*tp_itemsize*/ + (destructor) __Pyx_Coroutine_dealloc,/*tp_dealloc*/ + 0, /*tp_print*/ + 0, /*tp_getattr*/ + 0, /*tp_setattr*/ +#if CYTHON_USE_ASYNC_SLOTS + &__pyx_Coroutine_as_async, /*tp_as_async (tp_reserved) - Py3 only! 
*/ +#else + 0, /*tp_reserved*/ +#endif + 0, /*tp_repr*/ + 0, /*tp_as_number*/ + 0, /*tp_as_sequence*/ + 0, /*tp_as_mapping*/ + 0, /*tp_hash*/ + 0, /*tp_call*/ + 0, /*tp_str*/ + 0, /*tp_getattro*/ + 0, /*tp_setattro*/ + 0, /*tp_as_buffer*/ + Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC | Py_TPFLAGS_HAVE_FINALIZE, /*tp_flags*/ + 0, /*tp_doc*/ + (traverseproc) __Pyx_Coroutine_traverse, /*tp_traverse*/ + 0, /*tp_clear*/ +#if CYTHON_USE_ASYNC_SLOTS && CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3 && PY_VERSION_HEX < 0x030500B1 + // in order to (mis-)use tp_reserved above, we must also implement tp_richcompare + __Pyx_Coroutine_compare, /*tp_richcompare*/ +#else + 0, /*tp_richcompare*/ +#endif + offsetof(__pyx_CoroutineObject, gi_weakreflist), /*tp_weaklistoffset*/ + // no tp_iter() as iterator is only available through __await__() + 0, /*tp_iter*/ + 0, /*tp_iternext*/ + __pyx_Coroutine_methods, /*tp_methods*/ + __pyx_Coroutine_memberlist, /*tp_members*/ + __pyx_Coroutine_getsets, /*tp_getset*/ + 0, /*tp_base*/ + 0, /*tp_dict*/ + 0, /*tp_descr_get*/ + 0, /*tp_descr_set*/ + 0, /*tp_dictoffset*/ + 0, /*tp_init*/ + 0, /*tp_alloc*/ + 0, /*tp_new*/ + 0, /*tp_free*/ + 0, /*tp_is_gc*/ + 0, /*tp_bases*/ + 0, /*tp_mro*/ + 0, /*tp_cache*/ + 0, /*tp_subclasses*/ + 0, /*tp_weaklist*/ +#if CYTHON_USE_TP_FINALIZE + 0, /*tp_del*/ +#else + __Pyx_Coroutine_del, /*tp_del*/ +#endif + 0, /*tp_version_tag*/ +#if CYTHON_USE_TP_FINALIZE + __Pyx_Coroutine_del, /*tp_finalize*/ +#elif PY_VERSION_HEX >= 0x030400a1 + 0, /*tp_finalize*/ +#endif +#if PY_VERSION_HEX >= 0x030800b1 && (!CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800) + 0, /*tp_vectorcall*/ +#endif +#if __PYX_NEED_TP_PRINT_SLOT + 0, /*tp_print*/ +#endif +#if PY_VERSION_HEX >= 0x030C0000 + 0, /*tp_watched*/ +#endif +#if PY_VERSION_HEX >= 0x030d00A4 + 0, /*tp_versions_used*/ +#endif +#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000 + 0, /*tp_pypy_flags*/ +#endif +}; +#endif /* 
CYTHON_USE_TYPE_SPECS */ + +static int __pyx_Coroutine_init(PyObject *module) { + CYTHON_MAYBE_UNUSED_VAR(module); + // on Windows, C-API functions can't be used in slots statically +#if CYTHON_USE_TYPE_SPECS + __pyx_CoroutineType = __Pyx_FetchCommonTypeFromSpec(module, &__pyx_CoroutineType_spec, NULL); +#else + __pyx_CoroutineType_type.tp_getattro = __Pyx_PyObject_GenericGetAttrNoDict; + __pyx_CoroutineType = __Pyx_FetchCommonType(&__pyx_CoroutineType_type); +#endif + if (unlikely(!__pyx_CoroutineType)) + return -1; + +#ifdef __Pyx_IterableCoroutine_USED + if (unlikely(__pyx_IterableCoroutine_init(module) == -1)) + return -1; +#endif + +#if CYTHON_USE_TYPE_SPECS + __pyx_CoroutineAwaitType = __Pyx_FetchCommonTypeFromSpec(module, &__pyx_CoroutineAwaitType_spec, NULL); +#else + __pyx_CoroutineAwaitType = __Pyx_FetchCommonType(&__pyx_CoroutineAwaitType_type); +#endif + if (unlikely(!__pyx_CoroutineAwaitType)) + return -1; + return 0; +} + + +//////////////////// IterableCoroutine.proto //////////////////// + +#define __Pyx_IterableCoroutine_USED + +#undef __Pyx_Coroutine_Check +#define __Pyx_Coroutine_Check(obj) (__Pyx_Coroutine_CheckExact(obj) || __Pyx_IS_TYPE(obj, __pyx_IterableCoroutineType)) + +#define __Pyx_IterableCoroutine_New(body, code, closure, name, qualname, module_name) \ + __Pyx__Coroutine_New(__pyx_IterableCoroutineType, body, code, closure, name, qualname, module_name) + +static int __pyx_IterableCoroutine_init(PyObject *module);/*proto*/ + + +//////////////////// IterableCoroutine //////////////////// +//@requires: Coroutine +//@requires: CommonStructures.c::FetchCommonType + +#if CYTHON_USE_TYPE_SPECS +static PyType_Slot __pyx_IterableCoroutineType_slots[] = { + {Py_tp_dealloc, (void *)__Pyx_Coroutine_dealloc}, + {Py_am_await, (void *)&__Pyx_Coroutine_await}, + {Py_tp_traverse, (void *)__Pyx_Coroutine_traverse}, + {Py_tp_iter, (void *)__Pyx_Coroutine_await}, + {Py_tp_iternext, (void *)__Pyx_Generator_Next}, + {Py_tp_methods, (void 
*)__pyx_Coroutine_methods}, + {Py_tp_members, (void *)__pyx_Coroutine_memberlist}, + {Py_tp_getset, (void *)__pyx_Coroutine_getsets}, + {Py_tp_getattro, (void *) __Pyx_PyObject_GenericGetAttrNoDict}, +#if CYTHON_USE_TP_FINALIZE + {Py_tp_finalize, (void *)__Pyx_Coroutine_del}, +#endif + {0, 0}, +}; + +static PyType_Spec __pyx_IterableCoroutineType_spec = { + __PYX_TYPE_MODULE_PREFIX "iterable_coroutine", + sizeof(__pyx_CoroutineObject), + 0, + Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC | Py_TPFLAGS_HAVE_FINALIZE, /*tp_flags*/ + __pyx_IterableCoroutineType_slots +}; +#else /* CYTHON_USE_TYPE_SPECS */ + +static PyTypeObject __pyx_IterableCoroutineType_type = { + PyVarObject_HEAD_INIT(0, 0) + __PYX_TYPE_MODULE_PREFIX "iterable_coroutine", /*tp_name*/ + sizeof(__pyx_CoroutineObject), /*tp_basicsize*/ + 0, /*tp_itemsize*/ + (destructor) __Pyx_Coroutine_dealloc,/*tp_dealloc*/ + 0, /*tp_print*/ + 0, /*tp_getattr*/ + 0, /*tp_setattr*/ +#if CYTHON_USE_ASYNC_SLOTS + &__pyx_Coroutine_as_async, /*tp_as_async (tp_reserved) - Py3 only! 
*/ +#else + 0, /*tp_reserved*/ +#endif + 0, /*tp_repr*/ + 0, /*tp_as_number*/ + 0, /*tp_as_sequence*/ + 0, /*tp_as_mapping*/ + 0, /*tp_hash*/ + 0, /*tp_call*/ + 0, /*tp_str*/ + 0, /*tp_getattro*/ + 0, /*tp_setattro*/ + 0, /*tp_as_buffer*/ + Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC | Py_TPFLAGS_HAVE_FINALIZE, /*tp_flags*/ + 0, /*tp_doc*/ + (traverseproc) __Pyx_Coroutine_traverse, /*tp_traverse*/ + 0, /*tp_clear*/ +#if CYTHON_USE_ASYNC_SLOTS && CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3 && PY_VERSION_HEX < 0x030500B1 + // in order to (mis-)use tp_reserved above, we must also implement tp_richcompare + __Pyx_Coroutine_compare, /*tp_richcompare*/ +#else + 0, /*tp_richcompare*/ +#endif + offsetof(__pyx_CoroutineObject, gi_weakreflist), /*tp_weaklistoffset*/ + // enable iteration for legacy support of asyncio yield-from protocol + __Pyx_Coroutine_await, /*tp_iter*/ + (iternextfunc) __Pyx_Generator_Next, /*tp_iternext*/ + __pyx_Coroutine_methods, /*tp_methods*/ + __pyx_Coroutine_memberlist, /*tp_members*/ + __pyx_Coroutine_getsets, /*tp_getset*/ + 0, /*tp_base*/ + 0, /*tp_dict*/ + 0, /*tp_descr_get*/ + 0, /*tp_descr_set*/ + 0, /*tp_dictoffset*/ + 0, /*tp_init*/ + 0, /*tp_alloc*/ + 0, /*tp_new*/ + 0, /*tp_free*/ + 0, /*tp_is_gc*/ + 0, /*tp_bases*/ + 0, /*tp_mro*/ + 0, /*tp_cache*/ + 0, /*tp_subclasses*/ + 0, /*tp_weaklist*/ +#if PY_VERSION_HEX >= 0x030400a1 + 0, /*tp_del*/ +#else + __Pyx_Coroutine_del, /*tp_del*/ +#endif + 0, /*tp_version_tag*/ +#if PY_VERSION_HEX >= 0x030400a1 && !CYTHON_COMPILING_IN_PYPY + __Pyx_Coroutine_del, /*tp_finalize*/ +#endif +#if PY_VERSION_HEX >= 0x030800b1 && (!CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800) + 0, /*tp_vectorcall*/ +#endif +#if __PYX_NEED_TP_PRINT_SLOT + 0, /*tp_print*/ +#endif +#if PY_VERSION_HEX >= 0x030C0000 + 0, /*tp_watched*/ +#endif +#if PY_VERSION_HEX >= 0x030d00A4 + 0, /*tp_versions_used*/ +#endif +#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000 + 0, 
/*tp_pypy_flags*/ +#endif +}; +#endif /* CYTHON_USE_TYPE_SPECS */ + + +static int __pyx_IterableCoroutine_init(PyObject *module) { +#if CYTHON_USE_TYPE_SPECS + __pyx_IterableCoroutineType = __Pyx_FetchCommonTypeFromSpec(module, &__pyx_IterableCoroutineType_spec, NULL); +#else + CYTHON_UNUSED_VAR(module); + __pyx_IterableCoroutineType_type.tp_getattro = __Pyx_PyObject_GenericGetAttrNoDict; + __pyx_IterableCoroutineType = __Pyx_FetchCommonType(&__pyx_IterableCoroutineType_type); +#endif + if (unlikely(!__pyx_IterableCoroutineType)) + return -1; + return 0; +} + + +//////////////////// Generator //////////////////// +//@requires: CoroutineBase +//@requires: PatchGeneratorABC +//@requires: ObjectHandling.c::PyObject_GenericGetAttrNoDict + +static PyMethodDef __pyx_Generator_methods[] = { + {"send", (PyCFunction) __Pyx_Coroutine_Send, METH_O, + (char*) PyDoc_STR("send(arg) -> send 'arg' into generator,\nreturn next yielded value or raise StopIteration.")}, + {"throw", (PyCFunction) __Pyx_Coroutine_Throw, METH_VARARGS, + (char*) PyDoc_STR("throw(typ[,val[,tb]]) -> raise exception in generator,\nreturn next yielded value or raise StopIteration.")}, + {"close", (PyCFunction) __Pyx_Coroutine_Close_Method, METH_NOARGS, + (char*) PyDoc_STR("close() -> raise GeneratorExit inside generator.")}, + {0, 0, 0, 0} +}; + +static PyMemberDef __pyx_Generator_memberlist[] = { + {(char *) "gi_running", T_BOOL, offsetof(__pyx_CoroutineObject, is_running), READONLY, NULL}, + {(char*) "gi_yieldfrom", T_OBJECT, offsetof(__pyx_CoroutineObject, yieldfrom), READONLY, + (char*) PyDoc_STR("object being iterated by 'yield from', or None")}, + {(char*) "gi_code", T_OBJECT, offsetof(__pyx_CoroutineObject, gi_code), READONLY, NULL}, + {(char *) "__module__", T_OBJECT, offsetof(__pyx_CoroutineObject, gi_modulename), 0, 0}, +#if CYTHON_USE_TYPE_SPECS + {(char *) "__weaklistoffset__", T_PYSSIZET, offsetof(__pyx_CoroutineObject, gi_weakreflist), READONLY, 0}, +#endif + {0, 0, 0, 0, 0} +}; + +static 
PyGetSetDef __pyx_Generator_getsets[] = { + {(char *) "__name__", (getter)__Pyx_Coroutine_get_name, (setter)__Pyx_Coroutine_set_name, + (char*) PyDoc_STR("name of the generator"), 0}, + {(char *) "__qualname__", (getter)__Pyx_Coroutine_get_qualname, (setter)__Pyx_Coroutine_set_qualname, + (char*) PyDoc_STR("qualified name of the generator"), 0}, + {(char *) "gi_frame", (getter)__Pyx_Coroutine_get_frame, NULL, + (char*) PyDoc_STR("Frame of the generator"), 0}, + {0, 0, 0, 0, 0} +}; + +#if CYTHON_USE_TYPE_SPECS +static PyType_Slot __pyx_GeneratorType_slots[] = { + {Py_tp_dealloc, (void *)__Pyx_Coroutine_dealloc}, + {Py_tp_traverse, (void *)__Pyx_Coroutine_traverse}, + {Py_tp_iter, (void *)PyObject_SelfIter}, + {Py_tp_iternext, (void *)__Pyx_Generator_Next}, + {Py_tp_methods, (void *)__pyx_Generator_methods}, + {Py_tp_members, (void *)__pyx_Generator_memberlist}, + {Py_tp_getset, (void *)__pyx_Generator_getsets}, + {Py_tp_getattro, (void *) __Pyx_PyObject_GenericGetAttrNoDict}, +#if CYTHON_USE_TP_FINALIZE + {Py_tp_finalize, (void *)__Pyx_Coroutine_del}, +#endif + {0, 0}, +}; + +static PyType_Spec __pyx_GeneratorType_spec = { + __PYX_TYPE_MODULE_PREFIX "generator", + sizeof(__pyx_CoroutineObject), + 0, + Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC | Py_TPFLAGS_HAVE_FINALIZE, /*tp_flags*/ + __pyx_GeneratorType_slots +}; +#else /* CYTHON_USE_TYPE_SPECS */ + +static PyTypeObject __pyx_GeneratorType_type = { + PyVarObject_HEAD_INIT(0, 0) + __PYX_TYPE_MODULE_PREFIX "generator", /*tp_name*/ + sizeof(__pyx_CoroutineObject), /*tp_basicsize*/ + 0, /*tp_itemsize*/ + (destructor) __Pyx_Coroutine_dealloc,/*tp_dealloc*/ + 0, /*tp_print*/ + 0, /*tp_getattr*/ + 0, /*tp_setattr*/ + 0, /*tp_as_async*/ + 0, /*tp_repr*/ + 0, /*tp_as_number*/ + 0, /*tp_as_sequence*/ + 0, /*tp_as_mapping*/ + 0, /*tp_hash*/ + 0, /*tp_call*/ + 0, /*tp_str*/ + 0, /*tp_getattro*/ + 0, /*tp_setattro*/ + 0, /*tp_as_buffer*/ + Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC | Py_TPFLAGS_HAVE_FINALIZE, /*tp_flags*/ + 0, 
/*tp_doc*/ + (traverseproc) __Pyx_Coroutine_traverse, /*tp_traverse*/ + 0, /*tp_clear*/ + 0, /*tp_richcompare*/ + offsetof(__pyx_CoroutineObject, gi_weakreflist), /*tp_weaklistoffset*/ + 0, /*tp_iter*/ + (iternextfunc) __Pyx_Generator_Next, /*tp_iternext*/ + __pyx_Generator_methods, /*tp_methods*/ + __pyx_Generator_memberlist, /*tp_members*/ + __pyx_Generator_getsets, /*tp_getset*/ + 0, /*tp_base*/ + 0, /*tp_dict*/ + 0, /*tp_descr_get*/ + 0, /*tp_descr_set*/ + 0, /*tp_dictoffset*/ + 0, /*tp_init*/ + 0, /*tp_alloc*/ + 0, /*tp_new*/ + 0, /*tp_free*/ + 0, /*tp_is_gc*/ + 0, /*tp_bases*/ + 0, /*tp_mro*/ + 0, /*tp_cache*/ + 0, /*tp_subclasses*/ + 0, /*tp_weaklist*/ +#if CYTHON_USE_TP_FINALIZE + 0, /*tp_del*/ +#else + __Pyx_Coroutine_del, /*tp_del*/ +#endif + 0, /*tp_version_tag*/ +#if CYTHON_USE_TP_FINALIZE + __Pyx_Coroutine_del, /*tp_finalize*/ +#elif PY_VERSION_HEX >= 0x030400a1 + 0, /*tp_finalize*/ +#endif +#if PY_VERSION_HEX >= 0x030800b1 && (!CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800) + 0, /*tp_vectorcall*/ +#endif +#if __PYX_NEED_TP_PRINT_SLOT + 0, /*tp_print*/ +#endif +#if PY_VERSION_HEX >= 0x030C0000 + 0, /*tp_watched*/ +#endif +#if PY_VERSION_HEX >= 0x030d00A4 + 0, /*tp_versions_used*/ +#endif +#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000 + 0, /*tp_pypy_flags*/ +#endif +}; +#endif /* CYTHON_USE_TYPE_SPECS */ + +static int __pyx_Generator_init(PyObject *module) { +#if CYTHON_USE_TYPE_SPECS + __pyx_GeneratorType = __Pyx_FetchCommonTypeFromSpec(module, &__pyx_GeneratorType_spec, NULL); +#else + CYTHON_UNUSED_VAR(module); + // on Windows, C-API functions can't be used in slots statically + __pyx_GeneratorType_type.tp_getattro = __Pyx_PyObject_GenericGetAttrNoDict; + __pyx_GeneratorType_type.tp_iter = PyObject_SelfIter; + __pyx_GeneratorType = __Pyx_FetchCommonType(&__pyx_GeneratorType_type); +#endif + if (unlikely(!__pyx_GeneratorType)) { + return -1; + } + return 0; +} + + +/////////////// 
ReturnWithStopIteration.proto /////////////// + +#define __Pyx_ReturnWithStopIteration(value) \ + if (value == Py_None) PyErr_SetNone(PyExc_StopIteration); else __Pyx__ReturnWithStopIteration(value) +static void __Pyx__ReturnWithStopIteration(PyObject* value); /*proto*/ + +/////////////// ReturnWithStopIteration /////////////// +//@requires: Exceptions.c::PyErrFetchRestore +//@requires: Exceptions.c::PyThreadStateGet +//@substitute: naming + +// 1) Instantiating an exception just to pass back a value is costly. +// 2) CPython 3.12 cannot separate exception type and value +// 3) Passing a tuple as value into PyErr_SetObject() passes its items on as arguments. +// 4) Passing an exception as value will interpret it as an exception on unpacking and raise it (or unpack its value). +// 5) If there is currently an exception being handled, we need to chain it. + +static void __Pyx__ReturnWithStopIteration(PyObject* value) { + PyObject *exc, *args; +#if CYTHON_COMPILING_IN_CPYTHON + __Pyx_PyThreadState_declare + if (PY_VERSION_HEX >= 0x030C00A6 + || unlikely(PyTuple_Check(value) || PyExceptionInstance_Check(value))) { + args = PyTuple_New(1); + if (unlikely(!args)) return; + Py_INCREF(value); + PyTuple_SET_ITEM(args, 0, value); + exc = PyType_Type.tp_call(PyExc_StopIteration, args, NULL); + Py_DECREF(args); + if (!exc) return; + } else { + // it's safe to avoid instantiating the exception + Py_INCREF(value); + exc = value; + } + #if CYTHON_FAST_THREAD_STATE + __Pyx_PyThreadState_assign + #if CYTHON_USE_EXC_INFO_STACK + if (!$local_tstate_cname->exc_info->exc_value) + #else + if (!$local_tstate_cname->exc_type) + #endif + { + // no chaining needed => avoid the overhead in PyErr_SetObject() + Py_INCREF(PyExc_StopIteration); + __Pyx_ErrRestore(PyExc_StopIteration, exc, NULL); + return; + } + #endif +#else + args = PyTuple_Pack(1, value); + if (unlikely(!args)) return; + exc = PyObject_Call(PyExc_StopIteration, args, NULL); + Py_DECREF(args); + if (unlikely(!exc)) return; 
+#endif + PyErr_SetObject(PyExc_StopIteration, exc); + Py_DECREF(exc); +} + + +//////////////////// PatchModuleWithCoroutine.proto //////////////////// + +static PyObject* __Pyx_Coroutine_patch_module(PyObject* module, const char* py_code); /*proto*/ + +//////////////////// PatchModuleWithCoroutine //////////////////// +//@substitute: naming + +static PyObject* __Pyx_Coroutine_patch_module(PyObject* module, const char* py_code) { +#if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED) + int result; + PyObject *globals, *result_obj; + globals = PyDict_New(); if (unlikely(!globals)) goto ignore; + result = PyDict_SetItemString(globals, "_cython_coroutine_type", + #ifdef __Pyx_Coroutine_USED + (PyObject*)__pyx_CoroutineType); + #else + Py_None); + #endif + if (unlikely(result < 0)) goto ignore; + result = PyDict_SetItemString(globals, "_cython_generator_type", + #ifdef __Pyx_Generator_USED + (PyObject*)__pyx_GeneratorType); + #else + Py_None); + #endif + if (unlikely(result < 0)) goto ignore; + if (unlikely(PyDict_SetItemString(globals, "_module", module) < 0)) goto ignore; + if (unlikely(PyDict_SetItemString(globals, "__builtins__", $builtins_cname) < 0)) goto ignore; + result_obj = PyRun_String(py_code, Py_file_input, globals, globals); + if (unlikely(!result_obj)) goto ignore; + Py_DECREF(result_obj); + Py_DECREF(globals); + return module; + +ignore: + Py_XDECREF(globals); + PyErr_WriteUnraisable(module); + if (unlikely(PyErr_WarnEx(PyExc_RuntimeWarning, "Cython module failed to patch module with custom type", 1) < 0)) { + Py_DECREF(module); + module = NULL; + } +#else + // avoid "unused" warning + py_code++; +#endif + return module; +} + + +//////////////////// PatchGeneratorABC.proto //////////////////// + +// register with Generator/Coroutine ABCs in 'collections.abc' +// see https://bugs.python.org/issue24018 +static int __Pyx_patch_abc(void); /*proto*/ + +//////////////////// PatchGeneratorABC //////////////////// +//@requires: 
PatchModuleWithCoroutine

// Build-time opt-out for registering Cython's generator/coroutine types
// with the collections(.abc) ABCs.
#ifndef CYTHON_REGISTER_ABCS
#define CYTHON_REGISTER_ABCS 1
#endif

#if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED)
static PyObject* __Pyx_patch_abc_module(PyObject *module); /*proto*/
// Run an embedded Python snippet against "module" (collections,
// collections.abc or backports_abc) that registers Cython's generator and
// coroutine types with the module's Generator/Coroutine ABCs, if present.
// NOTE(review): the snippet's names _module, _cython_generator_type and
// _cython_coroutine_type are presumably bound by __Pyx_Coroutine_patch_module
// — not visible in this chunk, confirm there.
static PyObject* __Pyx_patch_abc_module(PyObject *module) {
    module = __Pyx_Coroutine_patch_module(
        module, CSTRING("""\
if _cython_generator_type is not None:
    try: Generator = _module.Generator
    except AttributeError: pass
    else: Generator.register(_cython_generator_type)
if _cython_coroutine_type is not None:
    try: Coroutine = _module.Coroutine
    except AttributeError: pass
    else: Coroutine.register(_cython_coroutine_type)
""")
    );
    return module;
}
#endif

// Register Cython's generator/coroutine types with the ABCs in
// collections(.abc) and, opportunistically, "backports_abc".  Runs at most
// once per process (static flag).  Import/registration failures are reported
// via PyErr_WriteUnraisable plus a RuntimeWarning; -1 is only returned when
// the warning itself fails (e.g. warnings configured as errors) or when the
// patching snippet fails.
static int __Pyx_patch_abc(void) {
#if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED)
    static int abc_patched = 0;
    if (CYTHON_REGISTER_ABCS && !abc_patched) {
        PyObject *module;
        // Py2 keeps the ABCs directly in "collections".
        module = PyImport_ImportModule((PY_MAJOR_VERSION >= 3) ? "collections.abc" : "collections");
        if (unlikely(!module)) {
            PyErr_WriteUnraisable(NULL);
            if (unlikely(PyErr_WarnEx(PyExc_RuntimeWarning,
                    ((PY_MAJOR_VERSION >= 3) ?
                        "Cython module failed to register with collections.abc module" :
                        "Cython module failed to register with collections module"), 1) < 0)) {
                return -1;
            }
        } else {
            module = __Pyx_patch_abc_module(module);
            abc_patched = 1;
            if (unlikely(!module))
                return -1;
            Py_DECREF(module);
        }
        // also register with "backports_abc" module if available, just in case
        module = PyImport_ImportModule("backports_abc");
        if (module) {
            module = __Pyx_patch_abc_module(module);
            Py_XDECREF(module);
        }
        if (!module) {
            // best effort only: a missing backports_abc is not an error
            PyErr_Clear();
        }
    }
#else
    // avoid "unused" warning for __Pyx_Coroutine_patch_module()
    if ((0)) __Pyx_Coroutine_patch_module(NULL, NULL);
#endif
    return 0;
}


//////////////////// PatchAsyncIO.proto ////////////////////

// run after importing "asyncio" to patch Cython generator support into it
static PyObject* __Pyx_patch_asyncio(PyObject* module); /*proto*/

//////////////////// PatchAsyncIO ////////////////////
//@requires: ImportExport.c::Import
//@requires: PatchModuleWithCoroutine
//@requires: PatchInspect

// Teach a pre-3.5.0b2 asyncio to recognise Cython coroutines/generators:
// extends asyncio.coroutines._COROUTINE_TYPES when available, otherwise
// wraps asyncio.tasks.iscoroutine (the pre-3.4.2 layout).  On newer CPython
// (or when CYTHON_PATCH_ASYNCIO is 0) this compiles to a no-op passthrough.
// Returns the borrowed-in "module" argument, or NULL on hard failure.
static PyObject* __Pyx_patch_asyncio(PyObject* module) {
#if PY_VERSION_HEX < 0x030500B2 && \
        (defined(__Pyx_Coroutine_USED) || defined(__Pyx_Generator_USED)) && \
        (!defined(CYTHON_PATCH_ASYNCIO) || CYTHON_PATCH_ASYNCIO)
    PyObject *patch_module = NULL;
    static int asyncio_patched = 0;  // patch the process-wide asyncio only once
    if (unlikely((!asyncio_patched) && module)) {
        PyObject *package;
        package = __Pyx_Import(PYIDENT("asyncio.coroutines"), NULL, 0);
        if (package) {
            patch_module = __Pyx_Coroutine_patch_module(
                PyObject_GetAttrString(package, "coroutines"), CSTRING("""\
try:
    coro_types = _module._COROUTINE_TYPES
except AttributeError: pass
else:
    if _cython_coroutine_type is not None and _cython_coroutine_type not in coro_types:
        coro_types = tuple(coro_types) + (_cython_coroutine_type,)
    if _cython_generator_type is not None and _cython_generator_type not in coro_types:
        coro_types = tuple(coro_types) + (_cython_generator_type,)
    _module._COROUTINE_TYPES = coro_types
""")
            );
        } else {
            PyErr_Clear();
// Always enable fallback: even if we compile against 3.4.2, we might be running on 3.4.1 at some point.
//#if PY_VERSION_HEX < 0x03040200
            // Py3.4.1 used to have asyncio.tasks instead of asyncio.coroutines
            package = __Pyx_Import(PYIDENT("asyncio.tasks"), NULL, 0);
            if (unlikely(!package)) goto asyncio_done;
            patch_module = __Pyx_Coroutine_patch_module(
                PyObject_GetAttrString(package, "tasks"), CSTRING("""\
if hasattr(_module, 'iscoroutine'):
    old_types = getattr(_module.iscoroutine, '_cython_coroutine_types', None)
    if old_types is None or not isinstance(old_types, set):
        old_types = set()
        def cy_wrap(orig_func, type=type, cython_coroutine_types=old_types):
            def cy_iscoroutine(obj): return type(obj) in cython_coroutine_types or orig_func(obj)
            cy_iscoroutine._cython_coroutine_types = cython_coroutine_types
            return cy_iscoroutine
        _module.iscoroutine = cy_wrap(_module.iscoroutine)
    if _cython_coroutine_type is not None:
        old_types.add(_cython_coroutine_type)
    if _cython_generator_type is not None:
        old_types.add(_cython_generator_type)
""")
            );
//#endif
// Py < 0x03040200
        }
        Py_DECREF(package);
        if (unlikely(!patch_module)) goto ignore;
//#if PY_VERSION_HEX < 0x03040200
asyncio_done:
        PyErr_Clear();
//#endif
        asyncio_patched = 1;
#ifdef __Pyx_Generator_USED
        // now patch inspect.isgenerator() by looking up the imported module in the patched asyncio module
        {
            PyObject *inspect_module;
            if (patch_module) {
                inspect_module = PyObject_GetAttr(patch_module, PYIDENT("inspect"));
                Py_DECREF(patch_module);
            } else {
                inspect_module = __Pyx_Import(PYIDENT("inspect"), NULL, 0);
            }
            if (unlikely(!inspect_module)) goto ignore;
            inspect_module = __Pyx_patch_inspect(inspect_module);
            if (unlikely(!inspect_module)) {
                Py_DECREF(module);
                module = NULL;
            }
            Py_XDECREF(inspect_module);
        }
#else
        // avoid "unused" warning for __Pyx_patch_inspect()
        if ((0)) return __Pyx_patch_inspect(module);
#endif
    }
    return module;
ignore:
    // Patching is best effort: degrade to a warning unless the warning
    // machinery itself fails.
    PyErr_WriteUnraisable(module);
    if (unlikely(PyErr_WarnEx(PyExc_RuntimeWarning, "Cython module failed to patch asyncio package with custom generator type", 1) < 0)) {
        Py_DECREF(module);
        module = NULL;
    }
#else
    // avoid "unused" warning for __Pyx_Coroutine_patch_module()
    if ((0)) return __Pyx_patch_inspect(__Pyx_Coroutine_patch_module(module, NULL));
#endif
    return module;
}


//////////////////// PatchInspect.proto ////////////////////

// run after importing "inspect" to patch Cython generator support into it
static PyObject* __Pyx_patch_inspect(PyObject* module); /*proto*/

//////////////////// PatchInspect ////////////////////
//@requires: PatchModuleWithCoroutine

// Wrap inspect.isgenerator() so that it also accepts Cython's generator
// type; the set of extra types is stashed on the wrapper itself
// (_cython_generator_types) so repeated patching from several Cython
// modules accumulates types instead of re-wrapping.
static PyObject* __Pyx_patch_inspect(PyObject* module) {
#if defined(__Pyx_Generator_USED) && (!defined(CYTHON_PATCH_INSPECT) || CYTHON_PATCH_INSPECT)
    static int inspect_patched = 0;
    if (unlikely((!inspect_patched) && module)) {
        module = __Pyx_Coroutine_patch_module(
            module, CSTRING("""\
old_types = getattr(_module.isgenerator, '_cython_generator_types', None)
if old_types is None or not isinstance(old_types, set):
    old_types = set()
    def cy_wrap(orig_func, type=type, cython_generator_types=old_types):
        def cy_isgenerator(obj): return type(obj) in cython_generator_types or orig_func(obj)
        cy_isgenerator._cython_generator_types = cython_generator_types
        return cy_isgenerator
    _module.isgenerator = cy_wrap(_module.isgenerator)
old_types.add(_cython_generator_type)
""")
        );
        inspect_patched = 1;
    }
#else
    // avoid "unused" warning for __Pyx_Coroutine_patch_module()
    if ((0)) return __Pyx_Coroutine_patch_module(module, NULL);
#endif
    return module;
}


//////////////////// StopAsyncIteration.proto ////////////////////

#define __Pyx_StopAsyncIteration_USED
// Points at the builtin PyExc_StopAsyncIteration on Py >= 3.5.0b1,
// otherwise at a Cython-created stand-in type (see implementation section).
static PyObject *__Pyx_PyExc_StopAsyncIteration;
static int __pyx_StopAsyncIteration_init(PyObject *module); /*proto*/
//////////////////// StopAsyncIteration ////////////////////

#if PY_VERSION_HEX < 0x030500B1
#if CYTHON_USE_TYPE_SPECS
#error Using async coroutines with type specs requires Python 3.5 or later.
#else

// Stand-in exception type for Pythons that predate the builtin
// StopAsyncIteration (PEP 525 era).  Deliberately reuses the
// PyBaseExceptionObject layout; traverse/clear/dictoffset/base are filled
// in at init time from PyExc_BaseException / PyExc_Exception below.
static PyTypeObject __Pyx__PyExc_StopAsyncIteration_type = {
    PyVarObject_HEAD_INIT(0, 0)
    "StopAsyncIteration",               /*tp_name*/
    sizeof(PyBaseExceptionObject),      /*tp_basicsize*/
    0,                                  /*tp_itemsize*/
    0,                                  /*tp_dealloc*/
    0,                                  /*tp_print*/
    0,                                  /*tp_getattr*/
    0,                                  /*tp_setattr*/
    0,                                  /*tp_compare / reserved*/
    0,                                  /*tp_repr*/
    0,                                  /*tp_as_number*/
    0,                                  /*tp_as_sequence*/
    0,                                  /*tp_as_mapping*/
    0,                                  /*tp_hash*/
    0,                                  /*tp_call*/
    0,                                  /*tp_str*/
    0,                                  /*tp_getattro*/
    0,                                  /*tp_setattro*/
    0,                                  /*tp_as_buffer*/
    Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HAVE_GC, /*tp_flags*/
    PyDoc_STR("Signal the end from iterator.__anext__()."), /*tp_doc*/
    0,                                  /*tp_traverse*/
    0,                                  /*tp_clear*/
    0,                                  /*tp_richcompare*/
    0,                                  /*tp_weaklistoffset*/
    0,                                  /*tp_iter*/
    0,                                  /*tp_iternext*/
    0,                                  /*tp_methods*/
    0,                                  /*tp_members*/
    0,                                  /*tp_getset*/
    0,                                  /*tp_base*/
    0,                                  /*tp_dict*/
    0,                                  /*tp_descr_get*/
    0,                                  /*tp_descr_set*/
    0,                                  /*tp_dictoffset*/
    0,                                  /*tp_init*/
    0,                                  /*tp_alloc*/
    0,                                  /*tp_new*/
    0,                                  /*tp_free*/
    0,                                  /*tp_is_gc*/
    0,                                  /*tp_bases*/
    0,                                  /*tp_mro*/
    0,                                  /*tp_cache*/
    0,                                  /*tp_subclasses*/
    0,                                  /*tp_weaklist*/
    0,                                  /*tp_del*/
    0,                                  /*tp_version_tag*/
#if PY_VERSION_HEX >= 0x030400a1
    0,                                  /*tp_finalize*/
#endif
#if CYTHON_COMPILING_IN_PYPY && PYPY_VERSION_NUM+0 >= 0x06000000
    0,                                  /*tp_pypy_flags*/
#endif
};
#endif
#endif

// Resolve __Pyx_PyExc_StopAsyncIteration: the builtin on Py >= 3.5.0b1;
// otherwise reuse one already published in builtins (e.g. by another Cython
// module), else create the fallback type above and publish it in builtins
// so all modules share a single exception type.  Returns 0/-1.
static int __pyx_StopAsyncIteration_init(PyObject *module) {
    CYTHON_UNUSED_VAR(module);
#if PY_VERSION_HEX >= 0x030500B1
    __Pyx_PyExc_StopAsyncIteration = PyExc_StopAsyncIteration;
#else
    PyObject *builtins = PyEval_GetBuiltins();
    if (likely(builtins)) {
        PyObject *exc = PyMapping_GetItemString(builtins, (char*) "StopAsyncIteration");
        if (exc) {
            __Pyx_PyExc_StopAsyncIteration = exc;
            return 0;
        }
    }
    // lookup failure above is expected on first init — discard it
    PyErr_Clear();

    // Inherit GC/layout behaviour from the real exception machinery.
    __Pyx__PyExc_StopAsyncIteration_type.tp_traverse = ((PyTypeObject*)PyExc_BaseException)->tp_traverse;
    __Pyx__PyExc_StopAsyncIteration_type.tp_clear = ((PyTypeObject*)PyExc_BaseException)->tp_clear;
    __Pyx__PyExc_StopAsyncIteration_type.tp_dictoffset = ((PyTypeObject*)PyExc_BaseException)->tp_dictoffset;
    __Pyx__PyExc_StopAsyncIteration_type.tp_base = (PyTypeObject*)PyExc_Exception;

    __Pyx_PyExc_StopAsyncIteration = (PyObject*) __Pyx_FetchCommonType(&__Pyx__PyExc_StopAsyncIteration_type);
    if (unlikely(!__Pyx_PyExc_StopAsyncIteration))
        return -1;
    if (likely(builtins) && unlikely(PyMapping_SetItemString(builtins, (char*) "StopAsyncIteration", __Pyx_PyExc_StopAsyncIteration) < 0))
        return -1;
#endif
    return 0;
}
diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Utility/CppConvert.pyx b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Utility/CppConvert.pyx
new file mode 100644
index 0000000000000000000000000000000000000000..0e7cf4e2dc40e6eb7c21848b862a12116a35c82f
--- /dev/null
+++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Utility/CppConvert.pyx
@@ -0,0 +1,273 @@
# TODO: Figure out how many of the pass-by-value copies the compiler can eliminate.
# NOTE(review): this is a Cython utility-code *template*.  Placeholders such
# as {{cname}}, {{type}} and the type parameters X/Y are substituted by
# Cython's utility-code loader (Tempita) before compilation; the
# "#### section ####" headers delimit independently loadable snippets.
# Do not rename them.

#################### string.from_py ####################

cdef extern from *:
    cdef cppclass string "{{type}}":
        string() except +
        string(char* c_str, size_t size) except +
    cdef const char* __Pyx_PyObject_AsStringAndSize(object, Py_ssize_t*) except NULL

# Convert a Python bytes-like/str object into a C++ string-like {{type}}
# (length-aware, so embedded NULs survive).
@cname("{{cname}}")
cdef string {{cname}}(object o) except *:
    cdef Py_ssize_t length = 0
    cdef const char* data = __Pyx_PyObject_AsStringAndSize(o, &length)
    return string(data, length)


#################### string.to_py ####################

#cimport cython
#from libcpp.string cimport string
cdef extern from *:
    cdef cppclass string "{{type}}":
        char* data()
        size_t size()

# One converter per target Python object flavour; the generated names differ
# only in the Py* prefix substituted into {{cname}}.
{{for py_type in ['PyObject', 'PyUnicode', 'PyStr', 'PyBytes', 'PyByteArray']}}
cdef extern from *:
    cdef object __Pyx_{{py_type}}_FromStringAndSize(const char*, size_t)

@cname("{{cname.replace("PyObject", py_type, 1)}}")
cdef inline object {{cname.replace("PyObject", py_type, 1)}}(const string& s):
    return __Pyx_{{py_type}}_FromStringAndSize(s.data(), s.size())
{{endfor}}


#################### vector.from_py ####################

cdef extern from *:
    cdef cppclass vector "std::vector" [T]:
        void push_back(T&) except +

# Build a std::vector[X] by iterating any Python iterable; each item is
# coerced to X by the push_back call.
@cname("{{cname}}")
cdef vector[X] {{cname}}(object o) except *:
    cdef vector[X] v
    for item in o:
        v.push_back(item)
    return v


#################### vector.to_py ####################

cdef extern from *:
    cdef cppclass vector "std::vector" [T]:
        size_t size()
        T& operator[](size_t)

cdef extern from "Python.h":
    void Py_INCREF(object)
    list PyList_New(Py_ssize_t size)
    void PyList_SET_ITEM(object list, Py_ssize_t i, object o)
    const Py_ssize_t PY_SSIZE_T_MAX

# Convert std::vector[X] to a new Python list.  Guards against vectors too
# large for a Py_ssize_t before pre-sizing the list.
@cname("{{cname}}")
cdef object {{cname}}(const vector[X]& v):
    if v.size() > PY_SSIZE_T_MAX:
        raise MemoryError()
    v_size_signed = v.size()

    o = PyList_New(v_size_signed)

    cdef Py_ssize_t i
    cdef object item

    for i in range(v_size_signed):
        item = v[i]
        # PyList_SET_ITEM steals a reference — INCREF first.
        Py_INCREF(item)
        PyList_SET_ITEM(o, i, item)

    return o

#################### list.from_py ####################

cdef extern from *:
    cdef cppclass cpp_list "std::list" [T]:
        void push_back(T&) except +

# Build a std::list[X] from any Python iterable.
@cname("{{cname}}")
cdef cpp_list[X] {{cname}}(object o) except *:
    cdef cpp_list[X] l
    for item in o:
        l.push_back(item)
    return l


#################### list.to_py ####################

cimport cython

cdef extern from *:
    cdef cppclass cpp_list "std::list" [T]:
        cppclass const_iterator:
            T& operator*()
            const_iterator operator++()
            bint operator!=(const_iterator)
        const_iterator begin()
        const_iterator end()
        size_t size()

cdef extern from "Python.h":
    void Py_INCREF(object)
    list PyList_New(Py_ssize_t size)
    void PyList_SET_ITEM(object list, Py_ssize_t i, object o)
    cdef Py_ssize_t PY_SSIZE_T_MAX

# Convert std::list[X] to a new Python list via const_iterator traversal
# (std::list has no random access).
@cname("{{cname}}")
cdef object {{cname}}(const cpp_list[X]& v):
    if v.size() > PY_SSIZE_T_MAX:
        raise MemoryError()

    o = PyList_New(v.size())

    cdef object item
    cdef Py_ssize_t i = 0
    cdef cpp_list[X].const_iterator iter = v.begin()

    while iter != v.end():
        item = cython.operator.dereference(iter)
        # PyList_SET_ITEM steals a reference — INCREF first.
        Py_INCREF(item)
        PyList_SET_ITEM(o, i, item)
        cython.operator.preincrement(iter)
        i += 1

    return o


#################### set.from_py ####################

cdef extern from *:
    cdef cppclass set "std::{{maybe_unordered}}set" [T]:
        void insert(T&) except +

# Build a std::set / std::unordered_set (selected by {{maybe_unordered}})
# from any Python iterable; duplicates collapse as usual.
@cname("{{cname}}")
cdef set[X] {{cname}}(object o) except *:
    cdef set[X] s
    for item in o:
        s.insert(item)
    return s


#################### set.to_py ####################

cimport cython

cdef extern from *:
    cdef cppclass cpp_set "std::{{maybe_unordered}}set" [T]:
        cppclass const_iterator:
            T& operator*()
            const_iterator operator++()
            bint operator!=(const_iterator)
        const_iterator begin()
        const_iterator end()

# Convert a C++ (unordered_)set to a Python set via a set comprehension.
@cname("{{cname}}")
cdef object {{cname}}(const cpp_set[X]& s):
    return {v for v in s}

#################### pair.from_py
####################

cdef extern from *:
    cdef cppclass pair "std::pair" [T, U]:
        pair() except +
        pair(T&, U&) except +

# Convert a Python 2-item iterable into std::pair[X,Y] (tuple-unpacking
# enforces exactly two items).
@cname("{{cname}}")
cdef pair[X,Y] {{cname}}(object o) except *:
    x, y = o
    return pair[X,Y](x, y)


#################### pair.to_py ####################

cdef extern from *:
    cdef cppclass pair "std::pair" [T, U]:
        T first
        U second

# Convert std::pair[X,Y] to a Python 2-tuple.
@cname("{{cname}}")
cdef object {{cname}}(const pair[X,Y]& p):
    return p.first, p.second


#################### map.from_py ####################

cdef extern from *:
    cdef cppclass pair "std::pair" [T, U]:
        pair(T&, U&) except +
    cdef cppclass map "std::{{maybe_unordered}}map" [T, U]:
        void insert(pair[T, U]&) except +
    cdef cppclass vector "std::vector" [T]:
        pass
    int PY_MAJOR_VERSION


# Build a std::(unordered_)map[X,Y] from a Python mapping.
# Py2 needs iteritems() to avoid materialising the item list.
@cname("{{cname}}")
cdef map[X,Y] {{cname}}(object o) except *:
    cdef map[X,Y] m
    if PY_MAJOR_VERSION < 3:
        for key, value in o.iteritems():
            m.insert(pair[X,Y](key, value))
    else:
        for key, value in o.items():
            m.insert(pair[X,Y](key, value))
    return m


#################### map.to_py ####################
# TODO: Work out const so that this can take a const
# reference rather than pass by value.

cimport cython

cdef extern from *:
    cdef cppclass map "std::{{maybe_unordered}}map" [T, U]:
        cppclass value_type:
            T first
            U second
        cppclass const_iterator:
            value_type& operator*()
            const_iterator operator++()
            bint operator!=(const_iterator)
        const_iterator begin()
        const_iterator end()

# Convert a C++ (unordered_)map to a new Python dict, iterating value_type
# entries through a pointer to avoid copying each pair.
@cname("{{cname}}")
cdef object {{cname}}(const map[X,Y]& s):
    o = {}
    cdef const map[X,Y].value_type *key_value
    cdef map[X,Y].const_iterator iter = s.begin()
    while iter != s.end():
        key_value = &cython.operator.dereference(iter)
        o[key_value.first] = key_value.second
        cython.operator.preincrement(iter)
    return o


#################### complex.from_py ####################

cdef extern from *:
    cdef cppclass std_complex "std::complex" [T]:
        std_complex()
        std_complex(T, T) except +

# Convert any Python number with a complex interpretation into
# std::complex[X], going through a C double complex intermediate.
@cname("{{cname}}")
cdef std_complex[X] {{cname}}(object o) except *:
    cdef double complex z = o
    return std_complex[X](z.real, z.imag)


#################### complex.to_py ####################

cdef extern from *:
    cdef cppclass std_complex "std::complex" [T]:
        X real()
        X imag()

# Convert std::complex[X] to a Python complex via a C double complex
# temporary (coerced on return).
@cname("{{cname}}")
cdef object {{cname}}(const std_complex[X]& z):
    cdef double complex tmp
    tmp.real = z.real()
    tmp.imag = z.imag()
    return tmp
diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Utility/ExtensionTypes.c b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Utility/ExtensionTypes.c
new file mode 100644
index 0000000000000000000000000000000000000000..544e50b9420c9bbfa08715ada0d52160d4dc7fe2
--- /dev/null
+++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Utility/ExtensionTypes.c
@@ -0,0 +1,660 @@
/////////////// FixUpExtensionType.proto ///////////////

#if CYTHON_USE_TYPE_SPECS
static int __Pyx_fix_up_extension_type_from_spec(PyType_Spec *spec, PyTypeObject *type); /*proto*/
#endif

/////////////// FixUpExtensionType ///////////////
//@requires:ModuleSetupCode.c::IncludeStructmemberH
//@requires:StringTools.c::IncludeStringH

#if CYTHON_USE_TYPE_SPECS
// Post-process a type created via PyType_FromSpec(): older CPythons ignore
// the special __weaklistoffset__/__dictoffset__/__vectorcalloffset__ members
// in the Py_tp_members slot, so transfer them into the type struct here.
// No-op on CPython > 3.9b1 and in the Limited API.  Returns 0/-1.
static int __Pyx_fix_up_extension_type_from_spec(PyType_Spec *spec, PyTypeObject *type) {
#if PY_VERSION_HEX > 0x030900B1 || CYTHON_COMPILING_IN_LIMITED_API
    CYTHON_UNUSED_VAR(spec);
    CYTHON_UNUSED_VAR(type);
#else
    // Set tp_weakreflist, tp_dictoffset, tp_vectorcalloffset
    // Copied and adapted from https://bugs.python.org/issue38140
    const PyType_Slot *slot = spec->slots;
    while (slot && slot->slot && slot->slot != Py_tp_members)
        slot++;
    if (slot && slot->slot == Py_tp_members) {
        int changed = 0;
#if !(PY_VERSION_HEX <= 0x030900b1 && CYTHON_COMPILING_IN_CPYTHON)
        const
#endif
        PyMemberDef *memb = (PyMemberDef*) slot->pfunc;
        while (memb && memb->name) {
            if (memb->name[0] == '_' && memb->name[1] == '_') {
#if PY_VERSION_HEX < 0x030900b1
                if (strcmp(memb->name, "__weaklistoffset__") == 0) {
                    // The PyMemberDef must be a Py_ssize_t and readonly.
                    assert(memb->type == T_PYSSIZET);
                    assert(memb->flags == READONLY);
                    type->tp_weaklistoffset = memb->offset;
                    // FIXME: is it even worth calling PyType_Modified() here?
                    changed = 1;
                }
                else if (strcmp(memb->name, "__dictoffset__") == 0) {
                    // The PyMemberDef must be a Py_ssize_t and readonly.
                    assert(memb->type == T_PYSSIZET);
                    assert(memb->flags == READONLY);
                    type->tp_dictoffset = memb->offset;
                    // FIXME: is it even worth calling PyType_Modified() here?
                    changed = 1;
                }
#if CYTHON_METH_FASTCALL
                else if (strcmp(memb->name, "__vectorcalloffset__") == 0) {
                    // The PyMemberDef must be a Py_ssize_t and readonly.
                    assert(memb->type == T_PYSSIZET);
                    assert(memb->flags == READONLY);
#if PY_VERSION_HEX >= 0x030800b4
                    type->tp_vectorcall_offset = memb->offset;
#else
                    // pre-3.8b4 reused the defunct tp_print slot for vectorcall
                    type->tp_print = (printfunc) memb->offset;
#endif
                    // FIXME: is it even worth calling PyType_Modified() here?
                    changed = 1;
                }
#endif
#else
                if ((0));
#endif
#if PY_VERSION_HEX <= 0x030900b1 && CYTHON_COMPILING_IN_CPYTHON
                else if (strcmp(memb->name, "__module__") == 0) {
                    // PyType_FromSpec() in CPython <= 3.9b1 overwrites this field with a constant string.
                    // See https://bugs.python.org/issue40703
                    PyObject *descr;
                    // The PyMemberDef must be an object and normally readable, possibly writable.
                    assert(memb->type == T_OBJECT);
                    assert(memb->flags == 0 || memb->flags == READONLY);
                    descr = PyDescr_NewMember(type, memb);
                    if (unlikely(!descr))
                        return -1;
                    if (unlikely(PyDict_SetItem(type->tp_dict, PyDescr_NAME(descr), descr) < 0)) {
                        Py_DECREF(descr);
                        return -1;
                    }
                    Py_DECREF(descr);
                    changed = 1;
                }
#endif
            }
            memb++;
        }
        if (changed)
            PyType_Modified(type);
    }
#endif
    return 0;
}
#endif


/////////////// ValidateBasesTuple.proto ///////////////

#if CYTHON_COMPILING_IN_CPYTHON || CYTHON_COMPILING_IN_LIMITED_API || CYTHON_USE_TYPE_SPECS
static int __Pyx_validate_bases_tuple(const char *type_name, Py_ssize_t dictoffset, PyObject *bases); /*proto*/
#endif

/////////////// ValidateBasesTuple ///////////////

#if CYTHON_COMPILING_IN_CPYTHON || CYTHON_COMPILING_IN_LIMITED_API || CYTHON_USE_TYPE_SPECS
// Validate the extra (non-first) bases of an extension type before
// PyType_Ready/FromSpec.  Returns 0 if all bases are acceptable, -1 with an
// exception set otherwise.
static int __Pyx_validate_bases_tuple(const char *type_name, Py_ssize_t dictoffset, PyObject *bases) {
    // Loop over all bases (except the first) and check that those
    // really are heap types. Otherwise, it would not be safe to
    // subclass them.
    //
    // We also check tp_dictoffset: it is unsafe to inherit
    // tp_dictoffset from a base class because the object structures
    // would not be compatible. So, if our extension type doesn't set
    // tp_dictoffset (i.e. there is no __dict__ attribute in the object
    // structure), we need to check that none of the base classes sets
    // it either.
    Py_ssize_t i, n;
#if CYTHON_ASSUME_SAFE_MACROS
    n = PyTuple_GET_SIZE(bases);
#else
    n = PyTuple_Size(bases);
    if (n < 0) return -1;
#endif
    for (i = 1; i < n; i++) /* Skip first base */
    {
#if CYTHON_AVOID_BORROWED_REFS
        PyObject *b0 = PySequence_GetItem(bases, i);
        if (!b0) return -1;
#elif CYTHON_ASSUME_SAFE_MACROS
        PyObject *b0 = PyTuple_GET_ITEM(bases, i);
#else
        PyObject *b0 = PyTuple_GetItem(bases, i);
        if (!b0) return -1;
#endif
        PyTypeObject *b;
#if PY_MAJOR_VERSION < 3
        /* Disallow old-style classes */
        if (PyClass_Check(b0))
        {
            PyErr_Format(PyExc_TypeError, "base class '%.200s' is an old-style class",
                         PyString_AS_STRING(((PyClassObject*)b0)->cl_name));
#if CYTHON_AVOID_BORROWED_REFS
            Py_DECREF(b0);
#endif
            return -1;
        }
#endif
        b = (PyTypeObject*) b0;
        if (!__Pyx_PyType_HasFeature(b, Py_TPFLAGS_HEAPTYPE))
        {
            __Pyx_TypeName b_name = __Pyx_PyType_GetName(b);
            PyErr_Format(PyExc_TypeError,
                "base class '" __Pyx_FMT_TYPENAME "' is not a heap type", b_name);
            __Pyx_DECREF_TypeName(b_name);
#if CYTHON_AVOID_BORROWED_REFS
            Py_DECREF(b0);
#endif
            return -1;
        }
        if (dictoffset == 0)
        {
            Py_ssize_t b_dictoffset = 0;
#if CYTHON_USE_TYPE_SLOTS || CYTHON_COMPILING_IN_PYPY
            b_dictoffset = b->tp_dictoffset;
#else
            // Limited API: read __dictoffset__ as an attribute instead.
            PyObject *py_b_dictoffset = PyObject_GetAttrString((PyObject*)b, "__dictoffset__");
            if (!py_b_dictoffset) goto dictoffset_return;
            b_dictoffset = PyLong_AsSsize_t(py_b_dictoffset);
            Py_DECREF(py_b_dictoffset);
            if (b_dictoffset == -1 && PyErr_Occurred()) goto dictoffset_return;
#endif
            if (b_dictoffset) {
                {
                    __Pyx_TypeName b_name = __Pyx_PyType_GetName(b);
                    PyErr_Format(PyExc_TypeError,
                        "extension type '%.200s' has no __dict__ slot, "
                        "but base type '" __Pyx_FMT_TYPENAME "' has: "
                        "either add 'cdef dict __dict__' to the extension type "
                        "or add '__slots__ = [...]' to the base type",
                        type_name, b_name);
                    __Pyx_DECREF_TypeName(b_name);
                }
#if !(CYTHON_USE_TYPE_SLOTS || CYTHON_COMPILING_IN_PYPY)
            dictoffset_return:
#endif
#if CYTHON_AVOID_BORROWED_REFS
                Py_DECREF(b0);
#endif
                return -1;
            }
        }
#if CYTHON_AVOID_BORROWED_REFS
        Py_DECREF(b0);
#endif
    }
    return 0;
}
#endif


/////////////// PyType_Ready.proto ///////////////

// unused when using type specs
CYTHON_UNUSED static int __Pyx_PyType_Ready(PyTypeObject *t);/*proto*/

/////////////// PyType_Ready ///////////////
//@requires: ObjectHandling.c::PyObjectCallMethod0
//@requires: ValidateBasesTuple

// Wrapper around PyType_Ready() with some runtime checks and fixes
// to deal with multiple inheritance.
static int __Pyx_PyType_Ready(PyTypeObject *t) {

// FIXME: is this really suitable for CYTHON_COMPILING_IN_LIMITED_API?
#if CYTHON_USE_TYPE_SPECS || !(CYTHON_COMPILING_IN_CPYTHON || CYTHON_COMPILING_IN_LIMITED_API) || defined(PYSTON_MAJOR_VERSION)
    // avoid C warning about unused helper function
    (void)__Pyx_PyObject_CallMethod0;
#if CYTHON_USE_TYPE_SPECS
    (void)__Pyx_validate_bases_tuple;
#endif

    return PyType_Ready(t);

#else
    int r;
    PyObject *bases = __Pyx_PyType_GetSlot(t, tp_bases, PyObject*);
    if (bases && unlikely(__Pyx_validate_bases_tuple(t->tp_name, t->tp_dictoffset, bases) == -1))
        return -1;

#if PY_VERSION_HEX >= 0x03050000 && !defined(PYSTON_MAJOR_VERSION)
    {
        // Make sure GC does not pick up our non-heap type as heap type with this hack!
        // For details, see https://github.com/cython/cython/issues/3603
        int gc_was_enabled;
    #if PY_VERSION_HEX >= 0x030A00b1
        // finally added in Py3.10 :)
        gc_was_enabled = PyGC_Disable();
        (void)__Pyx_PyObject_CallMethod0;

    #else
        // Call gc.disable() as a backwards compatible fallback, but only if needed.
        PyObject *ret, *py_status;
        PyObject *gc = NULL;
    #if PY_VERSION_HEX >= 0x030700a1 && (!CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM+0 >= 0x07030400)
        // https://foss.heptapod.net/pypy/pypy/-/issues/3385
        gc = PyImport_GetModule(PYUNICODE("gc"));
    #endif
        if (unlikely(!gc)) gc = PyImport_Import(PYUNICODE("gc"));
        if (unlikely(!gc)) return -1;
        py_status = __Pyx_PyObject_CallMethod0(gc, PYUNICODE("isenabled"));
        if (unlikely(!py_status)) {
            Py_DECREF(gc);
            return -1;
        }
        gc_was_enabled = __Pyx_PyObject_IsTrue(py_status);
        Py_DECREF(py_status);
        if (gc_was_enabled > 0) {
            ret = __Pyx_PyObject_CallMethod0(gc, PYUNICODE("disable"));
            if (unlikely(!ret)) {
                Py_DECREF(gc);
                return -1;
            }
            Py_DECREF(ret);
        } else if (unlikely(gc_was_enabled == -1)) {
            Py_DECREF(gc);
            return -1;
        }
    #endif

        // As of https://bugs.python.org/issue22079
        // PyType_Ready enforces that all bases of a non-heap type are
        // non-heap. We know that this is the case for the solid base but
        // other bases are heap allocated and are kept alive through the
        // tp_bases reference.
        // Other than this check, the Py_TPFLAGS_HEAPTYPE flag is unused
        // in PyType_Ready().
        t->tp_flags |= Py_TPFLAGS_HEAPTYPE;
#if PY_VERSION_HEX >= 0x030A0000
        // As of https://github.com/python/cpython/pull/25520
        // PyType_Ready marks types as immutable if they are static types
        // and requires the Py_TPFLAGS_IMMUTABLETYPE flag to mark types as
        // immutable
        // Manually set the Py_TPFLAGS_IMMUTABLETYPE flag, since the type
        // is immutable
        t->tp_flags |= Py_TPFLAGS_IMMUTABLETYPE;
#endif
#else
    // avoid C warning about unused helper function
    (void)__Pyx_PyObject_CallMethod0;
#endif

    r = PyType_Ready(t);

#if PY_VERSION_HEX >= 0x03050000 && !defined(PYSTON_MAJOR_VERSION)
        // Undo the temporary HEAPTYPE marking and restore the GC state.
        t->tp_flags &= ~Py_TPFLAGS_HEAPTYPE;

    #if PY_VERSION_HEX >= 0x030A00b1
        if (gc_was_enabled)
            PyGC_Enable();
    #else
        if (gc_was_enabled) {
            PyObject *tp, *v, *tb;
            PyErr_Fetch(&tp, &v, &tb);
            ret = __Pyx_PyObject_CallMethod0(gc, PYUNICODE("enable"));
            if (likely(ret || r == -1)) {
                Py_XDECREF(ret);
                // do not overwrite exceptions raised by PyType_Ready() above
                PyErr_Restore(tp, v, tb);
            } else {
                // PyType_Ready() succeeded, but gc.enable() failed.
                Py_XDECREF(tp);
                Py_XDECREF(v);
                Py_XDECREF(tb);
                r = -1;
            }
        }
        Py_DECREF(gc);
    #endif
    }
#endif

    return r;
#endif
}


/////////////// PyTrashcan.proto ///////////////

// These macros are taken from https://github.com/python/cpython/pull/11841
// Unlike the Py_TRASHCAN_SAFE_BEGIN/Py_TRASHCAN_SAFE_END macros, they
// allow dealing correctly with subclasses.
// This requires CPython version >= 2.7.4
// (or >= 3.2.4 but we don't support such old Python 3 versions anyway)
#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x03080000
// https://github.com/python/cpython/pull/11841 merged so Cython reimplementation
// is no longer necessary
#define __Pyx_TRASHCAN_BEGIN Py_TRASHCAN_BEGIN
#define __Pyx_TRASHCAN_END Py_TRASHCAN_END
#elif CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x02070400
// NOTE(review): the // comments inside these macro bodies lack trailing
// backslashes; presumably Cython's utility-code loader strips C comments
// before emitting the code, making the macros valid — confirm in the loader.
#define __Pyx_TRASHCAN_BEGIN_CONDITION(op, cond) \
    do { \
        PyThreadState *_tstate = NULL; \
        // If "cond" is false, then _tstate remains NULL and the deallocator
        // is run normally without involving the trashcan
        if (cond) { \
            _tstate = PyThreadState_GET(); \
            if (_tstate->trash_delete_nesting >= PyTrash_UNWIND_LEVEL) { \
                // Store the object (to be deallocated later) and jump past
                // Py_TRASHCAN_END, skipping the body of the deallocator
                _PyTrash_thread_deposit_object((PyObject*)(op)); \
                break; \
            } \
            ++_tstate->trash_delete_nesting; \
        }
    // The body of the deallocator is here.
#define __Pyx_TRASHCAN_END \
        if (_tstate) { \
            --_tstate->trash_delete_nesting; \
            if (_tstate->trash_delete_later && _tstate->trash_delete_nesting <= 0) \
                _PyTrash_thread_destroy_chain(); \
        } \
    } while (0);

#define __Pyx_TRASHCAN_BEGIN(op, dealloc) __Pyx_TRASHCAN_BEGIN_CONDITION(op, \
        __Pyx_PyObject_GetSlot(op, tp_dealloc, destructor) == (destructor)(dealloc))

#else
// The trashcan is a no-op on other Python implementations
// or old CPython versions
#define __Pyx_TRASHCAN_BEGIN(op, dealloc)
#define __Pyx_TRASHCAN_END
#endif

/////////////// CallNextTpDealloc.proto ///////////////

static void __Pyx_call_next_tp_dealloc(PyObject* obj, destructor current_tp_dealloc);

/////////////// CallNextTpDealloc ///////////////

// Walk the MRO base chain past every type sharing current_tp_dealloc and
// invoke the first *different* tp_dealloc found (i.e. the inherited
// deallocator), if any.
static void __Pyx_call_next_tp_dealloc(PyObject* obj, destructor current_tp_dealloc) {
    PyTypeObject* type = Py_TYPE(obj);
    destructor tp_dealloc = NULL;
    /* try to find the first parent type that has a different tp_dealloc() function */
    while (type && __Pyx_PyType_GetSlot(type, tp_dealloc, destructor) != current_tp_dealloc)
        type = __Pyx_PyType_GetSlot(type, tp_base, PyTypeObject*);
    while (type && (tp_dealloc = __Pyx_PyType_GetSlot(type, tp_dealloc, destructor)) == current_tp_dealloc)
        type = __Pyx_PyType_GetSlot(type, tp_base, PyTypeObject*);
    if (type)
        tp_dealloc(obj);
}

/////////////// CallNextTpTraverse.proto ///////////////

static int __Pyx_call_next_tp_traverse(PyObject* obj, visitproc v, void *a, traverseproc current_tp_traverse);

/////////////// CallNextTpTraverse ///////////////

// Same base-chain walk as CallNextTpDealloc, but for tp_traverse; returns
// 0 when no distinct inherited traverse function exists.
static int __Pyx_call_next_tp_traverse(PyObject* obj, visitproc v, void *a, traverseproc current_tp_traverse) {
    PyTypeObject* type = Py_TYPE(obj);
    traverseproc tp_traverse = NULL;
    /* try to find the first parent type that has a different tp_traverse() function */
    while (type && __Pyx_PyType_GetSlot(type, tp_traverse, traverseproc) != current_tp_traverse)
        type = __Pyx_PyType_GetSlot(type, tp_base, PyTypeObject*);
    while (type && (tp_traverse = __Pyx_PyType_GetSlot(type, tp_traverse, traverseproc)) == current_tp_traverse)
        type = __Pyx_PyType_GetSlot(type, tp_base, PyTypeObject*);
    if (type && tp_traverse)
        return tp_traverse(obj, v, a);
    // FIXME: really ignore?
    return 0;
}

/////////////// CallNextTpClear.proto ///////////////

static void __Pyx_call_next_tp_clear(PyObject* obj, inquiry current_tp_clear);

/////////////// CallNextTpClear ///////////////

// Same base-chain walk, for tp_clear; silently does nothing when no distinct
// inherited clear function exists.
static void __Pyx_call_next_tp_clear(PyObject* obj, inquiry current_tp_clear) {
    PyTypeObject* type = Py_TYPE(obj);
    inquiry tp_clear = NULL;
    /* try to find the first parent type that has a different tp_clear() function */
    while (type && __Pyx_PyType_GetSlot(type, tp_clear, inquiry) != current_tp_clear)
        type = __Pyx_PyType_GetSlot(type, tp_base, PyTypeObject*);
    while (type && (tp_clear = __Pyx_PyType_GetSlot(type, tp_clear, inquiry)) == current_tp_clear)
        type = __Pyx_PyType_GetSlot(type, tp_base, PyTypeObject*);
    if (type && tp_clear)
        tp_clear(obj);
}

/////////////// SetupReduce.proto ///////////////

#if !CYTHON_COMPILING_IN_LIMITED_API
static int __Pyx_setup_reduce(PyObject* type_obj);
#endif

/////////////// SetupReduce ///////////////
//@requires: ObjectHandling.c::PyObjectGetAttrStrNoError
//@requires: ObjectHandling.c::PyObjectGetAttrStr
//@substitute: naming

#if !CYTHON_COMPILING_IN_LIMITED_API
// Return 1 if "meth" has the given __name__, 0 otherwise; comparison errors
// are swallowed and reported as "not named".
static int __Pyx_setup_reduce_is_named(PyObject* meth, PyObject* name) {
    int ret;
    PyObject *name_attr;

    name_attr = __Pyx_PyObject_GetAttrStrNoError(meth, PYIDENT("__name__"));
    if (likely(name_attr)) {
        ret = PyObject_RichCompareBool(name_attr, name, Py_EQ);
    } else {
        ret = -1;
    }

    if (unlikely(ret < 0)) {
        PyErr_Clear();
        ret = 0;
    }

    Py_XDECREF(name_attr);
    return ret;
}

// Wire up pickling for an extension type: if the type still uses the default
// object.__reduce_ex__/__reduce__ machinery (and no custom __getstate__),
// install the generated __reduce_cython__/__setstate_cython__ helpers under
// the public __reduce__/__setstate__ names.  Returns 0/-1.
static int __Pyx_setup_reduce(PyObject* type_obj) {
    int ret = 0;
    PyObject *object_reduce = NULL;
    PyObject *object_getstate = NULL;
    PyObject *object_reduce_ex = NULL;
    PyObject *reduce = NULL;
    PyObject *reduce_ex = NULL;
    PyObject *reduce_cython = NULL;
    PyObject *setstate = NULL;
    PyObject *setstate_cython = NULL;
    PyObject *getstate = NULL;

#if CYTHON_USE_PYTYPE_LOOKUP
    getstate = _PyType_Lookup((PyTypeObject*)type_obj, PYIDENT("__getstate__"));
#else
    getstate = __Pyx_PyObject_GetAttrStrNoError(type_obj, PYIDENT("__getstate__"));
    if (!getstate && PyErr_Occurred()) {
        goto __PYX_BAD;
    }
#endif
    if (getstate) {
        // Python 3.11 introduces object.__getstate__. Because it's version-specific failure to find it should not be an error
#if CYTHON_USE_PYTYPE_LOOKUP
        object_getstate = _PyType_Lookup(&PyBaseObject_Type, PYIDENT("__getstate__"));
#else
        object_getstate = __Pyx_PyObject_GetAttrStrNoError((PyObject*)&PyBaseObject_Type, PYIDENT("__getstate__"));
        if (!object_getstate && PyErr_Occurred()) {
            goto __PYX_BAD;
        }
#endif
        // A user-defined __getstate__ overrides the generated pickling.
        if (object_getstate != getstate) {
            goto __PYX_GOOD;
        }
    }

#if CYTHON_USE_PYTYPE_LOOKUP
    object_reduce_ex = _PyType_Lookup(&PyBaseObject_Type, PYIDENT("__reduce_ex__")); if (!object_reduce_ex) goto __PYX_BAD;
#else
    object_reduce_ex = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, PYIDENT("__reduce_ex__")); if (!object_reduce_ex) goto __PYX_BAD;
#endif

    reduce_ex = __Pyx_PyObject_GetAttrStr(type_obj, PYIDENT("__reduce_ex__")); if (unlikely(!reduce_ex)) goto __PYX_BAD;
    if (reduce_ex == object_reduce_ex) {

#if CYTHON_USE_PYTYPE_LOOKUP
        object_reduce = _PyType_Lookup(&PyBaseObject_Type, PYIDENT("__reduce__")); if (!object_reduce) goto __PYX_BAD;
#else
        object_reduce = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, PYIDENT("__reduce__")); if (!object_reduce) goto __PYX_BAD;
#endif
        reduce = __Pyx_PyObject_GetAttrStr(type_obj, PYIDENT("__reduce__")); if (unlikely(!reduce)) goto __PYX_BAD;

        if (reduce == object_reduce || __Pyx_setup_reduce_is_named(reduce, PYIDENT("__reduce_cython__"))) {
            reduce_cython = __Pyx_PyObject_GetAttrStrNoError(type_obj, PYIDENT("__reduce_cython__"));
            if (likely(reduce_cython)) {
                // Rename the generated helper into the public slot and drop the helper name.
                ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, PYIDENT("__reduce__"), reduce_cython); if (unlikely(ret < 0)) goto __PYX_BAD;
                ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, PYIDENT("__reduce_cython__")); if (unlikely(ret < 0)) goto __PYX_BAD;
            } else if (reduce == object_reduce || PyErr_Occurred()) {
                // Ignore if we're done, i.e. if 'reduce' already has the right name and the original is gone.
                // Otherwise: error.
                goto __PYX_BAD;
            }

            setstate = __Pyx_PyObject_GetAttrStrNoError(type_obj, PYIDENT("__setstate__"));
            if (!setstate) PyErr_Clear();
            if (!setstate || __Pyx_setup_reduce_is_named(setstate, PYIDENT("__setstate_cython__"))) {
                setstate_cython = __Pyx_PyObject_GetAttrStrNoError(type_obj, PYIDENT("__setstate_cython__"));
                if (likely(setstate_cython)) {
                    ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, PYIDENT("__setstate__"), setstate_cython); if (unlikely(ret < 0)) goto __PYX_BAD;
                    ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, PYIDENT("__setstate_cython__")); if (unlikely(ret < 0)) goto __PYX_BAD;
                } else if (!setstate || PyErr_Occurred()) {
                    // Ignore if we're done, i.e. if 'setstate' already has the right name and the original is gone.
                    // Otherwise: error.
                    goto __PYX_BAD;
                }
            }
            PyType_Modified((PyTypeObject*)type_obj);
        }
    }
    goto __PYX_GOOD;

__PYX_BAD:
    if (!PyErr_Occurred()) {
        __Pyx_TypeName type_obj_name =
            __Pyx_PyType_GetName((PyTypeObject*)type_obj);
        PyErr_Format(PyExc_RuntimeError,
            "Unable to initialize pickling for " __Pyx_FMT_TYPENAME, type_obj_name);
        __Pyx_DECREF_TypeName(type_obj_name);
    }
    ret = -1;
__PYX_GOOD:
#if !CYTHON_USE_PYTYPE_LOOKUP
    // _PyType_Lookup() returns borrowed refs; the GetAttr path returns new ones.
    Py_XDECREF(object_reduce);
    Py_XDECREF(object_reduce_ex);
    Py_XDECREF(object_getstate);
    Py_XDECREF(getstate);
#endif
    Py_XDECREF(reduce);
    Py_XDECREF(reduce_ex);
    Py_XDECREF(reduce_cython);
    Py_XDECREF(setstate);
    Py_XDECREF(setstate_cython);
    return ret;
}
#endif


/////////////// BinopSlot ///////////////

// Template for binary-operator slot dispatch ({{slot_name}} etc. are
// substituted per operator by the code generator).  Fetch and invoke a
// type's number slot, or return NotImplemented when the slot is absent.
static CYTHON_INLINE PyObject *{{func_name}}_maybe_call_slot(PyTypeObject* type, PyObject *left, PyObject *right {{extra_arg_decl}}) {
    {{slot_type}} slot;
#if CYTHON_USE_TYPE_SLOTS || PY_MAJOR_VERSION < 3 || CYTHON_COMPILING_IN_PYPY
    slot = type->tp_as_number ? type->tp_as_number->{{slot_name}} : NULL;
#else
    slot = ({{slot_type}}) PyType_GetSlot(type, Py_{{slot_name}});
#endif
    return slot ? slot(left, right {{extra_arg}}) : __Pyx_NewRef(Py_NotImplemented);
}

// Combined left/right binary-operator implementation: decide which operand
// "is" this extension type (same type, same slot function, or subtype) and
// try the corresponding overload, falling back to NotImplemented.
static PyObject *{{func_name}}(PyObject *left, PyObject *right {{extra_arg_decl}}) {
    int maybe_self_is_left, maybe_self_is_right = 0;
    maybe_self_is_left = Py_TYPE(left) == Py_TYPE(right)
#if CYTHON_USE_TYPE_SLOTS
            || (Py_TYPE(left)->tp_as_number && Py_TYPE(left)->tp_as_number->{{slot_name}} == &{{func_name}})
#endif
            || __Pyx_TypeCheck(left, {{type_cname}});

    // Optimize for the common case where the left operation is defined (and successful).
+ {{if not overloads_left}} + maybe_self_is_right = Py_TYPE(left) == Py_TYPE(right) +#if CYTHON_USE_TYPE_SLOTS + || (Py_TYPE(right)->tp_as_number && Py_TYPE(right)->tp_as_number->{{slot_name}} == &{{func_name}}) +#endif + || __Pyx_TypeCheck(right, {{type_cname}}); + {{endif}} + + if (maybe_self_is_left) { + PyObject *res; + + {{if overloads_right and not overloads_left}} + if (maybe_self_is_right) { + res = {{call_right}}; + if (res != Py_NotImplemented) return res; + Py_DECREF(res); + // Don't bother calling it again. + maybe_self_is_right = 0; + } + {{endif}} + + res = {{call_left}}; + if (res != Py_NotImplemented) return res; + Py_DECREF(res); + } + + {{if overloads_left}} + maybe_self_is_right = Py_TYPE(left) == Py_TYPE(right) +#if CYTHON_USE_TYPE_SLOTS + || (Py_TYPE(right)->tp_as_number && Py_TYPE(right)->tp_as_number->{{slot_name}} == &{{func_name}}) +#endif + || PyType_IsSubtype(Py_TYPE(right), {{type_cname}}); + {{endif}} + + if (maybe_self_is_right) { + return {{call_right}}; + } + return __Pyx_NewRef(Py_NotImplemented); +} + +/////////////// ValidateExternBase.proto /////////////// + +static int __Pyx_validate_extern_base(PyTypeObject *base); /* proto */ + +/////////////// ValidateExternBase /////////////// +//@requires: ObjectHandling.c::FormatTypeName + +static int __Pyx_validate_extern_base(PyTypeObject *base) { + Py_ssize_t itemsize; +#if CYTHON_COMPILING_IN_LIMITED_API + PyObject *py_itemsize; +#endif +#if !CYTHON_COMPILING_IN_LIMITED_API + itemsize = ((PyTypeObject *)base)->tp_itemsize; +#else + py_itemsize = PyObject_GetAttrString((PyObject*)base, "__itemsize__"); + if (!py_itemsize) + return -1; + itemsize = PyLong_AsSsize_t(py_itemsize); + Py_DECREF(py_itemsize); + py_itemsize = 0; + if (itemsize == (Py_ssize_t)-1 && PyErr_Occurred()) + return -1; +#endif + if (itemsize) { + __Pyx_TypeName b_name = __Pyx_PyType_GetName(base); + PyErr_Format(PyExc_TypeError, + "inheritance from PyVarObject types like '" __Pyx_FMT_TYPENAME "' not currently 
supported", b_name); + __Pyx_DECREF_TypeName(b_name); + return -1; + } + return 0; +} diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Utility/FunctionArguments.c b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Utility/FunctionArguments.c new file mode 100644 index 0000000000000000000000000000000000000000..961fbc26ef8b5910e6ee6452aba60b24254ed19b --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Utility/FunctionArguments.c @@ -0,0 +1,587 @@ +//////////////////// ArgTypeTest.proto //////////////////// + + +#define __Pyx_ArgTypeTest(obj, type, none_allowed, name, exact) \ + ((likely(__Pyx_IS_TYPE(obj, type) | (none_allowed && (obj == Py_None)))) ? 1 : \ + __Pyx__ArgTypeTest(obj, type, name, exact)) + +static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact); /*proto*/ + +//////////////////// ArgTypeTest //////////////////// + +static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact) +{ + __Pyx_TypeName type_name; + __Pyx_TypeName obj_type_name; + if (unlikely(!type)) { + PyErr_SetString(PyExc_SystemError, "Missing type object"); + return 0; + } + else if (exact) { + #if PY_MAJOR_VERSION == 2 + if ((type == &PyBaseString_Type) && likely(__Pyx_PyBaseString_CheckExact(obj))) return 1; + #endif + } + else { + if (likely(__Pyx_TypeCheck(obj, type))) return 1; + } + type_name = __Pyx_PyType_GetName(type); + obj_type_name = __Pyx_PyType_GetName(Py_TYPE(obj)); + PyErr_Format(PyExc_TypeError, + "Argument '%.200s' has incorrect type (expected " __Pyx_FMT_TYPENAME + ", got " __Pyx_FMT_TYPENAME ")", name, type_name, obj_type_name); + __Pyx_DECREF_TypeName(type_name); + __Pyx_DECREF_TypeName(obj_type_name); + return 0; +} + +//////////////////// RaiseArgTupleInvalid.proto //////////////////// + +static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact, + Py_ssize_t num_min, Py_ssize_t num_max, 
Py_ssize_t num_found); /*proto*/ + +//////////////////// RaiseArgTupleInvalid //////////////////// + +// __Pyx_RaiseArgtupleInvalid raises the correct exception when too +// many or too few positional arguments were found. This handles +// Py_ssize_t formatting correctly. + +static void __Pyx_RaiseArgtupleInvalid( + const char* func_name, + int exact, + Py_ssize_t num_min, + Py_ssize_t num_max, + Py_ssize_t num_found) +{ + Py_ssize_t num_expected; + const char *more_or_less; + + if (num_found < num_min) { + num_expected = num_min; + more_or_less = "at least"; + } else { + num_expected = num_max; + more_or_less = "at most"; + } + if (exact) { + more_or_less = "exactly"; + } + PyErr_Format(PyExc_TypeError, + "%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)", + func_name, more_or_less, num_expected, + (num_expected == 1) ? "" : "s", num_found); +} + + +//////////////////// RaiseKeywordRequired.proto //////////////////// + +static void __Pyx_RaiseKeywordRequired(const char* func_name, PyObject* kw_name); /*proto*/ + +//////////////////// RaiseKeywordRequired //////////////////// + +static void __Pyx_RaiseKeywordRequired(const char* func_name, PyObject* kw_name) { + PyErr_Format(PyExc_TypeError, + #if PY_MAJOR_VERSION >= 3 + "%s() needs keyword-only argument %U", func_name, kw_name); + #else + "%s() needs keyword-only argument %s", func_name, + PyString_AS_STRING(kw_name)); + #endif +} + + +//////////////////// RaiseDoubleKeywords.proto //////////////////// + +static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name); /*proto*/ + +//////////////////// RaiseDoubleKeywords //////////////////// + +static void __Pyx_RaiseDoubleKeywordsError( + const char* func_name, + PyObject* kw_name) +{ + PyErr_Format(PyExc_TypeError, + #if PY_MAJOR_VERSION >= 3 + "%s() got multiple values for keyword argument '%U'", func_name, kw_name); + #else + "%s() got multiple values for keyword argument '%s'", 
func_name, + PyString_AsString(kw_name)); + #endif +} + + +//////////////////// RaiseMappingExpected.proto //////////////////// + +static void __Pyx_RaiseMappingExpectedError(PyObject* arg); /*proto*/ + +//////////////////// RaiseMappingExpected //////////////////// + +static void __Pyx_RaiseMappingExpectedError(PyObject* arg) { + __Pyx_TypeName arg_type_name = __Pyx_PyType_GetName(Py_TYPE(arg)); + PyErr_Format(PyExc_TypeError, + "'" __Pyx_FMT_TYPENAME "' object is not a mapping", arg_type_name); + __Pyx_DECREF_TypeName(arg_type_name); +} + + +//////////////////// KeywordStringCheck.proto //////////////////// + +static int __Pyx_CheckKeywordStrings(PyObject *kw, const char* function_name, int kw_allowed); /*proto*/ + +//////////////////// KeywordStringCheck //////////////////// + +// __Pyx_CheckKeywordStrings raises an error if non-string keywords +// were passed to a function, or if any keywords were passed to a +// function that does not accept them. +// +// The "kw" argument is either a dict (for METH_VARARGS) or a tuple +// (for METH_FASTCALL). 
+ +static int __Pyx_CheckKeywordStrings( + PyObject *kw, + const char* function_name, + int kw_allowed) +{ + PyObject* key = 0; + Py_ssize_t pos = 0; +#if CYTHON_COMPILING_IN_PYPY + /* PyPy appears to check keywords at call time, not at unpacking time => not much to do here */ + if (!kw_allowed && PyDict_Next(kw, &pos, &key, 0)) + goto invalid_keyword; + return 1; +#else + if (CYTHON_METH_FASTCALL && likely(PyTuple_Check(kw))) { + Py_ssize_t kwsize; +#if CYTHON_ASSUME_SAFE_MACROS + kwsize = PyTuple_GET_SIZE(kw); +#else + kwsize = PyTuple_Size(kw); + if (kwsize < 0) return 0; +#endif + if (unlikely(kwsize == 0)) + return 1; + if (!kw_allowed) { +#if CYTHON_ASSUME_SAFE_MACROS + key = PyTuple_GET_ITEM(kw, 0); +#else + key = PyTuple_GetItem(kw, pos); + if (!key) return 0; +#endif + goto invalid_keyword; + } +#if PY_VERSION_HEX < 0x03090000 + // On CPython >= 3.9, the FASTCALL protocol guarantees that keyword + // names are strings (see https://bugs.python.org/issue37540) + for (pos = 0; pos < kwsize; pos++) { +#if CYTHON_ASSUME_SAFE_MACROS + key = PyTuple_GET_ITEM(kw, pos); +#else + key = PyTuple_GetItem(kw, pos); + if (!key) return 0; +#endif + if (unlikely(!PyUnicode_Check(key))) + goto invalid_keyword_type; + } +#endif + return 1; + } + + while (PyDict_Next(kw, &pos, &key, 0)) { + #if PY_MAJOR_VERSION < 3 + if (unlikely(!PyString_Check(key))) + #endif + if (unlikely(!PyUnicode_Check(key))) + goto invalid_keyword_type; + } + if (!kw_allowed && unlikely(key)) + goto invalid_keyword; + return 1; +invalid_keyword_type: + PyErr_Format(PyExc_TypeError, + "%.200s() keywords must be strings", function_name); + return 0; +#endif +invalid_keyword: + #if PY_MAJOR_VERSION < 3 + PyErr_Format(PyExc_TypeError, + "%.200s() got an unexpected keyword argument '%.200s'", + function_name, PyString_AsString(key)); + #else + PyErr_Format(PyExc_TypeError, + "%s() got an unexpected keyword argument '%U'", + function_name, key); + #endif + return 0; +} + + +//////////////////// 
ParseKeywords.proto //////////////////// + +static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject *const *kwvalues, + PyObject **argnames[], + PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, + const char* function_name); /*proto*/ + +//////////////////// ParseKeywords //////////////////// +//@requires: RaiseDoubleKeywords + +// __Pyx_ParseOptionalKeywords copies the optional/unknown keyword +// arguments from kwds into the dict kwds2. If kwds2 is NULL, unknown +// keywords will raise an invalid keyword error. +// +// When not using METH_FASTCALL, kwds is a dict and kwvalues is NULL. +// Otherwise, kwds is a tuple with keyword names and kwvalues is a C +// array with the corresponding values. +// +// Three kinds of errors are checked: 1) non-string keywords, 2) +// unexpected keywords and 3) overlap with positional arguments. +// +// If num_posargs is greater 0, it denotes the number of positional +// arguments that were passed and that must therefore not appear +// amongst the keywords as well. +// +// This method does not check for required keyword arguments. + +static int __Pyx_ParseOptionalKeywords( + PyObject *kwds, + PyObject *const *kwvalues, + PyObject **argnames[], + PyObject *kwds2, + PyObject *values[], + Py_ssize_t num_pos_args, + const char* function_name) +{ + PyObject *key = 0, *value = 0; + Py_ssize_t pos = 0; + PyObject*** name; + PyObject*** first_kw_arg = argnames + num_pos_args; + int kwds_is_tuple = CYTHON_METH_FASTCALL && likely(PyTuple_Check(kwds)); + + while (1) { + // clean up key and value when the loop is "continued" + Py_XDECREF(key); key = NULL; + Py_XDECREF(value); value = NULL; + + if (kwds_is_tuple) { + Py_ssize_t size; +#if CYTHON_ASSUME_SAFE_MACROS + size = PyTuple_GET_SIZE(kwds); +#else + size = PyTuple_Size(kwds); + if (size < 0) goto bad; +#endif + if (pos >= size) break; + +#if CYTHON_AVOID_BORROWED_REFS + // Get an owned reference to key. 
+ key = __Pyx_PySequence_ITEM(kwds, pos); + if (!key) goto bad; +#elif CYTHON_ASSUME_SAFE_MACROS + key = PyTuple_GET_ITEM(kwds, pos); +#else + key = PyTuple_GetItem(kwds, pos); + if (!key) goto bad; +#endif + + value = kwvalues[pos]; + pos++; + } + else + { + if (!PyDict_Next(kwds, &pos, &key, &value)) break; + // It's unfortunately hard to avoid borrowed references (briefly) with PyDict_Next +#if CYTHON_AVOID_BORROWED_REFS + // Own the reference to match the behaviour above. + Py_INCREF(key); +#endif + } + + name = first_kw_arg; + while (*name && (**name != key)) name++; + if (*name) { + values[name-argnames] = value; +#if CYTHON_AVOID_BORROWED_REFS + Py_INCREF(value); /* transfer ownership of value to values */ + Py_DECREF(key); +#endif + key = NULL; + value = NULL; + continue; + } + + // Now make sure we own both references since we're doing non-trivial Python operations. +#if !CYTHON_AVOID_BORROWED_REFS + Py_INCREF(key); +#endif + Py_INCREF(value); + + name = first_kw_arg; + #if PY_MAJOR_VERSION < 3 + if (likely(PyString_Check(key))) { + while (*name) { + if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key)) + && _PyString_Eq(**name, key)) { + values[name-argnames] = value; +#if CYTHON_AVOID_BORROWED_REFS + value = NULL; /* ownership transferred to values */ +#endif + break; + } + name++; + } + if (*name) continue; + else { + // not found after positional args, check for duplicate + PyObject*** argname = argnames; + while (argname != first_kw_arg) { + if ((**argname == key) || ( + (CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key)) + && _PyString_Eq(**argname, key))) { + goto arg_passed_twice; + } + argname++; + } + } + } else + #endif + if (likely(PyUnicode_Check(key))) { + while (*name) { + int cmp = ( + #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 + (__Pyx_PyUnicode_GET_LENGTH(**name) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 
1 : + #endif + // In Py2, we may need to convert the argument name from str to unicode for comparison. + PyUnicode_Compare(**name, key) + ); + if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; + if (cmp == 0) { + values[name-argnames] = value; +#if CYTHON_AVOID_BORROWED_REFS + value = NULL; /* ownership transferred to values */ +#endif + break; + } + name++; + } + if (*name) continue; + else { + // not found after positional args, check for duplicate + PyObject*** argname = argnames; + while (argname != first_kw_arg) { + int cmp = (**argname == key) ? 0 : + #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 + (__Pyx_PyUnicode_GET_LENGTH(**argname) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 1 : + #endif + // need to convert argument name from bytes to unicode for comparison + PyUnicode_Compare(**argname, key); + if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; + if (cmp == 0) goto arg_passed_twice; + argname++; + } + } + } else + goto invalid_keyword_type; + + if (kwds2) { + if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad; + } else { + goto invalid_keyword; + } + } + Py_XDECREF(key); + Py_XDECREF(value); + return 0; +arg_passed_twice: + __Pyx_RaiseDoubleKeywordsError(function_name, key); + goto bad; +invalid_keyword_type: + PyErr_Format(PyExc_TypeError, + "%.200s() keywords must be strings", function_name); + goto bad; +invalid_keyword: + #if PY_MAJOR_VERSION < 3 + PyErr_Format(PyExc_TypeError, + "%.200s() got an unexpected keyword argument '%.200s'", + function_name, PyString_AsString(key)); + #else + PyErr_Format(PyExc_TypeError, + "%s() got an unexpected keyword argument '%U'", + function_name, key); + #endif +bad: + Py_XDECREF(key); + Py_XDECREF(value); + return -1; +} + + +//////////////////// MergeKeywords.proto //////////////////// + +static int __Pyx_MergeKeywords(PyObject *kwdict, PyObject *source_mapping); /*proto*/ + +//////////////////// MergeKeywords //////////////////// +//@requires: RaiseDoubleKeywords +//@requires: 
Optimize.c::dict_iter + +static int __Pyx_MergeKeywords(PyObject *kwdict, PyObject *source_mapping) { + PyObject *iter, *key = NULL, *value = NULL; + int source_is_dict, result; + Py_ssize_t orig_length, ppos = 0; + + iter = __Pyx_dict_iterator(source_mapping, 0, PYIDENT("items"), &orig_length, &source_is_dict); + if (unlikely(!iter)) { + // slow fallback: try converting to dict, then iterate + PyObject *args; + if (unlikely(!PyErr_ExceptionMatches(PyExc_AttributeError))) goto bad; + PyErr_Clear(); + args = PyTuple_Pack(1, source_mapping); + if (likely(args)) { + PyObject *fallback = PyObject_Call((PyObject*)&PyDict_Type, args, NULL); + Py_DECREF(args); + if (likely(fallback)) { + iter = __Pyx_dict_iterator(fallback, 1, PYIDENT("items"), &orig_length, &source_is_dict); + Py_DECREF(fallback); + } + } + if (unlikely(!iter)) goto bad; + } + + while (1) { + result = __Pyx_dict_iter_next(iter, orig_length, &ppos, &key, &value, NULL, source_is_dict); + if (unlikely(result < 0)) goto bad; + if (!result) break; + + if (unlikely(PyDict_Contains(kwdict, key))) { + __Pyx_RaiseDoubleKeywordsError("function", key); + result = -1; + } else { + result = PyDict_SetItem(kwdict, key, value); + } + Py_DECREF(key); + Py_DECREF(value); + if (unlikely(result < 0)) goto bad; + } + Py_XDECREF(iter); + return 0; + +bad: + Py_XDECREF(iter); + return -1; +} + + +/////////////// fastcall.proto /////////////// + +// We define various functions and macros with two variants: +//..._FASTCALL and ..._VARARGS + +// The first is used when METH_FASTCALL is enabled and the second is used +// otherwise. If the Python implementation does not support METH_FASTCALL +// (because it's an old version of CPython or it's not CPython at all), +// then the ..._FASTCALL macros simply alias ..._VARARGS + +#if CYTHON_AVOID_BORROWED_REFS + // This is the only case where we request an owned reference. 
+ #define __Pyx_Arg_VARARGS(args, i) PySequence_GetItem(args, i) +#elif CYTHON_ASSUME_SAFE_MACROS + #define __Pyx_Arg_VARARGS(args, i) PyTuple_GET_ITEM(args, i) +#else + #define __Pyx_Arg_VARARGS(args, i) PyTuple_GetItem(args, i) +#endif +#if CYTHON_AVOID_BORROWED_REFS + #define __Pyx_Arg_NewRef_VARARGS(arg) __Pyx_NewRef(arg) + #define __Pyx_Arg_XDECREF_VARARGS(arg) Py_XDECREF(arg) +#else + #define __Pyx_Arg_NewRef_VARARGS(arg) arg /* no-op */ + #define __Pyx_Arg_XDECREF_VARARGS(arg) /* no-op - arg is borrowed */ +#endif +#define __Pyx_NumKwargs_VARARGS(kwds) PyDict_Size(kwds) +#define __Pyx_KwValues_VARARGS(args, nargs) NULL +#define __Pyx_GetKwValue_VARARGS(kw, kwvalues, s) __Pyx_PyDict_GetItemStrWithError(kw, s) +#define __Pyx_KwargsAsDict_VARARGS(kw, kwvalues) PyDict_Copy(kw) +#if CYTHON_METH_FASTCALL + #define __Pyx_Arg_FASTCALL(args, i) args[i] + #define __Pyx_NumKwargs_FASTCALL(kwds) PyTuple_GET_SIZE(kwds) + #define __Pyx_KwValues_FASTCALL(args, nargs) ((args) + (nargs)) + static CYTHON_INLINE PyObject * __Pyx_GetKwValue_FASTCALL(PyObject *kwnames, PyObject *const *kwvalues, PyObject *s); +#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030d0000 + CYTHON_UNUSED static PyObject *__Pyx_KwargsAsDict_FASTCALL(PyObject *kwnames, PyObject *const *kwvalues);/*proto*/ + #else + #define __Pyx_KwargsAsDict_FASTCALL(kw, kwvalues) _PyStack_AsDict(kwvalues, kw) + #endif + #define __Pyx_Arg_NewRef_FASTCALL(arg) arg /* no-op, __Pyx_Arg_FASTCALL is direct and this needs + to have the same reference counting */ + #define __Pyx_Arg_XDECREF_FASTCALL(arg) /* no-op - arg was returned from array */ +#else + #define __Pyx_Arg_FASTCALL __Pyx_Arg_VARARGS + #define __Pyx_NumKwargs_FASTCALL __Pyx_NumKwargs_VARARGS + #define __Pyx_KwValues_FASTCALL __Pyx_KwValues_VARARGS + #define __Pyx_GetKwValue_FASTCALL __Pyx_GetKwValue_VARARGS + #define __Pyx_KwargsAsDict_FASTCALL __Pyx_KwargsAsDict_VARARGS + #define __Pyx_Arg_NewRef_FASTCALL(arg) __Pyx_Arg_NewRef_VARARGS(arg) + #define 
__Pyx_Arg_XDECREF_FASTCALL(arg) __Pyx_Arg_XDECREF_VARARGS(arg) +#endif + +#if CYTHON_COMPILING_IN_CPYTHON && CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS +#define __Pyx_ArgsSlice_VARARGS(args, start, stop) __Pyx_PyTuple_FromArray(&__Pyx_Arg_VARARGS(args, start), stop - start) +#define __Pyx_ArgsSlice_FASTCALL(args, start, stop) __Pyx_PyTuple_FromArray(&__Pyx_Arg_FASTCALL(args, start), stop - start) +#else +/* Not CPython, so certainly no METH_FASTCALL support */ +#define __Pyx_ArgsSlice_VARARGS(args, start, stop) PyTuple_GetSlice(args, start, stop) +#define __Pyx_ArgsSlice_FASTCALL(args, start, stop) PyTuple_GetSlice(args, start, stop) +#endif + + +/////////////// fastcall /////////////// +//@requires: ObjectHandling.c::TupleAndListFromArray +//@requires: StringTools.c::UnicodeEquals + +#if CYTHON_METH_FASTCALL +// kwnames: tuple with names of keyword arguments +// kwvalues: C array with values of keyword arguments +// s: str with the keyword name to look for +static CYTHON_INLINE PyObject * __Pyx_GetKwValue_FASTCALL(PyObject *kwnames, PyObject *const *kwvalues, PyObject *s) +{ + // Search the kwnames array for s and return the corresponding value. + // We do two loops: a first one to compare pointers (which will find a + // match if the name in kwnames is interned, given that s is interned + // by Cython). A second loop compares the actual strings. 
+ Py_ssize_t i, n = PyTuple_GET_SIZE(kwnames); + for (i = 0; i < n; i++) + { + if (s == PyTuple_GET_ITEM(kwnames, i)) return kwvalues[i]; + } + for (i = 0; i < n; i++) + { + int eq = __Pyx_PyUnicode_Equals(s, PyTuple_GET_ITEM(kwnames, i), Py_EQ); + if (unlikely(eq != 0)) { + if (unlikely(eq < 0)) return NULL; /* error */ + return kwvalues[i]; + } + } + return NULL; /* not found (no exception set) */ +} + +#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030d0000 +CYTHON_UNUSED static PyObject *__Pyx_KwargsAsDict_FASTCALL(PyObject *kwnames, PyObject *const *kwvalues) { + Py_ssize_t i, nkwargs = PyTuple_GET_SIZE(kwnames); + PyObject *dict; + + dict = PyDict_New(); + if (unlikely(!dict)) + return NULL; + + for (i=0; i": + void *memset(void *b, int c, size_t len) + +cdef extern from *: + bint __PYX_CYTHON_ATOMICS_ENABLED() + int __Pyx_GetBuffer(object, Py_buffer *, int) except -1 + void __Pyx_ReleaseBuffer(Py_buffer *) + + ctypedef struct PyObject + ctypedef Py_ssize_t Py_intptr_t + void Py_INCREF(PyObject *) + void Py_DECREF(PyObject *) + + void* PyMem_Malloc(size_t n) + void PyMem_Free(void *p) + void* PyObject_Malloc(size_t n) + void PyObject_Free(void *p) + + cdef struct __pyx_memoryview "__pyx_memoryview_obj": + Py_buffer view + PyObject *obj + __Pyx_TypeInfo *typeinfo + + ctypedef struct {{memviewslice_name}}: + __pyx_memoryview *memview + char *data + Py_ssize_t shape[{{max_dims}}] + Py_ssize_t strides[{{max_dims}}] + Py_ssize_t suboffsets[{{max_dims}}] + + void __PYX_INC_MEMVIEW({{memviewslice_name}} *memslice, int have_gil) + void __PYX_XCLEAR_MEMVIEW({{memviewslice_name}} *memslice, int have_gil) + + ctypedef struct __pyx_buffer "Py_buffer": + PyObject *obj + + PyObject *Py_None + + cdef enum: + PyBUF_C_CONTIGUOUS, + PyBUF_F_CONTIGUOUS, + PyBUF_ANY_CONTIGUOUS + PyBUF_FORMAT + PyBUF_WRITABLE + PyBUF_STRIDES + PyBUF_INDIRECT + PyBUF_ND + PyBUF_RECORDS + PyBUF_RECORDS_RO + + ctypedef struct __Pyx_TypeInfo: + pass + +cdef extern from *: + ctypedef int 
__pyx_atomic_int_type + {{memviewslice_name}} slice_copy_contig "__pyx_memoryview_copy_new_contig"( + __Pyx_memviewslice *from_mvs, + char *mode, int ndim, + size_t sizeof_dtype, int contig_flag, + bint dtype_is_object) except * nogil + bint slice_is_contig "__pyx_memviewslice_is_contig" ( + {{memviewslice_name}} mvs, char order, int ndim) nogil + bint slices_overlap "__pyx_slices_overlap" ({{memviewslice_name}} *slice1, + {{memviewslice_name}} *slice2, + int ndim, size_t itemsize) nogil + + +cdef extern from "": + void *malloc(size_t) nogil + void free(void *) nogil + void *memcpy(void *dest, void *src, size_t n) nogil + +# the sequence abstract base class +cdef object __pyx_collections_abc_Sequence "__pyx_collections_abc_Sequence" +try: + if __import__("sys").version_info >= (3, 3): + __pyx_collections_abc_Sequence = __import__("collections.abc").abc.Sequence + else: + __pyx_collections_abc_Sequence = __import__("collections").Sequence +except: + # it isn't a big problem if this fails + __pyx_collections_abc_Sequence = None + +# +### cython.array class +# + +@cython.collection_type("sequence") +@cname("__pyx_array") +cdef class array: + + cdef: + char *data + Py_ssize_t len + char *format + int ndim + Py_ssize_t *_shape + Py_ssize_t *_strides + Py_ssize_t itemsize + unicode mode # FIXME: this should have been a simple 'char' + bytes _format + void (*callback_free_data)(void *data) noexcept + # cdef object _memview + cdef bint free_data + cdef bint dtype_is_object + + def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, + mode="c", bint allocate_buffer=True): + + cdef int idx + cdef Py_ssize_t dim + + self.ndim = len(shape) + self.itemsize = itemsize + + if not self.ndim: + raise ValueError, "Empty shape tuple for cython.array" + + if itemsize <= 0: + raise ValueError, "itemsize <= 0 for cython.array" + + if not isinstance(format, bytes): + format = format.encode('ASCII') + self._format = format # keep a reference to the byte string + 
self.format = self._format + + # use single malloc() for both shape and strides + self._shape = PyObject_Malloc(sizeof(Py_ssize_t)*self.ndim*2) + self._strides = self._shape + self.ndim + + if not self._shape: + raise MemoryError, "unable to allocate shape and strides." + + # cdef Py_ssize_t dim, stride + for idx, dim in enumerate(shape): + if dim <= 0: + raise ValueError, f"Invalid shape in axis {idx}: {dim}." + self._shape[idx] = dim + + cdef char order + if mode == 'c': + order = b'C' + self.mode = u'c' + elif mode == 'fortran': + order = b'F' + self.mode = u'fortran' + else: + raise ValueError, f"Invalid mode, expected 'c' or 'fortran', got {mode}" + + self.len = fill_contig_strides_array(self._shape, self._strides, itemsize, self.ndim, order) + + self.free_data = allocate_buffer + self.dtype_is_object = format == b'O' + + if allocate_buffer: + _allocate_buffer(self) + + @cname('getbuffer') + def __getbuffer__(self, Py_buffer *info, int flags): + cdef int bufmode = -1 + if flags & (PyBUF_C_CONTIGUOUS | PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS): + if self.mode == u"c": + bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS + elif self.mode == u"fortran": + bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS + if not (flags & bufmode): + raise ValueError, "Can only create a buffer that is contiguous in memory." 
+ info.buf = self.data + info.len = self.len + + if flags & PyBUF_STRIDES: + info.ndim = self.ndim + info.shape = self._shape + info.strides = self._strides + else: + info.ndim = 1 + info.shape = &self.len if flags & PyBUF_ND else NULL + info.strides = NULL + + info.suboffsets = NULL + info.itemsize = self.itemsize + info.readonly = 0 + info.format = self.format if flags & PyBUF_FORMAT else NULL + info.obj = self + + def __dealloc__(array self): + if self.callback_free_data != NULL: + self.callback_free_data(self.data) + elif self.free_data and self.data is not NULL: + if self.dtype_is_object: + refcount_objects_in_slice(self.data, self._shape, self._strides, self.ndim, inc=False) + free(self.data) + PyObject_Free(self._shape) + + @property + def memview(self): + return self.get_memview() + + @cname('get_memview') + cdef get_memview(self): + flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE + return memoryview(self, flags, self.dtype_is_object) + + def __len__(self): + return self._shape[0] + + def __getattr__(self, attr): + return getattr(self.memview, attr) + + def __getitem__(self, item): + return self.memview[item] + + def __setitem__(self, item, value): + self.memview[item] = value + + # Sequence methods + try: + count = __pyx_collections_abc_Sequence.count + index = __pyx_collections_abc_Sequence.index + except: + pass + +@cname("__pyx_array_allocate_buffer") +cdef int _allocate_buffer(array self) except -1: + # use malloc() for backwards compatibility + # in case external code wants to change the data pointer + cdef Py_ssize_t i + cdef PyObject **p + + self.free_data = True + self.data = malloc(self.len) + if not self.data: + raise MemoryError, "unable to allocate array data." 
+ + if self.dtype_is_object: + p = self.data + for i in range(self.len // self.itemsize): + p[i] = Py_None + Py_INCREF(Py_None) + return 0 + + +@cname("__pyx_array_new") +cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format, char *c_mode, char *buf): + cdef array result + cdef str mode = "fortran" if c_mode[0] == b'f' else "c" # this often comes from a constant C string. + + if buf is NULL: + result = array.__new__(array, shape, itemsize, format, mode) + else: + result = array.__new__(array, shape, itemsize, format, mode, allocate_buffer=False) + result.data = buf + + return result + + +# +### Memoryview constants and cython.view.memoryview class +# + +# Disable generic_contiguous, as it makes trouble verifying contiguity: +# - 'contiguous' or '::1' means the dimension is contiguous with dtype +# - 'indirect_contiguous' means a contiguous list of pointers +# - dtype contiguous must be contiguous in the first or last dimension +# from the start, or from the dimension following the last indirect dimension +# +# e.g. +# int[::indirect_contiguous, ::contiguous, :] +# +# is valid (list of pointers to 2d fortran-contiguous array), but +# +# int[::generic_contiguous, ::contiguous, :] +# +# would mean you'd have assert dimension 0 to be indirect (and pointer contiguous) at runtime. +# So it doesn't bring any performance benefit, and it's only confusing. 
+ +@cname('__pyx_MemviewEnum') +cdef class Enum(object): + cdef object name + def __init__(self, name): + self.name = name + def __repr__(self): + return self.name + +cdef generic = Enum("") +cdef strided = Enum("") # default +cdef indirect = Enum("") +# Disable generic_contiguous, as it is a troublemaker +#cdef generic_contiguous = Enum("") +cdef contiguous = Enum("") +cdef indirect_contiguous = Enum("") + +# 'follow' is implied when the first or last axis is ::1 + + +# pre-allocate thread locks for reuse +## note that this could be implemented in a more beautiful way in "normal" Cython, +## but this code gets merged into the user module and not everything works there. +cdef int __pyx_memoryview_thread_locks_used = 0 +cdef PyThread_type_lock[{{THREAD_LOCKS_PREALLOCATED}}] __pyx_memoryview_thread_locks = [ +{{for _ in range(THREAD_LOCKS_PREALLOCATED)}} + PyThread_allocate_lock(), +{{endfor}} +] + + +@cname('__pyx_memoryview') +cdef class memoryview: + + cdef object obj + cdef object _size + cdef object _array_interface + cdef PyThread_type_lock lock + cdef __pyx_atomic_int_type acquisition_count + cdef Py_buffer view + cdef int flags + cdef bint dtype_is_object + cdef __Pyx_TypeInfo *typeinfo + + def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): + self.obj = obj + self.flags = flags + if type(self) is memoryview or obj is not None: + __Pyx_GetBuffer(obj, &self.view, flags) + if self.view.obj == NULL: + (<__pyx_buffer *> &self.view).obj = Py_None + Py_INCREF(Py_None) + + if not __PYX_CYTHON_ATOMICS_ENABLED(): + global __pyx_memoryview_thread_locks_used + if __pyx_memoryview_thread_locks_used < {{THREAD_LOCKS_PREALLOCATED}}: + self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] + __pyx_memoryview_thread_locks_used += 1 + if self.lock is NULL: + self.lock = PyThread_allocate_lock() + if self.lock is NULL: + raise MemoryError + + if flags & PyBUF_FORMAT: + self.dtype_is_object = (self.view.format[0] == b'O' and 
self.view.format[1] == b'\0') + else: + self.dtype_is_object = dtype_is_object + + assert (&self.acquisition_count) % sizeof(__pyx_atomic_int_type) == 0 + self.typeinfo = NULL + + def __dealloc__(memoryview self): + if self.obj is not None: + __Pyx_ReleaseBuffer(&self.view) + elif (<__pyx_buffer *> &self.view).obj == Py_None: + # Undo the incref in __cinit__() above. + (<__pyx_buffer *> &self.view).obj = NULL + Py_DECREF(Py_None) + + cdef int i + global __pyx_memoryview_thread_locks_used + if self.lock != NULL: + for i in range(__pyx_memoryview_thread_locks_used): + if __pyx_memoryview_thread_locks[i] is self.lock: + __pyx_memoryview_thread_locks_used -= 1 + if i != __pyx_memoryview_thread_locks_used: + __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( + __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) + break + else: + PyThread_free_lock(self.lock) + + cdef char *get_item_pointer(memoryview self, object index) except NULL: + cdef Py_ssize_t dim + cdef char *itemp = self.view.buf + + for dim, idx in enumerate(index): + itemp = pybuffer_index(&self.view, itemp, idx, dim) + + return itemp + + #@cname('__pyx_memoryview_getitem') + def __getitem__(memoryview self, object index): + if index is Ellipsis: + return self + + have_slices, indices = _unellipsify(index, self.view.ndim) + + cdef char *itemp + if have_slices: + return memview_slice(self, indices) + else: + itemp = self.get_item_pointer(indices) + return self.convert_item_to_object(itemp) + + def __setitem__(memoryview self, object index, object value): + if self.view.readonly: + raise TypeError, "Cannot assign to read-only memoryview" + + have_slices, index = _unellipsify(index, self.view.ndim) + + if have_slices: + obj = self.is_slice(value) + if obj is not None: + self.setitem_slice_assignment(self[index], obj) + else: + self.setitem_slice_assign_scalar(self[index], value) + else: + 
self.setitem_indexed(index, value) + + cdef is_slice(self, obj): + if not isinstance(obj, memoryview): + try: + obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, + self.dtype_is_object) + except TypeError: + return None + + return obj + + cdef setitem_slice_assignment(self, dst, src): + cdef {{memviewslice_name}} dst_slice + cdef {{memviewslice_name}} src_slice + cdef {{memviewslice_name}} msrc = get_slice_from_memview(src, &src_slice)[0] + cdef {{memviewslice_name}} mdst = get_slice_from_memview(dst, &dst_slice)[0] + + memoryview_copy_contents(msrc, mdst, src.ndim, dst.ndim, self.dtype_is_object) + + cdef setitem_slice_assign_scalar(self, memoryview dst, value): + cdef int array[128] + cdef void *tmp = NULL + cdef void *item + + cdef {{memviewslice_name}} *dst_slice + cdef {{memviewslice_name}} tmp_slice + dst_slice = get_slice_from_memview(dst, &tmp_slice) + + if self.view.itemsize > sizeof(array): + tmp = PyMem_Malloc(self.view.itemsize) + if tmp == NULL: + raise MemoryError + item = tmp + else: + item = array + + try: + if self.dtype_is_object: + ( item)[0] = value + else: + self.assign_item_from_object( item, value) + + # It would be easy to support indirect dimensions, but it's easier + # to disallow :) + if self.view.suboffsets != NULL: + assert_direct_dimensions(self.view.suboffsets, self.view.ndim) + slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, + item, self.dtype_is_object) + finally: + PyMem_Free(tmp) + + cdef setitem_indexed(self, index, value): + cdef char *itemp = self.get_item_pointer(index) + self.assign_item_from_object(itemp, value) + + cdef convert_item_to_object(self, char *itemp): + """Only used if instantiated manually by the user, or if Cython doesn't + know how to convert the type""" + import struct + cdef bytes bytesitem + # Do a manual and complete check here instead of this easy hack + bytesitem = itemp[:self.view.itemsize] + try: + result = struct.unpack(self.view.format, bytesitem) + except 
struct.error: + raise ValueError, "Unable to convert item to object" + else: + if len(self.view.format) == 1: + return result[0] + return result + + cdef assign_item_from_object(self, char *itemp, object value): + """Only used if instantiated manually by the user, or if Cython doesn't + know how to convert the type""" + import struct + cdef char c + cdef bytes bytesvalue + cdef Py_ssize_t i + + if isinstance(value, tuple): + bytesvalue = struct.pack(self.view.format, *value) + else: + bytesvalue = struct.pack(self.view.format, value) + + for i, c in enumerate(bytesvalue): + itemp[i] = c + + @cname('getbuffer') + def __getbuffer__(self, Py_buffer *info, int flags): + if flags & PyBUF_WRITABLE and self.view.readonly: + raise ValueError, "Cannot create writable memory view from read-only memoryview" + + if flags & PyBUF_ND: + info.shape = self.view.shape + else: + info.shape = NULL + + if flags & PyBUF_STRIDES: + info.strides = self.view.strides + else: + info.strides = NULL + + if flags & PyBUF_INDIRECT: + info.suboffsets = self.view.suboffsets + else: + info.suboffsets = NULL + + if flags & PyBUF_FORMAT: + info.format = self.view.format + else: + info.format = NULL + + info.buf = self.view.buf + info.ndim = self.view.ndim + info.itemsize = self.view.itemsize + info.len = self.view.len + info.readonly = self.view.readonly + info.obj = self + + # Some properties that have the same semantics as in NumPy + @property + def T(self): + cdef _memoryviewslice result = memoryview_copy(self) + transpose_memslice(&result.from_slice) + return result + + @property + def base(self): + return self._get_base() + + cdef _get_base(self): + return self.obj + + @property + def shape(self): + return tuple([length for length in self.view.shape[:self.view.ndim]]) + + @property + def strides(self): + if self.view.strides == NULL: + # Note: we always ask for strides, so if this is not set it's a bug + raise ValueError, "Buffer view does not expose strides" + + return tuple([stride for stride 
in self.view.strides[:self.view.ndim]]) + + @property + def suboffsets(self): + if self.view.suboffsets == NULL: + return (-1,) * self.view.ndim + + return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]]) + + @property + def ndim(self): + return self.view.ndim + + @property + def itemsize(self): + return self.view.itemsize + + @property + def nbytes(self): + return self.size * self.view.itemsize + + @property + def size(self): + if self._size is None: + result = 1 + + for length in self.view.shape[:self.view.ndim]: + result *= length + + self._size = result + + return self._size + + def __len__(self): + if self.view.ndim >= 1: + return self.view.shape[0] + + return 0 + + def __repr__(self): + return "" % (self.base.__class__.__name__, + id(self)) + + def __str__(self): + return "" % (self.base.__class__.__name__,) + + # Support the same attributes as memoryview slices + def is_c_contig(self): + cdef {{memviewslice_name}} *mslice + cdef {{memviewslice_name}} tmp + mslice = get_slice_from_memview(self, &tmp) + return slice_is_contig(mslice[0], 'C', self.view.ndim) + + def is_f_contig(self): + cdef {{memviewslice_name}} *mslice + cdef {{memviewslice_name}} tmp + mslice = get_slice_from_memview(self, &tmp) + return slice_is_contig(mslice[0], 'F', self.view.ndim) + + def copy(self): + cdef {{memviewslice_name}} mslice + cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS + + slice_copy(self, &mslice) + mslice = slice_copy_contig(&mslice, "c", self.view.ndim, + self.view.itemsize, + flags|PyBUF_C_CONTIGUOUS, + self.dtype_is_object) + + return memoryview_copy_from_slice(self, &mslice) + + def copy_fortran(self): + cdef {{memviewslice_name}} src, dst + cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS + + slice_copy(self, &src) + dst = slice_copy_contig(&src, "fortran", self.view.ndim, + self.view.itemsize, + flags|PyBUF_F_CONTIGUOUS, + self.dtype_is_object) + + return memoryview_copy_from_slice(self, &dst) + + +@cname('__pyx_memoryview_new') +cdef 
memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): + cdef memoryview result = memoryview(o, flags, dtype_is_object) + result.typeinfo = typeinfo + return result + +@cname('__pyx_memoryview_check') +cdef inline bint memoryview_check(object o) noexcept: + return isinstance(o, memoryview) + +cdef tuple _unellipsify(object index, int ndim): + """ + Replace all ellipses with full slices and fill incomplete indices with + full slices. + """ + cdef Py_ssize_t idx + tup = index if isinstance(index, tuple) else (index,) + + result = [slice(None)] * ndim + have_slices = False + seen_ellipsis = False + idx = 0 + for item in tup: + if item is Ellipsis: + if not seen_ellipsis: + idx += ndim - len(tup) + seen_ellipsis = True + have_slices = True + else: + if isinstance(item, slice): + have_slices = True + elif not PyIndex_Check(item): + raise TypeError, f"Cannot index with type '{type(item)}'" + result[idx] = item + idx += 1 + + nslices = ndim - idx + return have_slices or nslices, tuple(result) + +cdef int assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim) except -1: + for suboffset in suboffsets[:ndim]: + if suboffset >= 0: + raise ValueError, "Indirect dimensions not supported" + return 0 # return type just used as an error flag + +# +### Slicing a memoryview +# + +@cname('__pyx_memview_slice') +cdef memoryview memview_slice(memoryview memview, object indices): + cdef int new_ndim = 0, suboffset_dim = -1, dim + cdef bint negative_step + cdef {{memviewslice_name}} src, dst + cdef {{memviewslice_name}} *p_src + + # dst is copied by value in memoryview_fromslice -- initialize it + # src is never copied + memset(&dst, 0, sizeof(dst)) + + cdef _memoryviewslice memviewsliceobj + + assert memview.view.ndim > 0 + + if isinstance(memview, _memoryviewslice): + memviewsliceobj = memview + p_src = &memviewsliceobj.from_slice + else: + slice_copy(memview, &src) + p_src = &src + + # Note: don't use variable src at this point + # SubNote: we 
should be able to declare variables in blocks... + + # memoryview_fromslice() will inc our dst slice + dst.memview = p_src.memview + dst.data = p_src.data + + # Put everything in temps to avoid this bloody warning: + # "Argument evaluation order in C function call is undefined and + # may not be as expected" + cdef {{memviewslice_name}} *p_dst = &dst + cdef int *p_suboffset_dim = &suboffset_dim + cdef Py_ssize_t start, stop, step, cindex + cdef bint have_start, have_stop, have_step + + for dim, index in enumerate(indices): + if PyIndex_Check(index): + cindex = index + slice_memviewslice( + p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], + dim, new_ndim, p_suboffset_dim, + cindex, 0, 0, # start, stop, step + 0, 0, 0, # have_{start,stop,step} + False) + elif index is None: + p_dst.shape[new_ndim] = 1 + p_dst.strides[new_ndim] = 0 + p_dst.suboffsets[new_ndim] = -1 + new_ndim += 1 + else: + start = index.start or 0 + stop = index.stop or 0 + step = index.step or 0 + + have_start = index.start is not None + have_stop = index.stop is not None + have_step = index.step is not None + + slice_memviewslice( + p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], + dim, new_ndim, p_suboffset_dim, + start, stop, step, + have_start, have_stop, have_step, + True) + new_ndim += 1 + + if isinstance(memview, _memoryviewslice): + return memoryview_fromslice(dst, new_ndim, + memviewsliceobj.to_object_func, + memviewsliceobj.to_dtype_func, + memview.dtype_is_object) + else: + return memoryview_fromslice(dst, new_ndim, NULL, NULL, + memview.dtype_is_object) + + +# +### Slicing in a single dimension of a memoryviewslice +# + +@cname('__pyx_memoryview_slice_memviewslice') +cdef int slice_memviewslice( + {{memviewslice_name}} *dst, + Py_ssize_t shape, Py_ssize_t stride, Py_ssize_t suboffset, + int dim, int new_ndim, int *suboffset_dim, + Py_ssize_t start, Py_ssize_t stop, Py_ssize_t step, + int have_start, int have_stop, int have_step, + bint is_slice) 
except -1 nogil: + """ + Create a new slice dst given slice src. + + dim - the current src dimension (indexing will make dimensions + disappear) + new_dim - the new dst dimension + suboffset_dim - pointer to a single int initialized to -1 to keep track of + where slicing offsets should be added + """ + + cdef Py_ssize_t new_shape + cdef bint negative_step + + if not is_slice: + # index is a normal integer-like index + if start < 0: + start += shape + if not 0 <= start < shape: + _err_dim(PyExc_IndexError, "Index out of bounds (axis %d)", dim) + else: + # index is a slice + if have_step: + negative_step = step < 0 + if step == 0: + _err_dim(PyExc_ValueError, "Step may not be zero (axis %d)", dim) + else: + negative_step = False + step = 1 + + # check our bounds and set defaults + if have_start: + if start < 0: + start += shape + if start < 0: + start = 0 + elif start >= shape: + if negative_step: + start = shape - 1 + else: + start = shape + else: + if negative_step: + start = shape - 1 + else: + start = 0 + + if have_stop: + if stop < 0: + stop += shape + if stop < 0: + stop = 0 + elif stop > shape: + stop = shape + else: + if negative_step: + stop = -1 + else: + stop = shape + + # len = ceil( (stop - start) / step ) + with cython.cdivision(True): + new_shape = (stop - start) // step + + if (stop - start) - step * new_shape: + new_shape += 1 + + if new_shape < 0: + new_shape = 0 + + # shape/strides/suboffsets + dst.strides[new_ndim] = stride * step + dst.shape[new_ndim] = new_shape + dst.suboffsets[new_ndim] = suboffset + + # Add the slicing or indexing offsets to the right suboffset or base data * + if suboffset_dim[0] < 0: + dst.data += start * stride + else: + dst.suboffsets[suboffset_dim[0]] += start * stride + + if suboffset >= 0: + if not is_slice: + if new_ndim == 0: + dst.data = ( dst.data)[0] + suboffset + else: + _err_dim(PyExc_IndexError, "All dimensions preceding dimension %d " + "must be indexed and not sliced", dim) + else: + suboffset_dim[0] = 
new_ndim + + return 0 + +# +### Index a memoryview +# +@cname('__pyx_pybuffer_index') +cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, + Py_ssize_t dim) except NULL: + cdef Py_ssize_t shape, stride, suboffset = -1 + cdef Py_ssize_t itemsize = view.itemsize + cdef char *resultp + + if view.ndim == 0: + shape = view.len // itemsize + stride = itemsize + else: + shape = view.shape[dim] + stride = view.strides[dim] + if view.suboffsets != NULL: + suboffset = view.suboffsets[dim] + + if index < 0: + index += view.shape[dim] + if index < 0: + raise IndexError, f"Out of bounds on buffer access (axis {dim})" + + if index >= shape: + raise IndexError, f"Out of bounds on buffer access (axis {dim})" + + resultp = bufp + index * stride + if suboffset >= 0: + resultp = ( resultp)[0] + suboffset + + return resultp + +# +### Transposing a memoryviewslice +# +@cname('__pyx_memslice_transpose') +cdef int transpose_memslice({{memviewslice_name}} *memslice) except -1 nogil: + cdef int ndim = memslice.memview.view.ndim + + cdef Py_ssize_t *shape = memslice.shape + cdef Py_ssize_t *strides = memslice.strides + + # reverse strides and shape + cdef int i, j + for i in range(ndim // 2): + j = ndim - 1 - i + strides[i], strides[j] = strides[j], strides[i] + shape[i], shape[j] = shape[j], shape[i] + + if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: + _err(PyExc_ValueError, "Cannot transpose memoryview with indirect dimensions") + + return 0 + +# +### Creating new memoryview objects from slices and memoryviews +# +@cython.collection_type("sequence") +@cname('__pyx_memoryviewslice') +cdef class _memoryviewslice(memoryview): + "Internal class for passing memoryview slices to Python" + + # We need this to keep our shape/strides/suboffset pointers valid + cdef {{memviewslice_name}} from_slice + # We need this only to print it's class' name + cdef object from_object + + cdef object (*to_object_func)(char *) + cdef int (*to_dtype_func)(char *, object) except 
0 + + def __dealloc__(self): + __PYX_XCLEAR_MEMVIEW(&self.from_slice, 1) + + cdef convert_item_to_object(self, char *itemp): + if self.to_object_func != NULL: + return self.to_object_func(itemp) + else: + return memoryview.convert_item_to_object(self, itemp) + + cdef assign_item_from_object(self, char *itemp, object value): + if self.to_dtype_func != NULL: + self.to_dtype_func(itemp, value) + else: + memoryview.assign_item_from_object(self, itemp, value) + + cdef _get_base(self): + return self.from_object + + # Sequence methods + try: + count = __pyx_collections_abc_Sequence.count + index = __pyx_collections_abc_Sequence.index + except: + pass + +try: + if __pyx_collections_abc_Sequence: + # The main value of registering _memoryviewslice as a + # Sequence is that it can be used in structural pattern + # matching in Python 3.10+ + __pyx_collections_abc_Sequence.register(_memoryviewslice) + __pyx_collections_abc_Sequence.register(array) +except: + pass # ignore failure, it's a minor issue + +@cname('__pyx_memoryview_fromslice') +cdef memoryview_fromslice({{memviewslice_name}} memviewslice, + int ndim, + object (*to_object_func)(char *), + int (*to_dtype_func)(char *, object) except 0, + bint dtype_is_object): + + cdef _memoryviewslice result + + if memviewslice.memview == Py_None: + return None + + # assert 0 < ndim <= memviewslice.memview.view.ndim, ( + # ndim, memviewslice.memview.view.ndim) + + result = _memoryviewslice.__new__(_memoryviewslice, None, 0, dtype_is_object) + + result.from_slice = memviewslice + __PYX_INC_MEMVIEW(&memviewslice, 1) + + result.from_object = ( memviewslice.memview)._get_base() + result.typeinfo = memviewslice.memview.typeinfo + + result.view = memviewslice.memview.view + result.view.buf = memviewslice.data + result.view.ndim = ndim + (<__pyx_buffer *> &result.view).obj = Py_None + Py_INCREF(Py_None) + + if (memviewslice.memview).flags & PyBUF_WRITABLE: + result.flags = PyBUF_RECORDS + else: + result.flags = PyBUF_RECORDS_RO + + 
result.view.shape = result.from_slice.shape + result.view.strides = result.from_slice.strides + + # only set suboffsets if actually used, otherwise set to NULL to improve compatibility + result.view.suboffsets = NULL + for suboffset in result.from_slice.suboffsets[:ndim]: + if suboffset >= 0: + result.view.suboffsets = result.from_slice.suboffsets + break + + result.view.len = result.view.itemsize + for length in result.view.shape[:ndim]: + result.view.len *= length + + result.to_object_func = to_object_func + result.to_dtype_func = to_dtype_func + + return result + +@cname('__pyx_memoryview_get_slice_from_memoryview') +cdef {{memviewslice_name}} *get_slice_from_memview(memoryview memview, + {{memviewslice_name}} *mslice) except NULL: + cdef _memoryviewslice obj + if isinstance(memview, _memoryviewslice): + obj = memview + return &obj.from_slice + else: + slice_copy(memview, mslice) + return mslice + +@cname('__pyx_memoryview_slice_copy') +cdef void slice_copy(memoryview memview, {{memviewslice_name}} *dst) noexcept: + cdef int dim + cdef (Py_ssize_t*) shape, strides, suboffsets + + shape = memview.view.shape + strides = memview.view.strides + suboffsets = memview.view.suboffsets + + dst.memview = <__pyx_memoryview *> memview + dst.data = memview.view.buf + + for dim in range(memview.view.ndim): + dst.shape[dim] = shape[dim] + dst.strides[dim] = strides[dim] + dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 + +@cname('__pyx_memoryview_copy_object') +cdef memoryview_copy(memoryview memview): + "Create a new memoryview object" + cdef {{memviewslice_name}} memviewslice + slice_copy(memview, &memviewslice) + return memoryview_copy_from_slice(memview, &memviewslice) + +@cname('__pyx_memoryview_copy_object_from_slice') +cdef memoryview_copy_from_slice(memoryview memview, {{memviewslice_name}} *memviewslice): + """ + Create a new memoryview object from a given memoryview object and slice. 
+ """ + cdef object (*to_object_func)(char *) + cdef int (*to_dtype_func)(char *, object) except 0 + + if isinstance(memview, _memoryviewslice): + to_object_func = (<_memoryviewslice> memview).to_object_func + to_dtype_func = (<_memoryviewslice> memview).to_dtype_func + else: + to_object_func = NULL + to_dtype_func = NULL + + return memoryview_fromslice(memviewslice[0], memview.view.ndim, + to_object_func, to_dtype_func, + memview.dtype_is_object) + + +# +### Copy the contents of a memoryview slices +# +cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) noexcept nogil: + return -arg if arg < 0 else arg + +@cname('__pyx_get_best_slice_order') +cdef char get_best_order({{memviewslice_name}} *mslice, int ndim) noexcept nogil: + """ + Figure out the best memory access order for a given slice. + """ + cdef int i + cdef Py_ssize_t c_stride = 0 + cdef Py_ssize_t f_stride = 0 + + for i in range(ndim - 1, -1, -1): + if mslice.shape[i] > 1: + c_stride = mslice.strides[i] + break + + for i in range(ndim): + if mslice.shape[i] > 1: + f_stride = mslice.strides[i] + break + + if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): + return 'C' + else: + return 'F' + +@cython.cdivision(True) +cdef void _copy_strided_to_strided(char *src_data, Py_ssize_t *src_strides, + char *dst_data, Py_ssize_t *dst_strides, + Py_ssize_t *src_shape, Py_ssize_t *dst_shape, + int ndim, size_t itemsize) noexcept nogil: + # Note: src_extent is 1 if we're broadcasting + # dst_extent always >= src_extent as we don't do reductions + cdef Py_ssize_t i + cdef Py_ssize_t src_extent = src_shape[0] + cdef Py_ssize_t dst_extent = dst_shape[0] + cdef Py_ssize_t src_stride = src_strides[0] + cdef Py_ssize_t dst_stride = dst_strides[0] + + if ndim == 1: + if (src_stride > 0 and dst_stride > 0 and + src_stride == itemsize == dst_stride): + memcpy(dst_data, src_data, itemsize * dst_extent) + else: + for i in range(dst_extent): + memcpy(dst_data, src_data, itemsize) + src_data += src_stride + dst_data += dst_stride + 
else: + for i in range(dst_extent): + _copy_strided_to_strided(src_data, src_strides + 1, + dst_data, dst_strides + 1, + src_shape + 1, dst_shape + 1, + ndim - 1, itemsize) + src_data += src_stride + dst_data += dst_stride + +cdef void copy_strided_to_strided({{memviewslice_name}} *src, + {{memviewslice_name}} *dst, + int ndim, size_t itemsize) noexcept nogil: + _copy_strided_to_strided(src.data, src.strides, dst.data, dst.strides, + src.shape, dst.shape, ndim, itemsize) + +@cname('__pyx_memoryview_slice_get_size') +cdef Py_ssize_t slice_get_size({{memviewslice_name}} *src, int ndim) noexcept nogil: + "Return the size of the memory occupied by the slice in number of bytes" + cdef Py_ssize_t shape, size = src.memview.view.itemsize + + for shape in src.shape[:ndim]: + size *= shape + + return size + +@cname('__pyx_fill_contig_strides_array') +cdef Py_ssize_t fill_contig_strides_array( + Py_ssize_t *shape, Py_ssize_t *strides, Py_ssize_t stride, + int ndim, char order) noexcept nogil: + """ + Fill the strides array for a slice with C or F contiguous strides. + This is like PyBuffer_FillContiguousStrides, but compatible with py < 2.6 + """ + cdef int idx + + if order == 'F': + for idx in range(ndim): + strides[idx] = stride + stride *= shape[idx] + else: + for idx in range(ndim - 1, -1, -1): + strides[idx] = stride + stride *= shape[idx] + + return stride + +@cname('__pyx_memoryview_copy_data_to_temp') +cdef void *copy_data_to_temp({{memviewslice_name}} *src, + {{memviewslice_name}} *tmpslice, + char order, + int ndim) except NULL nogil: + """ + Copy a direct slice to temporary contiguous memory. The caller should free + the result when done. 
+ """ + cdef int i + cdef void *result + + cdef size_t itemsize = src.memview.view.itemsize + cdef size_t size = slice_get_size(src, ndim) + + result = malloc(size) + if not result: + _err_no_memory() + + # tmpslice[0] = src + tmpslice.data = result + tmpslice.memview = src.memview + for i in range(ndim): + tmpslice.shape[i] = src.shape[i] + tmpslice.suboffsets[i] = -1 + + fill_contig_strides_array(&tmpslice.shape[0], &tmpslice.strides[0], itemsize, ndim, order) + + # We need to broadcast strides again + for i in range(ndim): + if tmpslice.shape[i] == 1: + tmpslice.strides[i] = 0 + + if slice_is_contig(src[0], order, ndim): + memcpy(result, src.data, size) + else: + copy_strided_to_strided(src, tmpslice, ndim, itemsize) + + return result + +# Use 'with gil' functions and avoid 'with gil' blocks, as the code within the blocks +# has temporaries that need the GIL to clean up +@cname('__pyx_memoryview_err_extents') +cdef int _err_extents(int i, Py_ssize_t extent1, + Py_ssize_t extent2) except -1 with gil: + raise ValueError, f"got differing extents in dimension {i} (got {extent1} and {extent2})" + +@cname('__pyx_memoryview_err_dim') +cdef int _err_dim(PyObject *error, str msg, int dim) except -1 with gil: + raise error, msg % dim + +@cname('__pyx_memoryview_err') +cdef int _err(PyObject *error, str msg) except -1 with gil: + raise error, msg + +@cname('__pyx_memoryview_err_no_memory') +cdef int _err_no_memory() except -1 with gil: + raise MemoryError + + +@cname('__pyx_memoryview_copy_contents') +cdef int memoryview_copy_contents({{memviewslice_name}} src, + {{memviewslice_name}} dst, + int src_ndim, int dst_ndim, + bint dtype_is_object) except -1 nogil: + """ + Copy memory from slice src to slice dst. + Check for overlapping memory and verify the shapes. 
+ """ + cdef void *tmpdata = NULL + cdef size_t itemsize = src.memview.view.itemsize + cdef int i + cdef char order = get_best_order(&src, src_ndim) + cdef bint broadcasting = False + cdef bint direct_copy = False + cdef {{memviewslice_name}} tmp + + if src_ndim < dst_ndim: + broadcast_leading(&src, src_ndim, dst_ndim) + elif dst_ndim < src_ndim: + broadcast_leading(&dst, dst_ndim, src_ndim) + + cdef int ndim = max(src_ndim, dst_ndim) + + for i in range(ndim): + if src.shape[i] != dst.shape[i]: + if src.shape[i] == 1: + broadcasting = True + src.strides[i] = 0 + else: + _err_extents(i, dst.shape[i], src.shape[i]) + + if src.suboffsets[i] >= 0: + _err_dim(PyExc_ValueError, "Dimension %d is not direct", i) + + if slices_overlap(&src, &dst, ndim, itemsize): + # slices overlap, copy to temp, copy temp to dst + if not slice_is_contig(src, order, ndim): + order = get_best_order(&dst, ndim) + + tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) + src = tmp + + if not broadcasting: + # See if both slices have equal contiguity, in that case perform a + # direct copy. This only works when we are not broadcasting. 
+ if slice_is_contig(src, 'C', ndim): + direct_copy = slice_is_contig(dst, 'C', ndim) + elif slice_is_contig(src, 'F', ndim): + direct_copy = slice_is_contig(dst, 'F', ndim) + + if direct_copy: + # Contiguous slices with same order + refcount_copying(&dst, dtype_is_object, ndim, inc=False) + memcpy(dst.data, src.data, slice_get_size(&src, ndim)) + refcount_copying(&dst, dtype_is_object, ndim, inc=True) + free(tmpdata) + return 0 + + if order == 'F' == get_best_order(&dst, ndim): + # see if both slices have Fortran order, transpose them to match our + # C-style indexing order + transpose_memslice(&src) + transpose_memslice(&dst) + + refcount_copying(&dst, dtype_is_object, ndim, inc=False) + copy_strided_to_strided(&src, &dst, ndim, itemsize) + refcount_copying(&dst, dtype_is_object, ndim, inc=True) + + free(tmpdata) + return 0 + +@cname('__pyx_memoryview_broadcast_leading') +cdef void broadcast_leading({{memviewslice_name}} *mslice, + int ndim, + int ndim_other) noexcept nogil: + cdef int i + cdef int offset = ndim_other - ndim + + for i in range(ndim - 1, -1, -1): + mslice.shape[i + offset] = mslice.shape[i] + mslice.strides[i + offset] = mslice.strides[i] + mslice.suboffsets[i + offset] = mslice.suboffsets[i] + + for i in range(offset): + mslice.shape[i] = 1 + mslice.strides[i] = mslice.strides[0] + mslice.suboffsets[i] = -1 + +# +### Take care of refcounting the objects in slices. 
Do this separately from any copying, +### to minimize acquiring the GIL +# + +@cname('__pyx_memoryview_refcount_copying') +cdef void refcount_copying({{memviewslice_name}} *dst, bint dtype_is_object, int ndim, bint inc) noexcept nogil: + # incref or decref the objects in the destination slice if the dtype is object + if dtype_is_object: + refcount_objects_in_slice_with_gil(dst.data, dst.shape, dst.strides, ndim, inc) + +@cname('__pyx_memoryview_refcount_objects_in_slice_with_gil') +cdef void refcount_objects_in_slice_with_gil(char *data, Py_ssize_t *shape, + Py_ssize_t *strides, int ndim, + bint inc) noexcept with gil: + refcount_objects_in_slice(data, shape, strides, ndim, inc) + +@cname('__pyx_memoryview_refcount_objects_in_slice') +cdef void refcount_objects_in_slice(char *data, Py_ssize_t *shape, + Py_ssize_t *strides, int ndim, bint inc) noexcept: + cdef Py_ssize_t i + cdef Py_ssize_t stride = strides[0] + + for i in range(shape[0]): + if ndim == 1: + if inc: + Py_INCREF(( data)[0]) + else: + Py_DECREF(( data)[0]) + else: + refcount_objects_in_slice(data, shape + 1, strides + 1, ndim - 1, inc) + + data += stride + +# +### Scalar to slice assignment +# +@cname('__pyx_memoryview_slice_assign_scalar') +cdef void slice_assign_scalar({{memviewslice_name}} *dst, int ndim, + size_t itemsize, void *item, + bint dtype_is_object) noexcept nogil: + refcount_copying(dst, dtype_is_object, ndim, inc=False) + _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim, itemsize, item) + refcount_copying(dst, dtype_is_object, ndim, inc=True) + + +@cname('__pyx_memoryview__slice_assign_scalar') +cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape, + Py_ssize_t *strides, int ndim, + size_t itemsize, void *item) noexcept nogil: + cdef Py_ssize_t i + cdef Py_ssize_t stride = strides[0] + cdef Py_ssize_t extent = shape[0] + + if ndim == 1: + for i in range(extent): + memcpy(data, item, itemsize) + data += stride + else: + for i in range(extent): + 
_slice_assign_scalar(data, shape + 1, strides + 1, ndim - 1, itemsize, item) + data += stride + + +############### BufferFormatFromTypeInfo ############### +cdef extern from *: + ctypedef struct __Pyx_StructField + + cdef enum: + __PYX_BUF_FLAGS_PACKED_STRUCT + __PYX_BUF_FLAGS_INTEGER_COMPLEX + + ctypedef struct __Pyx_TypeInfo: + char* name + __Pyx_StructField* fields + size_t size + size_t arraysize[8] + int ndim + char typegroup + char is_unsigned + int flags + + ctypedef struct __Pyx_StructField: + __Pyx_TypeInfo* type + char* name + size_t offset + + ctypedef struct __Pyx_BufFmt_StackElem: + __Pyx_StructField* field + size_t parent_offset + + #ctypedef struct __Pyx_BufFmt_Context: + # __Pyx_StructField root + __Pyx_BufFmt_StackElem* head + + struct __pyx_typeinfo_string: + char string[3] + + __pyx_typeinfo_string __Pyx_TypeInfoToFormat(__Pyx_TypeInfo *) + + +@cname('__pyx_format_from_typeinfo') +cdef bytes format_from_typeinfo(__Pyx_TypeInfo *type): + cdef __Pyx_StructField *field + cdef __pyx_typeinfo_string fmt + cdef bytes part, result + cdef Py_ssize_t i + + if type.typegroup == 'S': + assert type.fields != NULL + assert type.fields.type != NULL + + if type.flags & __PYX_BUF_FLAGS_PACKED_STRUCT: + alignment = b'^' + else: + alignment = b'' + + parts = [b"T{"] + field = type.fields + + while field.type: + part = format_from_typeinfo(field.type) + parts.append(part + b':' + field.name + b':') + field += 1 + + result = alignment.join(parts) + b'}' + else: + fmt = __Pyx_TypeInfoToFormat(type) + result = fmt.string + if type.arraysize[0]: + extents = [f"{type.arraysize[i]}" for i in range(type.ndim)] + result = f"({u','.join(extents)})".encode('ascii') + result + + return result diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Utility/Profile.c b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Utility/Profile.c new file mode 100644 index 
0000000000000000000000000000000000000000..2b8564b226fd5b3b74f52ca3f6dc7d25fd581762 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Utility/Profile.c @@ -0,0 +1,383 @@ +/////////////// Profile.proto /////////////// +//@requires: Exceptions.c::PyErrFetchRestore +//@substitute: naming + +// Note that cPython ignores PyTrace_EXCEPTION, +// but maybe some other profilers don't. + +#ifndef CYTHON_PROFILE +#if CYTHON_COMPILING_IN_LIMITED_API || CYTHON_COMPILING_IN_PYPY + #define CYTHON_PROFILE 0 +#else + #define CYTHON_PROFILE 1 +#endif +#endif + +#ifndef CYTHON_TRACE_NOGIL + #define CYTHON_TRACE_NOGIL 0 +#else + #if CYTHON_TRACE_NOGIL && !defined(CYTHON_TRACE) + #define CYTHON_TRACE 1 + #endif +#endif + +#ifndef CYTHON_TRACE + #define CYTHON_TRACE 0 +#endif + +#if CYTHON_TRACE + #undef CYTHON_PROFILE_REUSE_FRAME +#endif + +#ifndef CYTHON_PROFILE_REUSE_FRAME + #define CYTHON_PROFILE_REUSE_FRAME 0 +#endif + +#if CYTHON_PROFILE || CYTHON_TRACE + + #include "compile.h" + #include "frameobject.h" + #include "traceback.h" +#if PY_VERSION_HEX >= 0x030b00a6 + #ifndef Py_BUILD_CORE + #define Py_BUILD_CORE 1 + #endif + #include "internal/pycore_frame.h" +#endif + + #if CYTHON_PROFILE_REUSE_FRAME + #define CYTHON_FRAME_MODIFIER static + #define CYTHON_FRAME_DEL(frame) + #else + #define CYTHON_FRAME_MODIFIER + #define CYTHON_FRAME_DEL(frame) Py_CLEAR(frame) + #endif + + #define __Pyx_TraceDeclarations \ + static PyCodeObject *$frame_code_cname = NULL; \ + CYTHON_FRAME_MODIFIER PyFrameObject *$frame_cname = NULL; \ + int __Pyx_use_tracing = 0; + + #define __Pyx_TraceFrameInit(codeobj) \ + if (codeobj) $frame_code_cname = (PyCodeObject*) codeobj; + + +#if PY_VERSION_HEX >= 0x030b00a2 + #if PY_VERSION_HEX >= 0x030C00b1 + #define __Pyx_IsTracing(tstate, check_tracing, check_funcs) \ + ((!(check_tracing) || !(tstate)->tracing) && \ + (!(check_funcs) || (tstate)->c_profilefunc || (CYTHON_TRACE && (tstate)->c_tracefunc))) + #else + #define 
__Pyx_IsTracing(tstate, check_tracing, check_funcs) \ + (unlikely((tstate)->cframe->use_tracing) && \ + (!(check_tracing) || !(tstate)->tracing) && \ + (!(check_funcs) || (tstate)->c_profilefunc || (CYTHON_TRACE && (tstate)->c_tracefunc))) + #endif + + #define __Pyx_EnterTracing(tstate) PyThreadState_EnterTracing(tstate) + #define __Pyx_LeaveTracing(tstate) PyThreadState_LeaveTracing(tstate) + +#elif PY_VERSION_HEX >= 0x030a00b1 + #define __Pyx_IsTracing(tstate, check_tracing, check_funcs) \ + (unlikely((tstate)->cframe->use_tracing) && \ + (!(check_tracing) || !(tstate)->tracing) && \ + (!(check_funcs) || (tstate)->c_profilefunc || (CYTHON_TRACE && (tstate)->c_tracefunc))) + + #define __Pyx_EnterTracing(tstate) \ + do { tstate->tracing++; tstate->cframe->use_tracing = 0; } while (0) + + #define __Pyx_LeaveTracing(tstate) \ + do { \ + tstate->tracing--; \ + tstate->cframe->use_tracing = ((CYTHON_TRACE && tstate->c_tracefunc != NULL) \ + || tstate->c_profilefunc != NULL); \ + } while (0) + +#else + #define __Pyx_IsTracing(tstate, check_tracing, check_funcs) \ + (unlikely((tstate)->use_tracing) && \ + (!(check_tracing) || !(tstate)->tracing) && \ + (!(check_funcs) || (tstate)->c_profilefunc || (CYTHON_TRACE && (tstate)->c_tracefunc))) + + #define __Pyx_EnterTracing(tstate) \ + do { tstate->tracing++; tstate->use_tracing = 0; } while (0) + + #define __Pyx_LeaveTracing(tstate) \ + do { \ + tstate->tracing--; \ + tstate->use_tracing = ((CYTHON_TRACE && tstate->c_tracefunc != NULL) \ + || tstate->c_profilefunc != NULL); \ + } while (0) + +#endif + + #ifdef WITH_THREAD + #define __Pyx_TraceCall(funcname, srcfile, firstlineno, nogil, goto_error) \ + if (nogil) { \ + if (CYTHON_TRACE_NOGIL) { \ + PyThreadState *tstate; \ + PyGILState_STATE state = PyGILState_Ensure(); \ + tstate = __Pyx_PyThreadState_Current; \ + if (__Pyx_IsTracing(tstate, 1, 1)) { \ + __Pyx_use_tracing = __Pyx_TraceSetupAndCall(&$frame_code_cname, &$frame_cname, tstate, funcname, srcfile, firstlineno); \ 
+ } \ + PyGILState_Release(state); \ + if (unlikely(__Pyx_use_tracing < 0)) goto_error; \ + } \ + } else { \ + PyThreadState* tstate = PyThreadState_GET(); \ + if (__Pyx_IsTracing(tstate, 1, 1)) { \ + __Pyx_use_tracing = __Pyx_TraceSetupAndCall(&$frame_code_cname, &$frame_cname, tstate, funcname, srcfile, firstlineno); \ + if (unlikely(__Pyx_use_tracing < 0)) goto_error; \ + } \ + } + #else + #define __Pyx_TraceCall(funcname, srcfile, firstlineno, nogil, goto_error) \ + { PyThreadState* tstate = PyThreadState_GET(); \ + if (__Pyx_IsTracing(tstate, 1, 1)) { \ + __Pyx_use_tracing = __Pyx_TraceSetupAndCall(&$frame_code_cname, &$frame_cname, tstate, funcname, srcfile, firstlineno); \ + if (unlikely(__Pyx_use_tracing < 0)) goto_error; \ + } \ + } + #endif + + #define __Pyx_TraceException() \ + if (likely(!__Pyx_use_tracing)); else { \ + PyThreadState* tstate = __Pyx_PyThreadState_Current; \ + if (__Pyx_IsTracing(tstate, 0, 1)) { \ + __Pyx_EnterTracing(tstate); \ + PyObject *exc_info = __Pyx_GetExceptionTuple(tstate); \ + if (exc_info) { \ + if (CYTHON_TRACE && tstate->c_tracefunc) \ + tstate->c_tracefunc( \ + tstate->c_traceobj, $frame_cname, PyTrace_EXCEPTION, exc_info); \ + tstate->c_profilefunc( \ + tstate->c_profileobj, $frame_cname, PyTrace_EXCEPTION, exc_info); \ + Py_DECREF(exc_info); \ + } \ + __Pyx_LeaveTracing(tstate); \ + } \ + } + + static void __Pyx_call_return_trace_func(PyThreadState *tstate, PyFrameObject *frame, PyObject *result) { + PyObject *type, *value, *traceback; + __Pyx_ErrFetchInState(tstate, &type, &value, &traceback); + __Pyx_EnterTracing(tstate); + if (CYTHON_TRACE && tstate->c_tracefunc) + tstate->c_tracefunc(tstate->c_traceobj, frame, PyTrace_RETURN, result); + if (tstate->c_profilefunc) + tstate->c_profilefunc(tstate->c_profileobj, frame, PyTrace_RETURN, result); + CYTHON_FRAME_DEL(frame); + __Pyx_LeaveTracing(tstate); + __Pyx_ErrRestoreInState(tstate, type, value, traceback); + } + + #ifdef WITH_THREAD + #define __Pyx_TraceReturn(result, 
nogil) \ + if (likely(!__Pyx_use_tracing)); else { \ + if (nogil) { \ + if (CYTHON_TRACE_NOGIL) { \ + PyThreadState *tstate; \ + PyGILState_STATE state = PyGILState_Ensure(); \ + tstate = __Pyx_PyThreadState_Current; \ + if (__Pyx_IsTracing(tstate, 0, 0)) { \ + __Pyx_call_return_trace_func(tstate, $frame_cname, (PyObject*)result); \ + } \ + PyGILState_Release(state); \ + } \ + } else { \ + PyThreadState* tstate = __Pyx_PyThreadState_Current; \ + if (__Pyx_IsTracing(tstate, 0, 0)) { \ + __Pyx_call_return_trace_func(tstate, $frame_cname, (PyObject*)result); \ + } \ + } \ + } + #else + #define __Pyx_TraceReturn(result, nogil) \ + if (likely(!__Pyx_use_tracing)); else { \ + PyThreadState* tstate = __Pyx_PyThreadState_Current; \ + if (__Pyx_IsTracing(tstate, 0, 0)) { \ + __Pyx_call_return_trace_func(tstate, $frame_cname, (PyObject*)result); \ + } \ + } + #endif + + static PyCodeObject *__Pyx_createFrameCodeObject(const char *funcname, const char *srcfile, int firstlineno); /*proto*/ + static int __Pyx_TraceSetupAndCall(PyCodeObject** code, PyFrameObject** frame, PyThreadState* tstate, const char *funcname, const char *srcfile, int firstlineno); /*proto*/ + +#else + + #define __Pyx_TraceDeclarations + #define __Pyx_TraceFrameInit(codeobj) + // mark error label as used to avoid compiler warnings + #define __Pyx_TraceCall(funcname, srcfile, firstlineno, nogil, goto_error) if ((1)); else goto_error; + #define __Pyx_TraceException() + #define __Pyx_TraceReturn(result, nogil) + +#endif /* CYTHON_PROFILE */ + +#if CYTHON_TRACE + // see call_trace_protected() in CPython's ceval.c + static int __Pyx_call_line_trace_func(PyThreadState *tstate, PyFrameObject *frame, int lineno) { + int ret; + PyObject *type, *value, *traceback; + __Pyx_ErrFetchInState(tstate, &type, &value, &traceback); + __Pyx_PyFrame_SetLineNumber(frame, lineno); + __Pyx_EnterTracing(tstate); + + ret = tstate->c_tracefunc(tstate->c_traceobj, frame, PyTrace_LINE, NULL); + + __Pyx_LeaveTracing(tstate); + if 
(likely(!ret)) { + __Pyx_ErrRestoreInState(tstate, type, value, traceback); + } else { + Py_XDECREF(type); + Py_XDECREF(value); + Py_XDECREF(traceback); + } + return ret; + } + + #ifdef WITH_THREAD + #define __Pyx_TraceLine(lineno, nogil, goto_error) \ + if (likely(!__Pyx_use_tracing)); else { \ + if (nogil) { \ + if (CYTHON_TRACE_NOGIL) { \ + int ret = 0; \ + PyThreadState *tstate; \ + PyGILState_STATE state = __Pyx_PyGILState_Ensure(); \ + tstate = __Pyx_PyThreadState_Current; \ + if (__Pyx_IsTracing(tstate, 0, 0) && tstate->c_tracefunc && $frame_cname->f_trace) { \ + ret = __Pyx_call_line_trace_func(tstate, $frame_cname, lineno); \ + } \ + __Pyx_PyGILState_Release(state); \ + if (unlikely(ret)) goto_error; \ + } \ + } else { \ + PyThreadState* tstate = __Pyx_PyThreadState_Current; \ + if (__Pyx_IsTracing(tstate, 0, 0) && tstate->c_tracefunc && $frame_cname->f_trace) { \ + int ret = __Pyx_call_line_trace_func(tstate, $frame_cname, lineno); \ + if (unlikely(ret)) goto_error; \ + } \ + } \ + } + #else + #define __Pyx_TraceLine(lineno, nogil, goto_error) \ + if (likely(!__Pyx_use_tracing)); else { \ + PyThreadState* tstate = __Pyx_PyThreadState_Current; \ + if (__Pyx_IsTracing(tstate, 0, 0) && tstate->c_tracefunc && $frame_cname->f_trace) { \ + int ret = __Pyx_call_line_trace_func(tstate, $frame_cname, lineno); \ + if (unlikely(ret)) goto_error; \ + } \ + } + #endif +#else + // mark error label as used to avoid compiler warnings + #define __Pyx_TraceLine(lineno, nogil, goto_error) if ((1)); else goto_error; +#endif + +/////////////// Profile /////////////// +//@substitute: naming + +#if CYTHON_PROFILE + +static int __Pyx_TraceSetupAndCall(PyCodeObject** code, + PyFrameObject** frame, + PyThreadState* tstate, + const char *funcname, + const char *srcfile, + int firstlineno) { + PyObject *type, *value, *traceback; + int retval; + if (*frame == NULL || !CYTHON_PROFILE_REUSE_FRAME) { + if (*code == NULL) { + *code = __Pyx_createFrameCodeObject(funcname, srcfile, 
firstlineno); + if (*code == NULL) return 0; + } + *frame = PyFrame_New( + tstate, /*PyThreadState *tstate*/ + *code, /*PyCodeObject *code*/ + $moddict_cname, /*PyObject *globals*/ + 0 /*PyObject *locals*/ + ); + if (*frame == NULL) return 0; + if (CYTHON_TRACE && (*frame)->f_trace == NULL) { + // this enables "f_lineno" lookup, at least in CPython ... + Py_INCREF(Py_None); + (*frame)->f_trace = Py_None; + } +#if PY_VERSION_HEX < 0x030400B1 + } else { + (*frame)->f_tstate = tstate; +#endif + } + __Pyx_PyFrame_SetLineNumber(*frame, firstlineno); + + retval = 1; + __Pyx_EnterTracing(tstate); + __Pyx_ErrFetchInState(tstate, &type, &value, &traceback); + + #if CYTHON_TRACE + if (tstate->c_tracefunc) + retval = tstate->c_tracefunc(tstate->c_traceobj, *frame, PyTrace_CALL, NULL) == 0; + if (retval && tstate->c_profilefunc) + #endif + retval = tstate->c_profilefunc(tstate->c_profileobj, *frame, PyTrace_CALL, NULL) == 0; + + __Pyx_LeaveTracing(tstate); + if (retval) { + __Pyx_ErrRestoreInState(tstate, type, value, traceback); + return __Pyx_IsTracing(tstate, 0, 0) && retval; + } else { + Py_XDECREF(type); + Py_XDECREF(value); + Py_XDECREF(traceback); + return -1; + } +} + +static PyCodeObject *__Pyx_createFrameCodeObject(const char *funcname, const char *srcfile, int firstlineno) { + PyCodeObject *py_code = 0; + +#if PY_MAJOR_VERSION >= 3 + py_code = PyCode_NewEmpty(srcfile, funcname, firstlineno); + // make CPython use a fresh dict for "f_locals" at need (see GH #1836) + if (likely(py_code)) { + py_code->co_flags |= CO_OPTIMIZED | CO_NEWLOCALS; + } +#else + PyObject *py_srcfile = 0; + PyObject *py_funcname = 0; + + py_funcname = PyString_FromString(funcname); + if (unlikely(!py_funcname)) goto bad; + py_srcfile = PyString_FromString(srcfile); + if (unlikely(!py_srcfile)) goto bad; + + py_code = PyCode_New( + 0, /*int argcount,*/ + 0, /*int nlocals,*/ + 0, /*int stacksize,*/ + // make CPython use a fresh dict for "f_locals" at need (see GH #1836) + CO_OPTIMIZED | 
CO_NEWLOCALS, /*int flags,*/ + $empty_bytes, /*PyObject *code,*/ + $empty_tuple, /*PyObject *consts,*/ + $empty_tuple, /*PyObject *names,*/ + $empty_tuple, /*PyObject *varnames,*/ + $empty_tuple, /*PyObject *freevars,*/ + $empty_tuple, /*PyObject *cellvars,*/ + py_srcfile, /*PyObject *filename,*/ + py_funcname, /*PyObject *name,*/ + firstlineno, /*int firstlineno,*/ + $empty_bytes /*PyObject *lnotab*/ + ); + +bad: + Py_XDECREF(py_srcfile); + Py_XDECREF(py_funcname); +#endif + + return py_code; +} + +#endif /* CYTHON_PROFILE */ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Utility/StringTools.c b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Utility/StringTools.c new file mode 100644 index 0000000000000000000000000000000000000000..cec7c224fa20da7eed1c3f838c3c38a5bc17286f --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Utility/StringTools.c @@ -0,0 +1,1294 @@ + +//////////////////// IncludeStringH.proto //////////////////// + +#include + +//////////////////// IncludeCppStringH.proto //////////////////// + +#include + + +//////////////////// ssize_pyunicode_strlen.proto //////////////////// + +static CYTHON_INLINE Py_ssize_t __Pyx_Py_UNICODE_ssize_strlen(const Py_UNICODE *u);/*proto*/ + +//////////////////// ssize_pyunicode_strlen //////////////////// +//@requires: pyunicode_strlen + +static CYTHON_INLINE Py_ssize_t __Pyx_Py_UNICODE_ssize_strlen(const Py_UNICODE *u) { + size_t len = __Pyx_Py_UNICODE_strlen(u); + if (unlikely(len > PY_SSIZE_T_MAX)) { + PyErr_SetString(PyExc_OverflowError, "Py_UNICODE string is too long"); + return -1; + } + return (Py_ssize_t) len; +} + +//////////////////// pyunicode_strlen.proto /////////////// + +// There used to be a Py_UNICODE_strlen() in CPython 3.x, but it is deprecated since Py3.3. 
+static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u); /* proto */ + +//////////////////// pyunicode_strlen ///////////////////// + +// Note: will not work in the limited API since Py_UNICODE is not available there. +// May stop working at some point after Python 3.13 (deprecated) +static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u) +{ + const Py_UNICODE *u_end = u; + while (*u_end++) ; + return (size_t)(u_end - u - 1); +} + +//////////////////// pyunicode_from_unicode.proto ////////////////////// +//@requires: pyunicode_strlen + +#define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u)) +#define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode + +//////////////////// InitStrings.proto //////////////////// + +static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); /*proto*/ + +//////////////////// InitStrings //////////////////// + +#if PY_MAJOR_VERSION >= 3 +static int __Pyx_InitString(__Pyx_StringTabEntry t, PyObject **str) { + if (t.is_unicode | t.is_str) { + if (t.intern) { + *str = PyUnicode_InternFromString(t.s); + } else if (t.encoding) { + *str = PyUnicode_Decode(t.s, t.n - 1, t.encoding, NULL); + } else { + *str = PyUnicode_FromStringAndSize(t.s, t.n - 1); + } + } else { + *str = PyBytes_FromStringAndSize(t.s, t.n - 1); + } + if (!*str) + return -1; + // initialise cached hash value + if (PyObject_Hash(*str) == -1) + return -1; + return 0; +} +#endif + +static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) { + while (t->p) { + #if PY_MAJOR_VERSION >= 3 /* Python 3+ has unicode identifiers */ + __Pyx_InitString(*t, t->p); + #else + if (t->is_unicode) { + *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL); + } else if (t->intern) { + *t->p = PyString_InternFromString(t->s); + } else { + *t->p = PyString_FromStringAndSize(t->s, t->n - 1); + } + if (!*t->p) + return -1; + // initialise cached hash value + if (PyObject_Hash(*t->p) == -1) + return -1; + #endif + ++t; + } + return 
0;
+}
+
+//////////////////// BytesContains.proto ////////////////////
+
+static CYTHON_INLINE int __Pyx_BytesContains(PyObject* bytes, char character); /*proto*/
+
+//////////////////// BytesContains ////////////////////
+//@requires: IncludeStringH
+
+// Membership test "character in bytes": one C-level memchr() scan over the
+// object's internal buffer.  Never raises; returns non-zero iff found.
+static CYTHON_INLINE int __Pyx_BytesContains(PyObject* bytes, char character) {
+    const Py_ssize_t length = PyBytes_GET_SIZE(bytes);
+    char* char_start = PyBytes_AS_STRING(bytes);
+    // cast to unsigned char: memchr() compares bytes as unsigned, so a
+    // negative "char" argument must be mapped to the stored byte value
+    return memchr(char_start, (unsigned char)character, (size_t)length) != NULL;
+}
+
+
+//////////////////// PyUCS4InUnicode.proto ////////////////////
+
+static CYTHON_INLINE int __Pyx_UnicodeContainsUCS4(PyObject* unicode, Py_UCS4 character); /*proto*/
+
+//////////////////// PyUCS4InUnicode ////////////////////
+
+// The wstr-based fallback helpers below are only compiled where the
+// deprecated pre-PEP-393 Py_UNICODE APIs still exist.
+#if PY_VERSION_HEX < 0x03090000 || (defined(PyUnicode_WCHAR_KIND) && defined(PyUnicode_AS_UNICODE))
+
+#if PY_VERSION_HEX < 0x03090000
+#define __Pyx_PyUnicode_AS_UNICODE(op) PyUnicode_AS_UNICODE(op)
+#define __Pyx_PyUnicode_GET_SIZE(op) PyUnicode_GET_SIZE(op)
+#else
+// Avoid calling deprecated C-API functions in Py3.9+ that PEP-623 schedules for removal in Py3.12.
+// https://www.python.org/dev/peps/pep-0623/ +#define __Pyx_PyUnicode_AS_UNICODE(op) (((PyASCIIObject *)(op))->wstr) +#define __Pyx_PyUnicode_GET_SIZE(op) ((PyCompactUnicodeObject *)(op))->wstr_length +#endif + +#if !defined(Py_UNICODE_SIZE) || Py_UNICODE_SIZE == 2 +static int __Pyx_PyUnicodeBufferContainsUCS4_SP(Py_UNICODE* buffer, Py_ssize_t length, Py_UCS4 character) { + /* handle surrogate pairs for Py_UNICODE buffers in 16bit Unicode builds */ + Py_UNICODE high_val, low_val; + Py_UNICODE* pos; + high_val = (Py_UNICODE) (0xD800 | (((character - 0x10000) >> 10) & ((1<<10)-1))); + low_val = (Py_UNICODE) (0xDC00 | ( (character - 0x10000) & ((1<<10)-1))); + for (pos=buffer; pos < buffer+length-1; pos++) { + if (unlikely((high_val == pos[0]) & (low_val == pos[1]))) return 1; + } + return 0; +} +#endif + +static int __Pyx_PyUnicodeBufferContainsUCS4_BMP(Py_UNICODE* buffer, Py_ssize_t length, Py_UCS4 character) { + Py_UNICODE uchar; + Py_UNICODE* pos; + uchar = (Py_UNICODE) character; + for (pos=buffer; pos < buffer+length; pos++) { + if (unlikely(uchar == pos[0])) return 1; + } + return 0; +} +#endif + +static CYTHON_INLINE int __Pyx_UnicodeContainsUCS4(PyObject* unicode, Py_UCS4 character) { +#if CYTHON_PEP393_ENABLED + const int kind = PyUnicode_KIND(unicode); + #ifdef PyUnicode_WCHAR_KIND + if (likely(kind != PyUnicode_WCHAR_KIND)) + #endif + { + Py_ssize_t i; + const void* udata = PyUnicode_DATA(unicode); + const Py_ssize_t length = PyUnicode_GET_LENGTH(unicode); + for (i=0; i < length; i++) { + if (unlikely(character == PyUnicode_READ(kind, udata, i))) return 1; + } + return 0; + } +#elif PY_VERSION_HEX >= 0x03090000 + #error Cannot use "UChar in Unicode" in Python 3.9 without PEP-393 unicode strings. +#elif !defined(PyUnicode_AS_UNICODE) + #error Cannot use "UChar in Unicode" in Python < 3.9 without Py_UNICODE support. 
+#endif + +#if PY_VERSION_HEX < 0x03090000 || (defined(PyUnicode_WCHAR_KIND) && defined(PyUnicode_AS_UNICODE)) +#if !defined(Py_UNICODE_SIZE) || Py_UNICODE_SIZE == 2 + if ((sizeof(Py_UNICODE) == 2) && unlikely(character > 65535)) { + return __Pyx_PyUnicodeBufferContainsUCS4_SP( + __Pyx_PyUnicode_AS_UNICODE(unicode), + __Pyx_PyUnicode_GET_SIZE(unicode), + character); + } else +#endif + { + return __Pyx_PyUnicodeBufferContainsUCS4_BMP( + __Pyx_PyUnicode_AS_UNICODE(unicode), + __Pyx_PyUnicode_GET_SIZE(unicode), + character); + + } +#endif +} + + +//////////////////// PyUnicodeContains.proto //////////////////// + +static CYTHON_INLINE int __Pyx_PyUnicode_ContainsTF(PyObject* substring, PyObject* text, int eq) { + int result = PyUnicode_Contains(text, substring); + return unlikely(result < 0) ? result : (result == (eq == Py_EQ)); +} + + +//////////////////// CStringEquals.proto //////////////////// + +static CYTHON_INLINE int __Pyx_StrEq(const char *, const char *); /*proto*/ + +//////////////////// CStringEquals //////////////////// + +static CYTHON_INLINE int __Pyx_StrEq(const char *s1, const char *s2) { + while (*s1 != '\0' && *s1 == *s2) { s1++; s2++; } + return *s1 == *s2; +} + + +//////////////////// StrEquals.proto //////////////////// +//@requires: BytesEquals +//@requires: UnicodeEquals + +#if PY_MAJOR_VERSION >= 3 +#define __Pyx_PyString_Equals __Pyx_PyUnicode_Equals +#else +#define __Pyx_PyString_Equals __Pyx_PyBytes_Equals +#endif + + +//////////////////// UnicodeEquals.proto //////////////////// + +static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals); /*proto*/ + +//////////////////// UnicodeEquals //////////////////// +//@requires: BytesEquals + +static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals) { +#if CYTHON_COMPILING_IN_PYPY || CYTHON_COMPILING_IN_LIMITED_API + return PyObject_RichCompareBool(s1, s2, equals); +#else +#if PY_MAJOR_VERSION < 3 + PyObject* owned_ref = NULL; +#endif + 
int s1_is_unicode, s2_is_unicode; + if (s1 == s2) { + /* as done by PyObject_RichCompareBool(); also catches the (interned) empty string */ + goto return_eq; + } + s1_is_unicode = PyUnicode_CheckExact(s1); + s2_is_unicode = PyUnicode_CheckExact(s2); +#if PY_MAJOR_VERSION < 3 + if ((s1_is_unicode & (!s2_is_unicode)) && PyString_CheckExact(s2)) { + owned_ref = PyUnicode_FromObject(s2); + if (unlikely(!owned_ref)) + return -1; + s2 = owned_ref; + s2_is_unicode = 1; + } else if ((s2_is_unicode & (!s1_is_unicode)) && PyString_CheckExact(s1)) { + owned_ref = PyUnicode_FromObject(s1); + if (unlikely(!owned_ref)) + return -1; + s1 = owned_ref; + s1_is_unicode = 1; + } else if (((!s2_is_unicode) & (!s1_is_unicode))) { + return __Pyx_PyBytes_Equals(s1, s2, equals); + } +#endif + if (s1_is_unicode & s2_is_unicode) { + Py_ssize_t length; + int kind; + void *data1, *data2; + if (unlikely(__Pyx_PyUnicode_READY(s1) < 0) || unlikely(__Pyx_PyUnicode_READY(s2) < 0)) + return -1; + length = __Pyx_PyUnicode_GET_LENGTH(s1); + if (length != __Pyx_PyUnicode_GET_LENGTH(s2)) { + goto return_ne; + } +#if CYTHON_USE_UNICODE_INTERNALS + { + Py_hash_t hash1, hash2; + #if CYTHON_PEP393_ENABLED + hash1 = ((PyASCIIObject*)s1)->hash; + hash2 = ((PyASCIIObject*)s2)->hash; + #else + hash1 = ((PyUnicodeObject*)s1)->hash; + hash2 = ((PyUnicodeObject*)s2)->hash; + #endif + if (hash1 != hash2 && hash1 != -1 && hash2 != -1) { + goto return_ne; + } + } +#endif + // len(s1) == len(s2) >= 1 (empty string is interned, and "s1 is not s2") + kind = __Pyx_PyUnicode_KIND(s1); + if (kind != __Pyx_PyUnicode_KIND(s2)) { + goto return_ne; + } + data1 = __Pyx_PyUnicode_DATA(s1); + data2 = __Pyx_PyUnicode_DATA(s2); + if (__Pyx_PyUnicode_READ(kind, data1, 0) != __Pyx_PyUnicode_READ(kind, data2, 0)) { + goto return_ne; + } else if (length == 1) { + goto return_eq; + } else { + int result = memcmp(data1, data2, (size_t)(length * kind)); + #if PY_MAJOR_VERSION < 3 + Py_XDECREF(owned_ref); + #endif + return (equals == 
Py_EQ) ? (result == 0) : (result != 0); + } + } else if ((s1 == Py_None) & s2_is_unicode) { + goto return_ne; + } else if ((s2 == Py_None) & s1_is_unicode) { + goto return_ne; + } else { + int result; + PyObject* py_result = PyObject_RichCompare(s1, s2, equals); + #if PY_MAJOR_VERSION < 3 + Py_XDECREF(owned_ref); + #endif + if (!py_result) + return -1; + result = __Pyx_PyObject_IsTrue(py_result); + Py_DECREF(py_result); + return result; + } +return_eq: + #if PY_MAJOR_VERSION < 3 + Py_XDECREF(owned_ref); + #endif + return (equals == Py_EQ); +return_ne: + #if PY_MAJOR_VERSION < 3 + Py_XDECREF(owned_ref); + #endif + return (equals == Py_NE); +#endif +} + + +//////////////////// BytesEquals.proto //////////////////// + +static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals); /*proto*/ + +//////////////////// BytesEquals //////////////////// +//@requires: IncludeStringH + +static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals) { +#if CYTHON_COMPILING_IN_PYPY || CYTHON_COMPILING_IN_LIMITED_API + return PyObject_RichCompareBool(s1, s2, equals); +#else + if (s1 == s2) { + /* as done by PyObject_RichCompareBool(); also catches the (interned) empty string */ + return (equals == Py_EQ); + } else if (PyBytes_CheckExact(s1) & PyBytes_CheckExact(s2)) { + const char *ps1, *ps2; + Py_ssize_t length = PyBytes_GET_SIZE(s1); + if (length != PyBytes_GET_SIZE(s2)) + return (equals == Py_NE); + // len(s1) == len(s2) >= 1 (empty string is interned, and "s1 is not s2") + ps1 = PyBytes_AS_STRING(s1); + ps2 = PyBytes_AS_STRING(s2); + if (ps1[0] != ps2[0]) { + return (equals == Py_NE); + } else if (length == 1) { + return (equals == Py_EQ); + } else { + int result; +#if CYTHON_USE_UNICODE_INTERNALS && (PY_VERSION_HEX < 0x030B0000) + Py_hash_t hash1, hash2; + hash1 = ((PyBytesObject*)s1)->ob_shash; + hash2 = ((PyBytesObject*)s2)->ob_shash; + if (hash1 != hash2 && hash1 != -1 && hash2 != -1) { + return (equals == Py_NE); + } +#endif 
+ result = memcmp(ps1, ps2, (size_t)length); + return (equals == Py_EQ) ? (result == 0) : (result != 0); + } + } else if ((s1 == Py_None) & PyBytes_CheckExact(s2)) { + return (equals == Py_NE); + } else if ((s2 == Py_None) & PyBytes_CheckExact(s1)) { + return (equals == Py_NE); + } else { + int result; + PyObject* py_result = PyObject_RichCompare(s1, s2, equals); + if (!py_result) + return -1; + result = __Pyx_PyObject_IsTrue(py_result); + Py_DECREF(py_result); + return result; + } +#endif +} + +//////////////////// GetItemIntByteArray.proto //////////////////// + +#define __Pyx_GetItemInt_ByteArray(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck) \ + (__Pyx_fits_Py_ssize_t(i, type, is_signed) ? \ + __Pyx_GetItemInt_ByteArray_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) : \ + (PyErr_SetString(PyExc_IndexError, "bytearray index out of range"), -1)) + +static CYTHON_INLINE int __Pyx_GetItemInt_ByteArray_Fast(PyObject* string, Py_ssize_t i, + int wraparound, int boundscheck); + +//////////////////// GetItemIntByteArray //////////////////// + +static CYTHON_INLINE int __Pyx_GetItemInt_ByteArray_Fast(PyObject* string, Py_ssize_t i, + int wraparound, int boundscheck) { + Py_ssize_t length; + if (wraparound | boundscheck) { + length = PyByteArray_GET_SIZE(string); + if (wraparound & unlikely(i < 0)) i += length; + if ((!boundscheck) || likely(__Pyx_is_valid_index(i, length))) { + return (unsigned char) (PyByteArray_AS_STRING(string)[i]); + } else { + PyErr_SetString(PyExc_IndexError, "bytearray index out of range"); + return -1; + } + } else { + return (unsigned char) (PyByteArray_AS_STRING(string)[i]); + } +} + + +//////////////////// SetItemIntByteArray.proto //////////////////// + +#define __Pyx_SetItemInt_ByteArray(o, i, v, type, is_signed, to_py_func, is_list, wraparound, boundscheck) \ + (__Pyx_fits_Py_ssize_t(i, type, is_signed) ? 
\ + __Pyx_SetItemInt_ByteArray_Fast(o, (Py_ssize_t)i, v, wraparound, boundscheck) : \ + (PyErr_SetString(PyExc_IndexError, "bytearray index out of range"), -1)) + +static CYTHON_INLINE int __Pyx_SetItemInt_ByteArray_Fast(PyObject* string, Py_ssize_t i, unsigned char v, + int wraparound, int boundscheck); + +//////////////////// SetItemIntByteArray //////////////////// + +static CYTHON_INLINE int __Pyx_SetItemInt_ByteArray_Fast(PyObject* string, Py_ssize_t i, unsigned char v, + int wraparound, int boundscheck) { + Py_ssize_t length; + if (wraparound | boundscheck) { + length = PyByteArray_GET_SIZE(string); + if (wraparound & unlikely(i < 0)) i += length; + if ((!boundscheck) || likely(__Pyx_is_valid_index(i, length))) { + PyByteArray_AS_STRING(string)[i] = (char) v; + return 0; + } else { + PyErr_SetString(PyExc_IndexError, "bytearray index out of range"); + return -1; + } + } else { + PyByteArray_AS_STRING(string)[i] = (char) v; + return 0; + } +} + + +//////////////////// GetItemIntUnicode.proto //////////////////// + +#define __Pyx_GetItemInt_Unicode(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck) \ + (__Pyx_fits_Py_ssize_t(i, type, is_signed) ? 
\
+    __Pyx_GetItemInt_Unicode_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) : \
+    (PyErr_SetString(PyExc_IndexError, "string index out of range"), (Py_UCS4)-1))
+
+static CYTHON_INLINE Py_UCS4 __Pyx_GetItemInt_Unicode_Fast(PyObject* ustring, Py_ssize_t i,
+                                                           int wraparound, int boundscheck);
+
+//////////////////// GetItemIntUnicode ////////////////////
+
+// ustring[i] as a Py_UCS4 code point.  Wraparound of negative indices and
+// bounds checking are applied only when the compile-time flags request them;
+// failure is signalled as (Py_UCS4)-1 with IndexError set.
+static CYTHON_INLINE Py_UCS4 __Pyx_GetItemInt_Unicode_Fast(PyObject* ustring, Py_ssize_t i,
+                                                           int wraparound, int boundscheck) {
+    Py_ssize_t length;
+    // ensure the canonical (PEP-393) representation exists before READ_CHAR
+    if (unlikely(__Pyx_PyUnicode_READY(ustring) < 0)) return (Py_UCS4)-1;
+    if (wraparound | boundscheck) {
+        length = __Pyx_PyUnicode_GET_LENGTH(ustring);
+        if (wraparound & unlikely(i < 0)) i += length;
+        if ((!boundscheck) || likely(__Pyx_is_valid_index(i, length))) {
+            return __Pyx_PyUnicode_READ_CHAR(ustring, i);
+        } else {
+            PyErr_SetString(PyExc_IndexError, "string index out of range");
+            return (Py_UCS4)-1;
+        }
+    } else {
+        return __Pyx_PyUnicode_READ_CHAR(ustring, i);
+    }
+}
+
+
+/////////////// decode_c_string_utf16.proto ///////////////
+
+// Thin wrappers that fix the "byteorder" argument of PyUnicode_DecodeUTF16():
+// 0 = detect from BOM (native order if none), -1 = little endian, 1 = big endian.
+static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16(const char *s, Py_ssize_t size, const char *errors) {
+    int byteorder = 0;
+    return PyUnicode_DecodeUTF16(s, size, errors, &byteorder);
+}
+static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16LE(const char *s, Py_ssize_t size, const char *errors) {
+    int byteorder = -1;
+    return PyUnicode_DecodeUTF16(s, size, errors, &byteorder);
+}
+static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16BE(const char *s, Py_ssize_t size, const char *errors) {
+    int byteorder = 1;
+    return PyUnicode_DecodeUTF16(s, size, errors, &byteorder);
+}
+
+/////////////// decode_cpp_string.proto ///////////////
+//@requires: IncludeCppStringH
+//@requires: decode_c_bytes
+
+// Decode a slice of a C++ std::string by delegating to the shared
+// byte-buffer decoding helper.
+static CYTHON_INLINE PyObject* __Pyx_decode_cpp_string(
+         std::string cppstring, Py_ssize_t start, Py_ssize_t stop,
+         const char* encoding, const char* errors,
+         PyObject* (*decode_func)(const char *s,
Py_ssize_t size, const char *errors)) { + return __Pyx_decode_c_bytes( + cppstring.data(), cppstring.size(), start, stop, encoding, errors, decode_func); +} + +/////////////// decode_c_string.proto /////////////// + +static CYTHON_INLINE PyObject* __Pyx_decode_c_string( + const char* cstring, Py_ssize_t start, Py_ssize_t stop, + const char* encoding, const char* errors, + PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)); + +/////////////// decode_c_string /////////////// +//@requires: IncludeStringH +//@requires: decode_c_string_utf16 +//@substitute: naming + +/* duplicate code to avoid calling strlen() if start >= 0 and stop >= 0 */ +static CYTHON_INLINE PyObject* __Pyx_decode_c_string( + const char* cstring, Py_ssize_t start, Py_ssize_t stop, + const char* encoding, const char* errors, + PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)) { + Py_ssize_t length; + if (unlikely((start < 0) | (stop < 0))) { + size_t slen = strlen(cstring); + if (unlikely(slen > (size_t) PY_SSIZE_T_MAX)) { + PyErr_SetString(PyExc_OverflowError, + "c-string too long to convert to Python"); + return NULL; + } + length = (Py_ssize_t) slen; + if (start < 0) { + start += length; + if (start < 0) + start = 0; + } + if (stop < 0) + stop += length; + } + if (unlikely(stop <= start)) + return __Pyx_NewRef($empty_unicode); + length = stop - start; + cstring += start; + if (decode_func) { + return decode_func(cstring, length, errors); + } else { + return PyUnicode_Decode(cstring, length, encoding, errors); + } +} + +/////////////// decode_c_bytes.proto /////////////// + +static CYTHON_INLINE PyObject* __Pyx_decode_c_bytes( + const char* cstring, Py_ssize_t length, Py_ssize_t start, Py_ssize_t stop, + const char* encoding, const char* errors, + PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)); + +/////////////// decode_c_bytes /////////////// +//@requires: decode_c_string_utf16 +//@substitute: naming + +static 
CYTHON_INLINE PyObject* __Pyx_decode_c_bytes( + const char* cstring, Py_ssize_t length, Py_ssize_t start, Py_ssize_t stop, + const char* encoding, const char* errors, + PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)) { + if (unlikely((start < 0) | (stop < 0))) { + if (start < 0) { + start += length; + if (start < 0) + start = 0; + } + if (stop < 0) + stop += length; + } + if (stop > length) + stop = length; + if (unlikely(stop <= start)) + return __Pyx_NewRef($empty_unicode); + length = stop - start; + cstring += start; + if (decode_func) { + return decode_func(cstring, length, errors); + } else { + return PyUnicode_Decode(cstring, length, encoding, errors); + } +} + +/////////////// decode_bytes.proto /////////////// +//@requires: decode_c_bytes + +static CYTHON_INLINE PyObject* __Pyx_decode_bytes( + PyObject* string, Py_ssize_t start, Py_ssize_t stop, + const char* encoding, const char* errors, + PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)) { + char* as_c_string; + Py_ssize_t size; +#if CYTHON_ASSUME_SAFE_MACROS + as_c_string = PyBytes_AS_STRING(string); + size = PyBytes_GET_SIZE(string); +#else + if (PyBytes_AsStringAndSize(string, &as_c_string, &size) < 0) { + return NULL; + } +#endif + return __Pyx_decode_c_bytes( + as_c_string, size, + start, stop, encoding, errors, decode_func); +} + +/////////////// decode_bytearray.proto /////////////// +//@requires: decode_c_bytes + +static CYTHON_INLINE PyObject* __Pyx_decode_bytearray( + PyObject* string, Py_ssize_t start, Py_ssize_t stop, + const char* encoding, const char* errors, + PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)) { + char* as_c_string; + Py_ssize_t size; +#if CYTHON_ASSUME_SAFE_MACROS + as_c_string = PyByteArray_AS_STRING(string); + size = PyByteArray_GET_SIZE(string); +#else + if (!(as_c_string = PyByteArray_AsString(string))) return NULL; + if ((size = PyByteArray_Size(string)) < 0) return NULL; +#endif + 
return __Pyx_decode_c_bytes(
+        as_c_string, size,
+        start, stop, encoding, errors, decode_func);
+}
+
+/////////////// PyUnicode_Substring.proto ///////////////
+
+static CYTHON_INLINE PyObject* __Pyx_PyUnicode_Substring(
+            PyObject* text, Py_ssize_t start, Py_ssize_t stop);
+
+/////////////// PyUnicode_Substring ///////////////
+//@substitute: naming
+
+// text[start:stop] with Python slice semantics (negative indices wrap, stop is
+// clamped).  Returns a new reference; empty slices reuse the shared empty
+// string and a full-range slice returns the input object itself.
+static CYTHON_INLINE PyObject* __Pyx_PyUnicode_Substring(
+            PyObject* text, Py_ssize_t start, Py_ssize_t stop) {
+    Py_ssize_t length;
+    if (unlikely(__Pyx_PyUnicode_READY(text) == -1)) return NULL;
+    length = __Pyx_PyUnicode_GET_LENGTH(text);
+    if (start < 0) {
+        start += length;
+        if (start < 0)
+            start = 0;
+    }
+    if (stop < 0)
+        stop += length;
+    else if (stop > length)
+        stop = length;
+    if (stop <= start)
+        return __Pyx_NewRef($empty_unicode);
+    if (start == 0 && stop == length)
+        return __Pyx_NewRef(text);
+#if CYTHON_PEP393_ENABLED
+    // copy straight out of the canonical kind-sized character buffer;
+    // the byte offset is start * (bytes per character)
+    return PyUnicode_FromKindAndData(PyUnicode_KIND(text),
+        PyUnicode_1BYTE_DATA(text) + start*PyUnicode_KIND(text), stop-start);
+#else
+    return PyUnicode_FromUnicode(PyUnicode_AS_UNICODE(text)+start, stop-start);
+#endif
+}
+
+
+/////////////// py_unicode_istitle.proto ///////////////
+
+// Py_UNICODE_ISTITLE() doesn't match unicode.istitle() as the latter
+// additionally allows character that comply with Py_UNICODE_ISUPPER()
+
+#if PY_VERSION_HEX < 0x030200A2
+static CYTHON_INLINE int __Pyx_Py_UNICODE_ISTITLE(Py_UNICODE uchar)
+#else
+static CYTHON_INLINE int __Pyx_Py_UNICODE_ISTITLE(Py_UCS4 uchar)
+#endif
+{
+    return Py_UNICODE_ISTITLE(uchar) || Py_UNICODE_ISUPPER(uchar);
+}
+
+
+/////////////// unicode_tailmatch.proto ///////////////
+
+static int __Pyx_PyUnicode_Tailmatch(
+    PyObject* s, PyObject* substr, Py_ssize_t start, Py_ssize_t end, int direction); /*proto*/
+
+/////////////// unicode_tailmatch ///////////////
+
+// Python's unicode.startswith() and unicode.endswith() support a
+// tuple of prefixes/suffixes, whereas it's much more common to
+// test for a single
unicode string. + +static int __Pyx_PyUnicode_TailmatchTuple(PyObject* s, PyObject* substrings, + Py_ssize_t start, Py_ssize_t end, int direction) { + Py_ssize_t i, count = PyTuple_GET_SIZE(substrings); + for (i = 0; i < count; i++) { + Py_ssize_t result; +#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + result = PyUnicode_Tailmatch(s, PyTuple_GET_ITEM(substrings, i), + start, end, direction); +#else + PyObject* sub = PySequence_ITEM(substrings, i); + if (unlikely(!sub)) return -1; + result = PyUnicode_Tailmatch(s, sub, start, end, direction); + Py_DECREF(sub); +#endif + if (result) { + return (int) result; + } + } + return 0; +} + +static int __Pyx_PyUnicode_Tailmatch(PyObject* s, PyObject* substr, + Py_ssize_t start, Py_ssize_t end, int direction) { + if (unlikely(PyTuple_Check(substr))) { + return __Pyx_PyUnicode_TailmatchTuple(s, substr, start, end, direction); + } + return (int) PyUnicode_Tailmatch(s, substr, start, end, direction); +} + + +/////////////// bytes_tailmatch.proto /////////////// + +static int __Pyx_PyBytes_SingleTailmatch(PyObject* self, PyObject* arg, + Py_ssize_t start, Py_ssize_t end, int direction); /*proto*/ +static int __Pyx_PyBytes_Tailmatch(PyObject* self, PyObject* substr, + Py_ssize_t start, Py_ssize_t end, int direction); /*proto*/ + +/////////////// bytes_tailmatch /////////////// + +static int __Pyx_PyBytes_SingleTailmatch(PyObject* self, PyObject* arg, + Py_ssize_t start, Py_ssize_t end, int direction) { + const char* self_ptr = PyBytes_AS_STRING(self); + Py_ssize_t self_len = PyBytes_GET_SIZE(self); + const char* sub_ptr; + Py_ssize_t sub_len; + int retval; + + Py_buffer view; + view.obj = NULL; + + if ( PyBytes_Check(arg) ) { + sub_ptr = PyBytes_AS_STRING(arg); + sub_len = PyBytes_GET_SIZE(arg); + } +#if PY_MAJOR_VERSION < 3 + // Python 2.x allows mixing unicode and str + else if ( PyUnicode_Check(arg) ) { + return (int) PyUnicode_Tailmatch(self, arg, start, end, direction); + } +#endif + else { + if 
(unlikely(PyObject_GetBuffer(arg, &view, PyBUF_SIMPLE) == -1)) + return -1; + sub_ptr = (const char*) view.buf; + sub_len = view.len; + } + + if (end > self_len) + end = self_len; + else if (end < 0) + end += self_len; + if (end < 0) + end = 0; + if (start < 0) + start += self_len; + if (start < 0) + start = 0; + + if (direction > 0) { + /* endswith */ + if (end-sub_len > start) + start = end - sub_len; + } + + if (start + sub_len <= end) + retval = !memcmp(self_ptr+start, sub_ptr, (size_t)sub_len); + else + retval = 0; + + if (view.obj) + PyBuffer_Release(&view); + + return retval; +} + +static int __Pyx_PyBytes_TailmatchTuple(PyObject* self, PyObject* substrings, + Py_ssize_t start, Py_ssize_t end, int direction) { + Py_ssize_t i, count = PyTuple_GET_SIZE(substrings); + for (i = 0; i < count; i++) { + int result; +#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + result = __Pyx_PyBytes_SingleTailmatch(self, PyTuple_GET_ITEM(substrings, i), + start, end, direction); +#else + PyObject* sub = PySequence_ITEM(substrings, i); + if (unlikely(!sub)) return -1; + result = __Pyx_PyBytes_SingleTailmatch(self, sub, start, end, direction); + Py_DECREF(sub); +#endif + if (result) { + return result; + } + } + return 0; +} + +static int __Pyx_PyBytes_Tailmatch(PyObject* self, PyObject* substr, + Py_ssize_t start, Py_ssize_t end, int direction) { + if (unlikely(PyTuple_Check(substr))) { + return __Pyx_PyBytes_TailmatchTuple(self, substr, start, end, direction); + } + + return __Pyx_PyBytes_SingleTailmatch(self, substr, start, end, direction); +} + + +/////////////// str_tailmatch.proto /////////////// + +static CYTHON_INLINE int __Pyx_PyStr_Tailmatch(PyObject* self, PyObject* arg, Py_ssize_t start, + Py_ssize_t end, int direction); /*proto*/ + +/////////////// str_tailmatch /////////////// +//@requires: bytes_tailmatch +//@requires: unicode_tailmatch + +static CYTHON_INLINE int __Pyx_PyStr_Tailmatch(PyObject* self, PyObject* arg, Py_ssize_t start, + Py_ssize_t end, 
int direction) +{ + // We do not use a C compiler macro here to avoid "unused function" + // warnings for the *_Tailmatch() function that is not being used in + // the specific CPython version. The C compiler will generate the same + // code anyway, and will usually just remove the unused function. + if (PY_MAJOR_VERSION < 3) + return __Pyx_PyBytes_Tailmatch(self, arg, start, end, direction); + else + return __Pyx_PyUnicode_Tailmatch(self, arg, start, end, direction); +} + + +/////////////// bytes_index.proto /////////////// + +static CYTHON_INLINE char __Pyx_PyBytes_GetItemInt(PyObject* bytes, Py_ssize_t index, int check_bounds); /*proto*/ + +/////////////// bytes_index /////////////// + +static CYTHON_INLINE char __Pyx_PyBytes_GetItemInt(PyObject* bytes, Py_ssize_t index, int check_bounds) { + if (index < 0) + index += PyBytes_GET_SIZE(bytes); + if (check_bounds) { + Py_ssize_t size = PyBytes_GET_SIZE(bytes); + if (unlikely(!__Pyx_is_valid_index(index, size))) { + PyErr_SetString(PyExc_IndexError, "string index out of range"); + return (char) -1; + } + } + return PyBytes_AS_STRING(bytes)[index]; +} + + +//////////////////// StringJoin.proto //////////////////// + +#if PY_MAJOR_VERSION < 3 +#define __Pyx_PyString_Join __Pyx_PyBytes_Join +#define __Pyx_PyBaseString_Join(s, v) (PyUnicode_CheckExact(s) ? 
PyUnicode_Join(s, v) : __Pyx_PyBytes_Join(s, v)) +#else +#define __Pyx_PyString_Join PyUnicode_Join +#define __Pyx_PyBaseString_Join PyUnicode_Join +#endif +static CYTHON_INLINE PyObject* __Pyx_PyBytes_Join(PyObject* sep, PyObject* values); /*proto*/ + +//////////////////// StringJoin //////////////////// +//@requires: ObjectHandling.c::PyObjectCallMethod1 + +static CYTHON_INLINE PyObject* __Pyx_PyBytes_Join(PyObject* sep, PyObject* values) { + // avoid unused function + (void) __Pyx_PyObject_CallMethod1; +#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION < 3 + return _PyString_Join(sep, values); +#elif CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030d0000 + return _PyBytes_Join(sep, values); +#else + return __Pyx_PyObject_CallMethod1(sep, PYIDENT("join"), values); +#endif +} + + +/////////////// JoinPyUnicode.proto /////////////// + +static PyObject* __Pyx_PyUnicode_Join(PyObject* value_tuple, Py_ssize_t value_count, Py_ssize_t result_ulength, + Py_UCS4 max_char); + +/////////////// JoinPyUnicode /////////////// +//@requires: IncludeStringH +//@substitute: naming + +static PyObject* __Pyx_PyUnicode_Join(PyObject* value_tuple, Py_ssize_t value_count, Py_ssize_t result_ulength, + Py_UCS4 max_char) { +#if CYTHON_USE_UNICODE_INTERNALS && CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + PyObject *result_uval; + int result_ukind, kind_shift; + Py_ssize_t i, char_pos; + void *result_udata; + CYTHON_MAYBE_UNUSED_VAR(max_char); +#if CYTHON_PEP393_ENABLED + // Py 3.3+ (post PEP-393) + result_uval = PyUnicode_New(result_ulength, max_char); + if (unlikely(!result_uval)) return NULL; + result_ukind = (max_char <= 255) ? PyUnicode_1BYTE_KIND : (max_char <= 65535) ? PyUnicode_2BYTE_KIND : PyUnicode_4BYTE_KIND; + kind_shift = (result_ukind == PyUnicode_4BYTE_KIND) ? 
2 : result_ukind - 1; + result_udata = PyUnicode_DATA(result_uval); +#else + // Py 2.x/3.2 (pre PEP-393) + result_uval = PyUnicode_FromUnicode(NULL, result_ulength); + if (unlikely(!result_uval)) return NULL; + result_ukind = sizeof(Py_UNICODE); + kind_shift = (result_ukind == 4) ? 2 : result_ukind - 1; + result_udata = PyUnicode_AS_UNICODE(result_uval); +#endif + assert(kind_shift == 2 || kind_shift == 1 || kind_shift == 0); + + char_pos = 0; + for (i=0; i < value_count; i++) { + int ukind; + Py_ssize_t ulength; + void *udata; + PyObject *uval = PyTuple_GET_ITEM(value_tuple, i); + if (unlikely(__Pyx_PyUnicode_READY(uval))) + goto bad; + ulength = __Pyx_PyUnicode_GET_LENGTH(uval); + if (unlikely(!ulength)) + continue; + if (unlikely((PY_SSIZE_T_MAX >> kind_shift) - ulength < char_pos)) + goto overflow; + ukind = __Pyx_PyUnicode_KIND(uval); + udata = __Pyx_PyUnicode_DATA(uval); + if (!CYTHON_PEP393_ENABLED || ukind == result_ukind) { + memcpy((char *)result_udata + (char_pos << kind_shift), udata, (size_t) (ulength << kind_shift)); + } else { + #if PY_VERSION_HEX >= 0x030d0000 + if (unlikely(PyUnicode_CopyCharacters(result_uval, char_pos, uval, 0, ulength) < 0)) goto bad; + #elif CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030300F0 || defined(_PyUnicode_FastCopyCharacters) + _PyUnicode_FastCopyCharacters(result_uval, char_pos, uval, 0, ulength); + #else + Py_ssize_t j; + for (j=0; j < ulength; j++) { + Py_UCS4 uchar = __Pyx_PyUnicode_READ(ukind, udata, j); + __Pyx_PyUnicode_WRITE(result_ukind, result_udata, char_pos+j, uchar); + } + #endif + } + char_pos += ulength; + } + return result_uval; +overflow: + PyErr_SetString(PyExc_OverflowError, "join() result is too long for a Python string"); +bad: + Py_DECREF(result_uval); + return NULL; +#else + // non-CPython fallback + CYTHON_UNUSED_VAR(max_char); + CYTHON_UNUSED_VAR(result_ulength); + CYTHON_UNUSED_VAR(value_count); + return PyUnicode_Join($empty_unicode, value_tuple); +#endif +} + + +/////////////// 
BuildPyUnicode.proto /////////////// + +static PyObject* __Pyx_PyUnicode_BuildFromAscii(Py_ssize_t ulength, char* chars, int clength, + int prepend_sign, char padding_char); + +/////////////// BuildPyUnicode /////////////// + +// Create a PyUnicode object from an ASCII char*, e.g. a formatted number. + +static PyObject* __Pyx_PyUnicode_BuildFromAscii(Py_ssize_t ulength, char* chars, int clength, + int prepend_sign, char padding_char) { + PyObject *uval; + Py_ssize_t uoffset = ulength - clength; +#if CYTHON_USE_UNICODE_INTERNALS + Py_ssize_t i; +#if CYTHON_PEP393_ENABLED + // Py 3.3+ (post PEP-393) + void *udata; + uval = PyUnicode_New(ulength, 127); + if (unlikely(!uval)) return NULL; + udata = PyUnicode_DATA(uval); +#else + // Py 2.x/3.2 (pre PEP-393) + Py_UNICODE *udata; + uval = PyUnicode_FromUnicode(NULL, ulength); + if (unlikely(!uval)) return NULL; + udata = PyUnicode_AS_UNICODE(uval); +#endif + if (uoffset > 0) { + i = 0; + if (prepend_sign) { + __Pyx_PyUnicode_WRITE(PyUnicode_1BYTE_KIND, udata, 0, '-'); + i++; + } + for (; i < uoffset; i++) { + __Pyx_PyUnicode_WRITE(PyUnicode_1BYTE_KIND, udata, i, padding_char); + } + } + for (i=0; i < clength; i++) { + __Pyx_PyUnicode_WRITE(PyUnicode_1BYTE_KIND, udata, uoffset+i, chars[i]); + } + +#else + // non-CPython + { + PyObject *sign = NULL, *padding = NULL; + uval = NULL; + if (uoffset > 0) { + prepend_sign = !!prepend_sign; + if (uoffset > prepend_sign) { + padding = PyUnicode_FromOrdinal(padding_char); + if (likely(padding) && uoffset > prepend_sign + 1) { + PyObject *tmp; + PyObject *repeat = PyInt_FromSsize_t(uoffset - prepend_sign); + if (unlikely(!repeat)) goto done_or_error; + tmp = PyNumber_Multiply(padding, repeat); + Py_DECREF(repeat); + Py_DECREF(padding); + padding = tmp; + } + if (unlikely(!padding)) goto done_or_error; + } + if (prepend_sign) { + sign = PyUnicode_FromOrdinal('-'); + if (unlikely(!sign)) goto done_or_error; + } + } + + uval = PyUnicode_DecodeASCII(chars, clength, NULL); + if 
(likely(uval) && padding) { + PyObject *tmp = PyNumber_Add(padding, uval); + Py_DECREF(uval); + uval = tmp; + } + if (likely(uval) && sign) { + PyObject *tmp = PyNumber_Add(sign, uval); + Py_DECREF(uval); + uval = tmp; + } +done_or_error: + Py_XDECREF(padding); + Py_XDECREF(sign); + } +#endif + + return uval; +} + + +//////////////////// ByteArrayAppendObject.proto //////////////////// + +static CYTHON_INLINE int __Pyx_PyByteArray_AppendObject(PyObject* bytearray, PyObject* value); + +//////////////////// ByteArrayAppendObject //////////////////// +//@requires: ByteArrayAppend + +static CYTHON_INLINE int __Pyx_PyByteArray_AppendObject(PyObject* bytearray, PyObject* value) { + Py_ssize_t ival; +#if PY_MAJOR_VERSION < 3 + if (unlikely(PyString_Check(value))) { + if (unlikely(PyString_GET_SIZE(value) != 1)) { + PyErr_SetString(PyExc_ValueError, "string must be of size 1"); + return -1; + } + ival = (unsigned char) (PyString_AS_STRING(value)[0]); + } else +#endif +#if CYTHON_USE_PYLONG_INTERNALS + if (likely(PyLong_CheckExact(value)) && likely(__Pyx_PyLong_IsCompact(value))) { + if (__Pyx_PyLong_IsZero(value)) { + ival = 0; + } else { + ival = __Pyx_PyLong_CompactValue(value); + if (unlikely(ival > 255)) goto bad_range; + } + } else +#endif + { + // CPython calls PyNumber_Index() internally + ival = __Pyx_PyIndex_AsSsize_t(value); + if (unlikely(!__Pyx_is_valid_index(ival, 256))) { + if (ival == -1 && PyErr_Occurred()) + return -1; + goto bad_range; + } + } + return __Pyx_PyByteArray_Append(bytearray, ival); +bad_range: + PyErr_SetString(PyExc_ValueError, "byte must be in range(0, 256)"); + return -1; +} + +//////////////////// ByteArrayAppend.proto //////////////////// + +static CYTHON_INLINE int __Pyx_PyByteArray_Append(PyObject* bytearray, int value); + +//////////////////// ByteArrayAppend //////////////////// +//@requires: ObjectHandling.c::PyObjectCallMethod1 + +static CYTHON_INLINE int __Pyx_PyByteArray_Append(PyObject* bytearray, int value) { + PyObject *pyval, 
*retval; +#if CYTHON_COMPILING_IN_CPYTHON + if (likely(__Pyx_is_valid_index(value, 256))) { + Py_ssize_t n = Py_SIZE(bytearray); + if (likely(n != PY_SSIZE_T_MAX)) { + if (unlikely(PyByteArray_Resize(bytearray, n + 1) < 0)) + return -1; + PyByteArray_AS_STRING(bytearray)[n] = value; + return 0; + } + } else { + PyErr_SetString(PyExc_ValueError, "byte must be in range(0, 256)"); + return -1; + } +#endif + pyval = PyInt_FromLong(value); + if (unlikely(!pyval)) + return -1; + retval = __Pyx_PyObject_CallMethod1(bytearray, PYIDENT("append"), pyval); + Py_DECREF(pyval); + if (unlikely(!retval)) + return -1; + Py_DECREF(retval); + return 0; +} + + +//////////////////// PyObjectFormat.proto //////////////////// + +#if CYTHON_USE_UNICODE_WRITER +static PyObject* __Pyx_PyObject_Format(PyObject* s, PyObject* f); +#else +#define __Pyx_PyObject_Format(s, f) PyObject_Format(s, f) +#endif + +//////////////////// PyObjectFormat //////////////////// + +#if CYTHON_USE_UNICODE_WRITER +static PyObject* __Pyx_PyObject_Format(PyObject* obj, PyObject* format_spec) { + int ret; + _PyUnicodeWriter writer; + + if (likely(PyFloat_CheckExact(obj))) { + // copied from CPython 3.5 "float__format__()" in floatobject.c +#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x03040000 + _PyUnicodeWriter_Init(&writer, 0); +#else + _PyUnicodeWriter_Init(&writer); +#endif + ret = _PyFloat_FormatAdvancedWriter( + &writer, + obj, + format_spec, 0, PyUnicode_GET_LENGTH(format_spec)); + } else if (likely(PyLong_CheckExact(obj))) { + // copied from CPython 3.5 "long__format__()" in longobject.c +#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x03040000 + _PyUnicodeWriter_Init(&writer, 0); +#else + _PyUnicodeWriter_Init(&writer); +#endif + ret = _PyLong_FormatAdvancedWriter( + &writer, + obj, + format_spec, 0, PyUnicode_GET_LENGTH(format_spec)); + } else { + return PyObject_Format(obj, format_spec); + } + + if (unlikely(ret == -1)) { + _PyUnicodeWriter_Dealloc(&writer); + return NULL; + } + return 
_PyUnicodeWriter_Finish(&writer); +} +#endif + + +//////////////////// PyObjectFormatSimple.proto //////////////////// + +#if CYTHON_COMPILING_IN_PYPY + #define __Pyx_PyObject_FormatSimple(s, f) ( \ + likely(PyUnicode_CheckExact(s)) ? (Py_INCREF(s), s) : \ + PyObject_Format(s, f)) +#elif PY_MAJOR_VERSION < 3 + // str is common in Py2, but formatting must return a Unicode string + #define __Pyx_PyObject_FormatSimple(s, f) ( \ + likely(PyUnicode_CheckExact(s)) ? (Py_INCREF(s), s) : \ + likely(PyString_CheckExact(s)) ? PyUnicode_FromEncodedObject(s, NULL, "strict") : \ + PyObject_Format(s, f)) +#elif CYTHON_USE_TYPE_SLOTS + // Py3 nicely returns unicode strings from str() and repr(), which makes this quite efficient for builtin types. + // In Py3.8+, tp_str() delegates to tp_repr(), so we call tp_repr() directly here. + #define __Pyx_PyObject_FormatSimple(s, f) ( \ + likely(PyUnicode_CheckExact(s)) ? (Py_INCREF(s), s) : \ + likely(PyLong_CheckExact(s)) ? PyLong_Type.tp_repr(s) : \ + likely(PyFloat_CheckExact(s)) ? PyFloat_Type.tp_repr(s) : \ + PyObject_Format(s, f)) +#else + #define __Pyx_PyObject_FormatSimple(s, f) ( \ + likely(PyUnicode_CheckExact(s)) ? 
(Py_INCREF(s), s) : \ + PyObject_Format(s, f)) +#endif + + +//////////////////// PyObjectFormatAndDecref.proto //////////////////// + +static CYTHON_INLINE PyObject* __Pyx_PyObject_FormatSimpleAndDecref(PyObject* s, PyObject* f); +static CYTHON_INLINE PyObject* __Pyx_PyObject_FormatAndDecref(PyObject* s, PyObject* f); + +//////////////////// PyObjectFormatAndDecref //////////////////// + +static CYTHON_INLINE PyObject* __Pyx_PyObject_FormatSimpleAndDecref(PyObject* s, PyObject* f) { + if (unlikely(!s)) return NULL; + if (likely(PyUnicode_CheckExact(s))) return s; + #if PY_MAJOR_VERSION < 3 + // str is common in Py2, but formatting must return a Unicode string + if (likely(PyString_CheckExact(s))) { + PyObject *result = PyUnicode_FromEncodedObject(s, NULL, "strict"); + Py_DECREF(s); + return result; + } + #endif + return __Pyx_PyObject_FormatAndDecref(s, f); +} + +static CYTHON_INLINE PyObject* __Pyx_PyObject_FormatAndDecref(PyObject* s, PyObject* f) { + PyObject *result; + if (unlikely(!s)) return NULL; + result = PyObject_Format(s, f); + Py_DECREF(s); + return result; +} + + +//////////////////// PyUnicode_Unicode.proto //////////////////// + +static CYTHON_INLINE PyObject* __Pyx_PyUnicode_Unicode(PyObject *obj);/*proto*/ + +//////////////////// PyUnicode_Unicode //////////////////// + +static CYTHON_INLINE PyObject* __Pyx_PyUnicode_Unicode(PyObject *obj) { + if (unlikely(obj == Py_None)) + obj = PYUNICODE("None"); + return __Pyx_NewRef(obj); +} + + +//////////////////// PyObject_Unicode.proto //////////////////// + +#if PY_MAJOR_VERSION >= 3 +#define __Pyx_PyObject_Unicode(obj) \ + (likely(PyUnicode_CheckExact(obj)) ? __Pyx_NewRef(obj) : PyObject_Str(obj)) +#else +#define __Pyx_PyObject_Unicode(obj) \ + (likely(PyUnicode_CheckExact(obj)) ? 
__Pyx_NewRef(obj) : PyObject_Unicode(obj)) +#endif + + +//////////////////// PyStr_Str.proto //////////////////// + +static CYTHON_INLINE PyObject* __Pyx_PyStr_Str(PyObject *obj);/*proto*/ + +//////////////////// PyStr_Str //////////////////// + +static CYTHON_INLINE PyObject* __Pyx_PyStr_Str(PyObject *obj) { + if (unlikely(obj == Py_None)) + obj = PYIDENT("None"); + return __Pyx_NewRef(obj); +} + + +//////////////////// PyObject_Str.proto //////////////////// + +#define __Pyx_PyObject_Str(obj) \ + (likely(PyString_CheckExact(obj)) ? __Pyx_NewRef(obj) : PyObject_Str(obj)) + diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Utility/UFuncs_C.c b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Utility/UFuncs_C.c new file mode 100644 index 0000000000000000000000000000000000000000..2115e4b2b52da8b7e10e173805947efcdc131014 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Utility/UFuncs_C.c @@ -0,0 +1,42 @@ +///////////////////////// UFuncsInit.proto ///////////////////////// +//@proto_block: utility_code_proto_before_types + +#include +#include + +// account for change in type of arguments to PyUFuncGenericFunction in Numpy 1.19.x +// Unfortunately we can only test against Numpy version 1.20.x since it wasn't marked +// as an API break. Therefore, I'm "solving" the issue by casting function pointer types +// on lower Numpy versions. 
+#if NPY_API_VERSION >= 0x0000000e // Numpy 1.20.x +#define __PYX_PYUFUNCGENERICFUNCTION_CAST(x) x +#else +#define __PYX_PYUFUNCGENERICFUNCTION_CAST(x) (PyUFuncGenericFunction)x +#endif + +/////////////////////// UFuncConsts.proto //////////////////// + +// getter functions because we can't forward-declare arrays +static PyUFuncGenericFunction* {{ufunc_funcs_name}}(void); /* proto */ +static char* {{ufunc_types_name}}(void); /* proto */ +static void* {{ufunc_data_name}}[] = {NULL}; /* always null */ + +/////////////////////// UFuncConsts ///////////////////////// + +static PyUFuncGenericFunction* {{ufunc_funcs_name}}(void) { + static PyUFuncGenericFunction arr[] = { + {{for loop, cname in looper(func_cnames)}} + __PYX_PYUFUNCGENERICFUNCTION_CAST(&{{cname}}){{if not loop.last}},{{endif}} + {{endfor}} + }; + return arr; +} + +static char* {{ufunc_types_name}}(void) { + static char arr[] = { + {{for loop, tp in looper(type_constants)}} + {{tp}}{{if not loop.last}},{{endif}} + {{endfor}} + }; + return arr; +} diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/__pycache__/__init__.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c2c14270836aff0d9bb1674f9a51da5f47c968cc Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/__pycache__/__init__.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/__pycache__/convert_matrix.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/__pycache__/convert_matrix.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..47085aaea1f3089f45f72ca2e2259afb449fac1e Binary files /dev/null and 
b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/__pycache__/convert_matrix.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_centrality.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_centrality.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..376b869a70ce0408db2e202a42feef0494f99e96 Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_centrality.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_covering.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_covering.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..70ae0ad2b4b0724c17fb5f4887a0406175f3a29e Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_covering.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_extendability.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_extendability.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e1aa2c99cfff8a8f63678482171378e50395e5dd Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_extendability.cpython-311.pyc differ diff --git 
a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_generators.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_generators.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e004f5c36e05f3cd706045914143f6c6619827ee Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_generators.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_matching.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_matching.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..099ebe9f2de7ac5924a66522f38ed463d2b7b5de Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_matching.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_matrix.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_matrix.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..df2c6cbcaff9848756e3174a7b92cd018738d0d9 Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_matrix.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_redundancy.cpython-311.pyc 
b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_redundancy.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b16c0d8af8256dc83a68cc243a3651cc3b7987c6 Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_redundancy.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_spectral_bipartivity.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_spectral_bipartivity.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6c233ec6253a5dc397a7ad184b34c94cc63cb8e6 Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_spectral_bipartivity.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/bipartite/tests/test_centrality.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/bipartite/tests/test_centrality.py new file mode 100644 index 0000000000000000000000000000000000000000..19fb5d117be94c688616a394ea3322e93bfa3e00 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/bipartite/tests/test_centrality.py @@ -0,0 +1,192 @@ +import pytest + +import networkx as nx +from networkx.algorithms import bipartite + + +class TestBipartiteCentrality: + @classmethod + def setup_class(cls): + cls.P4 = nx.path_graph(4) + cls.K3 = nx.complete_bipartite_graph(3, 3) + cls.C4 = nx.cycle_graph(4) + cls.davis = nx.davis_southern_women_graph() + cls.top_nodes = [ + n for n, d in cls.davis.nodes(data=True) if d["bipartite"] == 0 + ] + + def 
test_degree_centrality(self): + d = bipartite.degree_centrality(self.P4, [1, 3]) + answer = {0: 0.5, 1: 1.0, 2: 1.0, 3: 0.5} + assert d == answer + d = bipartite.degree_centrality(self.K3, [0, 1, 2]) + answer = {0: 1.0, 1: 1.0, 2: 1.0, 3: 1.0, 4: 1.0, 5: 1.0} + assert d == answer + d = bipartite.degree_centrality(self.C4, [0, 2]) + answer = {0: 1.0, 1: 1.0, 2: 1.0, 3: 1.0} + assert d == answer + + def test_betweenness_centrality(self): + c = bipartite.betweenness_centrality(self.P4, [1, 3]) + answer = {0: 0.0, 1: 1.0, 2: 1.0, 3: 0.0} + assert c == answer + c = bipartite.betweenness_centrality(self.K3, [0, 1, 2]) + answer = {0: 0.125, 1: 0.125, 2: 0.125, 3: 0.125, 4: 0.125, 5: 0.125} + assert c == answer + c = bipartite.betweenness_centrality(self.C4, [0, 2]) + answer = {0: 0.25, 1: 0.25, 2: 0.25, 3: 0.25} + assert c == answer + + def test_closeness_centrality(self): + c = bipartite.closeness_centrality(self.P4, [1, 3]) + answer = {0: 2.0 / 3, 1: 1.0, 2: 1.0, 3: 2.0 / 3} + assert c == answer + c = bipartite.closeness_centrality(self.K3, [0, 1, 2]) + answer = {0: 1.0, 1: 1.0, 2: 1.0, 3: 1.0, 4: 1.0, 5: 1.0} + assert c == answer + c = bipartite.closeness_centrality(self.C4, [0, 2]) + answer = {0: 1.0, 1: 1.0, 2: 1.0, 3: 1.0} + assert c == answer + G = nx.Graph() + G.add_node(0) + G.add_node(1) + c = bipartite.closeness_centrality(G, [0]) + assert c == {0: 0.0, 1: 0.0} + c = bipartite.closeness_centrality(G, [1]) + assert c == {0: 0.0, 1: 0.0} + + def test_bipartite_closeness_centrality_unconnected(self): + G = nx.complete_bipartite_graph(3, 3) + G.add_edge(6, 7) + c = bipartite.closeness_centrality(G, [0, 2, 4, 6], normalized=False) + answer = { + 0: 10.0 / 7, + 2: 10.0 / 7, + 4: 10.0 / 7, + 6: 10.0, + 1: 10.0 / 7, + 3: 10.0 / 7, + 5: 10.0 / 7, + 7: 10.0, + } + assert c == answer + + def test_davis_degree_centrality(self): + G = self.davis + deg = bipartite.degree_centrality(G, self.top_nodes) + answer = { + "E8": 0.78, + "E9": 0.67, + "E7": 0.56, + "Nora Fayette": 
0.57, + "Evelyn Jefferson": 0.57, + "Theresa Anderson": 0.57, + "E6": 0.44, + "Sylvia Avondale": 0.50, + "Laura Mandeville": 0.50, + "Brenda Rogers": 0.50, + "Katherina Rogers": 0.43, + "E5": 0.44, + "Helen Lloyd": 0.36, + "E3": 0.33, + "Ruth DeSand": 0.29, + "Verne Sanderson": 0.29, + "E12": 0.33, + "Myra Liddel": 0.29, + "E11": 0.22, + "Eleanor Nye": 0.29, + "Frances Anderson": 0.29, + "Pearl Oglethorpe": 0.21, + "E4": 0.22, + "Charlotte McDowd": 0.29, + "E10": 0.28, + "Olivia Carleton": 0.14, + "Flora Price": 0.14, + "E2": 0.17, + "E1": 0.17, + "Dorothy Murchison": 0.14, + "E13": 0.17, + "E14": 0.17, + } + for node, value in answer.items(): + assert value == pytest.approx(deg[node], abs=1e-2) + + def test_davis_betweenness_centrality(self): + G = self.davis + bet = bipartite.betweenness_centrality(G, self.top_nodes) + answer = { + "E8": 0.24, + "E9": 0.23, + "E7": 0.13, + "Nora Fayette": 0.11, + "Evelyn Jefferson": 0.10, + "Theresa Anderson": 0.09, + "E6": 0.07, + "Sylvia Avondale": 0.07, + "Laura Mandeville": 0.05, + "Brenda Rogers": 0.05, + "Katherina Rogers": 0.05, + "E5": 0.04, + "Helen Lloyd": 0.04, + "E3": 0.02, + "Ruth DeSand": 0.02, + "Verne Sanderson": 0.02, + "E12": 0.02, + "Myra Liddel": 0.02, + "E11": 0.02, + "Eleanor Nye": 0.01, + "Frances Anderson": 0.01, + "Pearl Oglethorpe": 0.01, + "E4": 0.01, + "Charlotte McDowd": 0.01, + "E10": 0.01, + "Olivia Carleton": 0.01, + "Flora Price": 0.01, + "E2": 0.00, + "E1": 0.00, + "Dorothy Murchison": 0.00, + "E13": 0.00, + "E14": 0.00, + } + for node, value in answer.items(): + assert value == pytest.approx(bet[node], abs=1e-2) + + def test_davis_closeness_centrality(self): + G = self.davis + clos = bipartite.closeness_centrality(G, self.top_nodes) + answer = { + "E8": 0.85, + "E9": 0.79, + "E7": 0.73, + "Nora Fayette": 0.80, + "Evelyn Jefferson": 0.80, + "Theresa Anderson": 0.80, + "E6": 0.69, + "Sylvia Avondale": 0.77, + "Laura Mandeville": 0.73, + "Brenda Rogers": 0.73, + "Katherina Rogers": 0.73, + "E5": 
0.59, + "Helen Lloyd": 0.73, + "E3": 0.56, + "Ruth DeSand": 0.71, + "Verne Sanderson": 0.71, + "E12": 0.56, + "Myra Liddel": 0.69, + "E11": 0.54, + "Eleanor Nye": 0.67, + "Frances Anderson": 0.67, + "Pearl Oglethorpe": 0.67, + "E4": 0.54, + "Charlotte McDowd": 0.60, + "E10": 0.55, + "Olivia Carleton": 0.59, + "Flora Price": 0.59, + "E2": 0.52, + "E1": 0.52, + "Dorothy Murchison": 0.65, + "E13": 0.52, + "E14": 0.52, + } + for node, value in answer.items(): + assert value == pytest.approx(clos[node], abs=1e-2) diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/bipartite/tests/test_project.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/bipartite/tests/test_project.py new file mode 100644 index 0000000000000000000000000000000000000000..076bb42b668657cad51f6423e5aacf23a2a1cd28 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/bipartite/tests/test_project.py @@ -0,0 +1,407 @@ +import pytest + +import networkx as nx +from networkx.algorithms import bipartite +from networkx.utils import edges_equal, nodes_equal + + +class TestBipartiteProject: + def test_path_projected_graph(self): + G = nx.path_graph(4) + P = bipartite.projected_graph(G, [1, 3]) + assert nodes_equal(list(P), [1, 3]) + assert edges_equal(list(P.edges()), [(1, 3)]) + P = bipartite.projected_graph(G, [0, 2]) + assert nodes_equal(list(P), [0, 2]) + assert edges_equal(list(P.edges()), [(0, 2)]) + G = nx.MultiGraph([(0, 1)]) + with pytest.raises(nx.NetworkXError, match="not defined for multigraphs"): + bipartite.projected_graph(G, [0]) + + def test_path_projected_properties_graph(self): + G = nx.path_graph(4) + G.add_node(1, name="one") + G.add_node(2, name="two") + P = bipartite.projected_graph(G, [1, 3]) + assert nodes_equal(list(P), [1, 3]) + assert edges_equal(list(P.edges()), [(1, 3)]) + assert P.nodes[1]["name"] == G.nodes[1]["name"] + P = bipartite.projected_graph(G, 
[0, 2]) + assert nodes_equal(list(P), [0, 2]) + assert edges_equal(list(P.edges()), [(0, 2)]) + assert P.nodes[2]["name"] == G.nodes[2]["name"] + + def test_path_collaboration_projected_graph(self): + G = nx.path_graph(4) + P = bipartite.collaboration_weighted_projected_graph(G, [1, 3]) + assert nodes_equal(list(P), [1, 3]) + assert edges_equal(list(P.edges()), [(1, 3)]) + P[1][3]["weight"] = 1 + P = bipartite.collaboration_weighted_projected_graph(G, [0, 2]) + assert nodes_equal(list(P), [0, 2]) + assert edges_equal(list(P.edges()), [(0, 2)]) + P[0][2]["weight"] = 1 + + def test_directed_path_collaboration_projected_graph(self): + G = nx.DiGraph() + nx.add_path(G, range(4)) + P = bipartite.collaboration_weighted_projected_graph(G, [1, 3]) + assert nodes_equal(list(P), [1, 3]) + assert edges_equal(list(P.edges()), [(1, 3)]) + P[1][3]["weight"] = 1 + P = bipartite.collaboration_weighted_projected_graph(G, [0, 2]) + assert nodes_equal(list(P), [0, 2]) + assert edges_equal(list(P.edges()), [(0, 2)]) + P[0][2]["weight"] = 1 + + def test_path_weighted_projected_graph(self): + G = nx.path_graph(4) + + with pytest.raises(nx.NetworkXAlgorithmError): + bipartite.weighted_projected_graph(G, [1, 2, 3, 3]) + + P = bipartite.weighted_projected_graph(G, [1, 3]) + assert nodes_equal(list(P), [1, 3]) + assert edges_equal(list(P.edges()), [(1, 3)]) + P[1][3]["weight"] = 1 + P = bipartite.weighted_projected_graph(G, [0, 2]) + assert nodes_equal(list(P), [0, 2]) + assert edges_equal(list(P.edges()), [(0, 2)]) + P[0][2]["weight"] = 1 + + def test_digraph_weighted_projection(self): + G = nx.DiGraph([(0, 1), (1, 2), (2, 3), (3, 4)]) + P = bipartite.overlap_weighted_projected_graph(G, [1, 3]) + assert nx.get_edge_attributes(P, "weight") == {(1, 3): 1.0} + assert len(P) == 2 + + def test_path_weighted_projected_directed_graph(self): + G = nx.DiGraph() + nx.add_path(G, range(4)) + P = bipartite.weighted_projected_graph(G, [1, 3]) + assert nodes_equal(list(P), [1, 3]) + assert 
edges_equal(list(P.edges()), [(1, 3)]) + P[1][3]["weight"] = 1 + P = bipartite.weighted_projected_graph(G, [0, 2]) + assert nodes_equal(list(P), [0, 2]) + assert edges_equal(list(P.edges()), [(0, 2)]) + P[0][2]["weight"] = 1 + + def test_star_projected_graph(self): + G = nx.star_graph(3) + P = bipartite.projected_graph(G, [1, 2, 3]) + assert nodes_equal(list(P), [1, 2, 3]) + assert edges_equal(list(P.edges()), [(1, 2), (1, 3), (2, 3)]) + P = bipartite.weighted_projected_graph(G, [1, 2, 3]) + assert nodes_equal(list(P), [1, 2, 3]) + assert edges_equal(list(P.edges()), [(1, 2), (1, 3), (2, 3)]) + + P = bipartite.projected_graph(G, [0]) + assert nodes_equal(list(P), [0]) + assert edges_equal(list(P.edges()), []) + + def test_project_multigraph(self): + G = nx.Graph() + G.add_edge("a", 1) + G.add_edge("b", 1) + G.add_edge("a", 2) + G.add_edge("b", 2) + P = bipartite.projected_graph(G, "ab") + assert edges_equal(list(P.edges()), [("a", "b")]) + P = bipartite.weighted_projected_graph(G, "ab") + assert edges_equal(list(P.edges()), [("a", "b")]) + P = bipartite.projected_graph(G, "ab", multigraph=True) + assert edges_equal(list(P.edges()), [("a", "b"), ("a", "b")]) + + def test_project_collaboration(self): + G = nx.Graph() + G.add_edge("a", 1) + G.add_edge("b", 1) + G.add_edge("b", 2) + G.add_edge("c", 2) + G.add_edge("c", 3) + G.add_edge("c", 4) + G.add_edge("b", 4) + P = bipartite.collaboration_weighted_projected_graph(G, "abc") + assert P["a"]["b"]["weight"] == 1 + assert P["b"]["c"]["weight"] == 2 + + def test_directed_projection(self): + G = nx.DiGraph() + G.add_edge("A", 1) + G.add_edge(1, "B") + G.add_edge("A", 2) + G.add_edge("B", 2) + P = bipartite.projected_graph(G, "AB") + assert edges_equal(list(P.edges()), [("A", "B")]) + P = bipartite.weighted_projected_graph(G, "AB") + assert edges_equal(list(P.edges()), [("A", "B")]) + assert P["A"]["B"]["weight"] == 1 + + P = bipartite.projected_graph(G, "AB", multigraph=True) + assert edges_equal(list(P.edges()), [("A", 
"B")]) + + G = nx.DiGraph() + G.add_edge("A", 1) + G.add_edge(1, "B") + G.add_edge("A", 2) + G.add_edge(2, "B") + P = bipartite.projected_graph(G, "AB") + assert edges_equal(list(P.edges()), [("A", "B")]) + P = bipartite.weighted_projected_graph(G, "AB") + assert edges_equal(list(P.edges()), [("A", "B")]) + assert P["A"]["B"]["weight"] == 2 + + P = bipartite.projected_graph(G, "AB", multigraph=True) + assert edges_equal(list(P.edges()), [("A", "B"), ("A", "B")]) + + +class TestBipartiteWeightedProjection: + @classmethod + def setup_class(cls): + # Tore Opsahl's example + # http://toreopsahl.com/2009/05/01/projecting-two-mode-networks-onto-weighted-one-mode-networks/ + cls.G = nx.Graph() + cls.G.add_edge("A", 1) + cls.G.add_edge("A", 2) + cls.G.add_edge("B", 1) + cls.G.add_edge("B", 2) + cls.G.add_edge("B", 3) + cls.G.add_edge("B", 4) + cls.G.add_edge("B", 5) + cls.G.add_edge("C", 1) + cls.G.add_edge("D", 3) + cls.G.add_edge("E", 4) + cls.G.add_edge("E", 5) + cls.G.add_edge("E", 6) + cls.G.add_edge("F", 6) + # Graph based on figure 6 from Newman (2001) + cls.N = nx.Graph() + cls.N.add_edge("A", 1) + cls.N.add_edge("A", 2) + cls.N.add_edge("A", 3) + cls.N.add_edge("B", 1) + cls.N.add_edge("B", 2) + cls.N.add_edge("B", 3) + cls.N.add_edge("C", 1) + cls.N.add_edge("D", 1) + cls.N.add_edge("E", 3) + + def test_project_weighted_shared(self): + edges = [ + ("A", "B", 2), + ("A", "C", 1), + ("B", "C", 1), + ("B", "D", 1), + ("B", "E", 2), + ("E", "F", 1), + ] + Panswer = nx.Graph() + Panswer.add_weighted_edges_from(edges) + P = bipartite.weighted_projected_graph(self.G, "ABCDEF") + assert edges_equal(list(P.edges()), Panswer.edges()) + for u, v in list(P.edges()): + assert P[u][v]["weight"] == Panswer[u][v]["weight"] + + edges = [ + ("A", "B", 3), + ("A", "E", 1), + ("A", "C", 1), + ("A", "D", 1), + ("B", "E", 1), + ("B", "C", 1), + ("B", "D", 1), + ("C", "D", 1), + ] + Panswer = nx.Graph() + Panswer.add_weighted_edges_from(edges) + P = 
bipartite.weighted_projected_graph(self.N, "ABCDE") + assert edges_equal(list(P.edges()), Panswer.edges()) + for u, v in list(P.edges()): + assert P[u][v]["weight"] == Panswer[u][v]["weight"] + + def test_project_weighted_newman(self): + edges = [ + ("A", "B", 1.5), + ("A", "C", 0.5), + ("B", "C", 0.5), + ("B", "D", 1), + ("B", "E", 2), + ("E", "F", 1), + ] + Panswer = nx.Graph() + Panswer.add_weighted_edges_from(edges) + P = bipartite.collaboration_weighted_projected_graph(self.G, "ABCDEF") + assert edges_equal(list(P.edges()), Panswer.edges()) + for u, v in list(P.edges()): + assert P[u][v]["weight"] == Panswer[u][v]["weight"] + + edges = [ + ("A", "B", 11 / 6.0), + ("A", "E", 1 / 2.0), + ("A", "C", 1 / 3.0), + ("A", "D", 1 / 3.0), + ("B", "E", 1 / 2.0), + ("B", "C", 1 / 3.0), + ("B", "D", 1 / 3.0), + ("C", "D", 1 / 3.0), + ] + Panswer = nx.Graph() + Panswer.add_weighted_edges_from(edges) + P = bipartite.collaboration_weighted_projected_graph(self.N, "ABCDE") + assert edges_equal(list(P.edges()), Panswer.edges()) + for u, v in list(P.edges()): + assert P[u][v]["weight"] == Panswer[u][v]["weight"] + + def test_project_weighted_ratio(self): + edges = [ + ("A", "B", 2 / 6.0), + ("A", "C", 1 / 6.0), + ("B", "C", 1 / 6.0), + ("B", "D", 1 / 6.0), + ("B", "E", 2 / 6.0), + ("E", "F", 1 / 6.0), + ] + Panswer = nx.Graph() + Panswer.add_weighted_edges_from(edges) + P = bipartite.weighted_projected_graph(self.G, "ABCDEF", ratio=True) + assert edges_equal(list(P.edges()), Panswer.edges()) + for u, v in list(P.edges()): + assert P[u][v]["weight"] == Panswer[u][v]["weight"] + + edges = [ + ("A", "B", 3 / 3.0), + ("A", "E", 1 / 3.0), + ("A", "C", 1 / 3.0), + ("A", "D", 1 / 3.0), + ("B", "E", 1 / 3.0), + ("B", "C", 1 / 3.0), + ("B", "D", 1 / 3.0), + ("C", "D", 1 / 3.0), + ] + Panswer = nx.Graph() + Panswer.add_weighted_edges_from(edges) + P = bipartite.weighted_projected_graph(self.N, "ABCDE", ratio=True) + assert edges_equal(list(P.edges()), Panswer.edges()) + for u, v in 
list(P.edges()): + assert P[u][v]["weight"] == Panswer[u][v]["weight"] + + def test_project_weighted_overlap(self): + edges = [ + ("A", "B", 2 / 2.0), + ("A", "C", 1 / 1.0), + ("B", "C", 1 / 1.0), + ("B", "D", 1 / 1.0), + ("B", "E", 2 / 3.0), + ("E", "F", 1 / 1.0), + ] + Panswer = nx.Graph() + Panswer.add_weighted_edges_from(edges) + P = bipartite.overlap_weighted_projected_graph(self.G, "ABCDEF", jaccard=False) + assert edges_equal(list(P.edges()), Panswer.edges()) + for u, v in list(P.edges()): + assert P[u][v]["weight"] == Panswer[u][v]["weight"] + + edges = [ + ("A", "B", 3 / 3.0), + ("A", "E", 1 / 1.0), + ("A", "C", 1 / 1.0), + ("A", "D", 1 / 1.0), + ("B", "E", 1 / 1.0), + ("B", "C", 1 / 1.0), + ("B", "D", 1 / 1.0), + ("C", "D", 1 / 1.0), + ] + Panswer = nx.Graph() + Panswer.add_weighted_edges_from(edges) + P = bipartite.overlap_weighted_projected_graph(self.N, "ABCDE", jaccard=False) + assert edges_equal(list(P.edges()), Panswer.edges()) + for u, v in list(P.edges()): + assert P[u][v]["weight"] == Panswer[u][v]["weight"] + + def test_project_weighted_jaccard(self): + edges = [ + ("A", "B", 2 / 5.0), + ("A", "C", 1 / 2.0), + ("B", "C", 1 / 5.0), + ("B", "D", 1 / 5.0), + ("B", "E", 2 / 6.0), + ("E", "F", 1 / 3.0), + ] + Panswer = nx.Graph() + Panswer.add_weighted_edges_from(edges) + P = bipartite.overlap_weighted_projected_graph(self.G, "ABCDEF") + assert edges_equal(list(P.edges()), Panswer.edges()) + for u, v in list(P.edges()): + assert P[u][v]["weight"] == Panswer[u][v]["weight"] + + edges = [ + ("A", "B", 3 / 3.0), + ("A", "E", 1 / 3.0), + ("A", "C", 1 / 3.0), + ("A", "D", 1 / 3.0), + ("B", "E", 1 / 3.0), + ("B", "C", 1 / 3.0), + ("B", "D", 1 / 3.0), + ("C", "D", 1 / 1.0), + ] + Panswer = nx.Graph() + Panswer.add_weighted_edges_from(edges) + P = bipartite.overlap_weighted_projected_graph(self.N, "ABCDE") + assert edges_equal(list(P.edges()), Panswer.edges()) + for u, v in P.edges(): + assert P[u][v]["weight"] == Panswer[u][v]["weight"] + + def 
test_generic_weighted_projected_graph_simple(self): + def shared(G, u, v): + return len(set(G[u]) & set(G[v])) + + B = nx.path_graph(5) + G = bipartite.generic_weighted_projected_graph( + B, [0, 2, 4], weight_function=shared + ) + assert nodes_equal(list(G), [0, 2, 4]) + assert edges_equal( + list(G.edges(data=True)), + [(0, 2, {"weight": 1}), (2, 4, {"weight": 1})], + ) + + G = bipartite.generic_weighted_projected_graph(B, [0, 2, 4]) + assert nodes_equal(list(G), [0, 2, 4]) + assert edges_equal( + list(G.edges(data=True)), + [(0, 2, {"weight": 1}), (2, 4, {"weight": 1})], + ) + B = nx.DiGraph() + nx.add_path(B, range(5)) + G = bipartite.generic_weighted_projected_graph(B, [0, 2, 4]) + assert nodes_equal(list(G), [0, 2, 4]) + assert edges_equal( + list(G.edges(data=True)), [(0, 2, {"weight": 1}), (2, 4, {"weight": 1})] + ) + + def test_generic_weighted_projected_graph_custom(self): + def jaccard(G, u, v): + unbrs = set(G[u]) + vnbrs = set(G[v]) + return len(unbrs & vnbrs) / len(unbrs | vnbrs) + + def my_weight(G, u, v, weight="weight"): + w = 0 + for nbr in set(G[u]) & set(G[v]): + w += G.edges[u, nbr].get(weight, 1) + G.edges[v, nbr].get(weight, 1) + return w + + B = nx.bipartite.complete_bipartite_graph(2, 2) + for i, (u, v) in enumerate(B.edges()): + B.edges[u, v]["weight"] = i + 1 + G = bipartite.generic_weighted_projected_graph( + B, [0, 1], weight_function=jaccard + ) + assert edges_equal(list(G.edges(data=True)), [(0, 1, {"weight": 1.0})]) + G = bipartite.generic_weighted_projected_graph( + B, [0, 1], weight_function=my_weight + ) + assert edges_equal(list(G.edges(data=True)), [(0, 1, {"weight": 10})]) + G = bipartite.generic_weighted_projected_graph(B, [0, 1]) + assert edges_equal(list(G.edges(data=True)), [(0, 1, {"weight": 2})]) diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/traversal/tests/__pycache__/__init__.cpython-311.pyc 
b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/traversal/tests/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..32d3643e610f1c07aa30aaaa2735b0491072f122 Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/traversal/tests/__pycache__/__init__.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/traversal/tests/__pycache__/test_edgebfs.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/traversal/tests/__pycache__/test_edgebfs.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fb07385308a003455c673598bca536dc1faef252 Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/traversal/tests/__pycache__/test_edgebfs.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/generators/atlas.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/generators/atlas.py new file mode 100644 index 0000000000000000000000000000000000000000..8e57ec98b2e76ed4d9107d0c84efad386c0632d3 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/generators/atlas.py @@ -0,0 +1,179 @@ +""" +Generators for the small graph atlas. +""" +import gzip +import importlib.resources +import os +import os.path +from itertools import islice + +import networkx as nx + +__all__ = ["graph_atlas", "graph_atlas_g"] + +#: The total number of graphs in the atlas. +#: +#: The graphs are labeled starting from 0 and extending to (but not +#: including) this number. +NUM_GRAPHS = 1253 + +#: The path to the data file containing the graph edge lists. 
+#: +#: This is the absolute path of the gzipped text file containing the +#: edge list for each graph in the atlas. The file contains one entry +#: per graph in the atlas, in sequential order, starting from graph +#: number 0 and extending through graph number 1252 (see +#: :data:`NUM_GRAPHS`). Each entry looks like +#: +#: .. sourcecode:: text +#: +#: GRAPH 6 +#: NODES 3 +#: 0 1 +#: 0 2 +#: +#: where the first two lines are the graph's index in the atlas and the +#: number of nodes in the graph, and the remaining lines are the edge +#: list. +#: +#: This file was generated from a Python list of graphs via code like +#: the following:: +#: +#: import gzip +#: from networkx.generators.atlas import graph_atlas_g +#: from networkx.readwrite.edgelist import write_edgelist +#: +#: with gzip.open('atlas.dat.gz', 'wb') as f: +#: for i, G in enumerate(graph_atlas_g()): +#: f.write(bytes(f'GRAPH {i}\n', encoding='utf-8')) +#: f.write(bytes(f'NODES {len(G)}\n', encoding='utf-8')) +#: write_edgelist(G, f, data=False) +#: + +# Path to the atlas file +ATLAS_FILE = importlib.resources.files("networkx.generators") / "atlas.dat.gz" + + +def _generate_graphs(): + """Sequentially read the file containing the edge list data for the + graphs in the atlas and generate the graphs one at a time. + + This function reads the file given in :data:`.ATLAS_FILE`. + + """ + with gzip.open(ATLAS_FILE, "rb") as f: + line = f.readline() + while line and line.startswith(b"GRAPH"): + # The first two lines of each entry tell us the index of the + # graph in the list and the number of nodes in the graph. + # They look like this: + # + # GRAPH 3 + # NODES 2 + # + graph_index = int(line[6:].rstrip()) + line = f.readline() + num_nodes = int(line[6:].rstrip()) + # The remaining lines contain the edge list, until the next + # GRAPH line (or until the end of the file). 
+ edgelist = [] + line = f.readline() + while line and not line.startswith(b"GRAPH"): + edgelist.append(line.rstrip()) + line = f.readline() + G = nx.Graph() + G.name = f"G{graph_index}" + G.add_nodes_from(range(num_nodes)) + G.add_edges_from(tuple(map(int, e.split())) for e in edgelist) + yield G + + +@nx._dispatch(graphs=None) +def graph_atlas(i): + """Returns graph number `i` from the Graph Atlas. + + For more information, see :func:`.graph_atlas_g`. + + Parameters + ---------- + i : int + The index of the graph from the atlas to get. The graph at index + 0 is assumed to be the null graph. + + Returns + ------- + list + A list of :class:`~networkx.Graph` objects, the one at index *i* + corresponding to the graph *i* in the Graph Atlas. + + See also + -------- + graph_atlas_g + + Notes + ----- + The time required by this function increases linearly with the + argument `i`, since it reads a large file sequentially in order to + generate the graph [1]_. + + References + ---------- + .. [1] Ronald C. Read and Robin J. Wilson, *An Atlas of Graphs*. + Oxford University Press, 1998. + + """ + if not (0 <= i < NUM_GRAPHS): + raise ValueError(f"index must be between 0 and {NUM_GRAPHS}") + return next(islice(_generate_graphs(), i, None)) + + +@nx._dispatch(graphs=None) +def graph_atlas_g(): + """Returns the list of all graphs with up to seven nodes named in the + Graph Atlas. + + The graphs are listed in increasing order by + + 1. number of nodes, + 2. number of edges, + 3. degree sequence (for example 111223 < 112222), + 4. number of automorphisms, + + in that order, with three exceptions as described in the *Notes* + section below. This causes the list to correspond with the index of + the graphs in the Graph Atlas [atlas]_, with the first graph, + ``G[0]``, being the null graph. + + Returns + ------- + list + A list of :class:`~networkx.Graph` objects, the one at index *i* + corresponding to the graph *i* in the Graph Atlas. 
+ + See also + -------- + graph_atlas + + Notes + ----- + This function may be expensive in both time and space, since it + reads a large file sequentially in order to populate the list. + + Although the NetworkX atlas functions match the order of graphs + given in the "Atlas of Graphs" book, there are (at least) three + errors in the ordering described in the book. The following three + pairs of nodes violate the lexicographically nondecreasing sorted + degree sequence rule: + + - graphs 55 and 56 with degree sequences 001111 and 000112, + - graphs 1007 and 1008 with degree sequences 3333444 and 3333336, + - graphs 1012 and 1213 with degree sequences 1244555 and 1244456. + + References + ---------- + .. [atlas] Ronald C. Read and Robin J. Wilson, + *An Atlas of Graphs*. + Oxford University Press, 1998. + + """ + return list(_generate_graphs()) diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/generators/cographs.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/generators/cographs.py new file mode 100644 index 0000000000000000000000000000000000000000..cb1f8d71b13533eb7ba87324329421d97f70e050 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/generators/cographs.py @@ -0,0 +1,67 @@ +r"""Generators for cographs + +A cograph is a graph containing no path on four vertices. +Cographs or $P_4$-free graphs can be obtained from a single vertex +by disjoint union and complementation operations. + +References +---------- +.. [0] D.G. Corneil, H. Lerchs, L.Stewart Burlingham, + "Complement reducible graphs", + Discrete Applied Mathematics, Volume 3, Issue 3, 1981, Pages 163-174, + ISSN 0166-218X. +""" +import networkx as nx +from networkx.utils import py_random_state + +__all__ = ["random_cograph"] + + +@py_random_state(1) +@nx._dispatch(graphs=None) +def random_cograph(n, seed=None): + r"""Returns a random cograph with $2 ^ n$ nodes. 
+ + A cograph is a graph containing no path on four vertices. + Cographs or $P_4$-free graphs can be obtained from a single vertex + by disjoint union and complementation operations. + + This generator starts off from a single vertex and performs disjoint + union and full join operations on itself. + The decision on which operation will take place is random. + + Parameters + ---------- + n : int + The order of the cograph. + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + Returns + ------- + G : A random graph containing no path on four vertices. + + See Also + -------- + full_join + union + + References + ---------- + .. [1] D.G. Corneil, H. Lerchs, L.Stewart Burlingham, + "Complement reducible graphs", + Discrete Applied Mathematics, Volume 3, Issue 3, 1981, Pages 163-174, + ISSN 0166-218X. + """ + R = nx.empty_graph(1) + + for i in range(n): + RR = nx.relabel_nodes(R.copy(), lambda x: x + len(R)) + + if seed.randint(0, 1) == 0: + R = nx.full_join(R, RR) + else: + R = nx.disjoint_union(R, RR) + + return R diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/generators/degree_seq.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/generators/degree_seq.py new file mode 100644 index 0000000000000000000000000000000000000000..fd9691101894652f52b59cb41886f2cf5063ee55 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/generators/degree_seq.py @@ -0,0 +1,868 @@ +"""Generate graphs with a given degree sequence or expected degree sequence. 
+""" + +import heapq +import math +from itertools import chain, combinations, zip_longest +from operator import itemgetter + +import networkx as nx +from networkx.utils import py_random_state, random_weighted_sample + +__all__ = [ + "configuration_model", + "directed_configuration_model", + "expected_degree_graph", + "havel_hakimi_graph", + "directed_havel_hakimi_graph", + "degree_sequence_tree", + "random_degree_sequence_graph", +] + +chaini = chain.from_iterable + + +def _to_stublist(degree_sequence): + """Returns a list of degree-repeated node numbers. + + ``degree_sequence`` is a list of nonnegative integers representing + the degrees of nodes in a graph. + + This function returns a list of node numbers with multiplicities + according to the given degree sequence. For example, if the first + element of ``degree_sequence`` is ``3``, then the first node number, + ``0``, will appear at the head of the returned list three times. The + node numbers are assumed to be the numbers zero through + ``len(degree_sequence) - 1``. + + Examples + -------- + + >>> degree_sequence = [1, 2, 3] + >>> _to_stublist(degree_sequence) + [0, 1, 1, 2, 2, 2] + + If a zero appears in the sequence, that means the node exists but + has degree zero, so that number will be skipped in the returned + list:: + + >>> degree_sequence = [2, 0, 1] + >>> _to_stublist(degree_sequence) + [0, 0, 2] + + """ + return list(chaini([n] * d for n, d in enumerate(degree_sequence))) + + +def _configuration_model( + deg_sequence, create_using, directed=False, in_deg_sequence=None, seed=None +): + """Helper function for generating either undirected or directed + configuration model graphs. + + ``deg_sequence`` is a list of nonnegative integers representing the + degree of the node whose label is the index of the list element. + + ``create_using`` see :func:`~networkx.empty_graph`. 
+ + ``directed`` and ``in_deg_sequence`` are required if you want the + returned graph to be generated using the directed configuration + model algorithm. If ``directed`` is ``False``, then ``deg_sequence`` + is interpreted as the degree sequence of an undirected graph and + ``in_deg_sequence`` is ignored. Otherwise, if ``directed`` is + ``True``, then ``deg_sequence`` is interpreted as the out-degree + sequence and ``in_deg_sequence`` as the in-degree sequence of a + directed graph. + + .. note:: + + ``deg_sequence`` and ``in_deg_sequence`` need not be the same + length. + + ``seed`` is a random.Random or numpy.random.RandomState instance + + This function returns a graph, directed if and only if ``directed`` + is ``True``, generated according to the configuration model + algorithm. For more information on the algorithm, see the + :func:`configuration_model` or :func:`directed_configuration_model` + functions. + + """ + n = len(deg_sequence) + G = nx.empty_graph(n, create_using) + # If empty, return the null graph immediately. + if n == 0: + return G + # Build a list of available degree-repeated nodes. For example, + # for degree sequence [3, 2, 1, 1, 1], the "stub list" is + # initially [0, 0, 0, 1, 1, 2, 3, 4], that is, node 0 has degree + # 3 and thus is repeated 3 times, etc. + # + # Also, shuffle the stub list in order to get a random sequence of + # node pairs. + if directed: + pairs = zip_longest(deg_sequence, in_deg_sequence, fillvalue=0) + # Unzip the list of pairs into a pair of lists. + out_deg, in_deg = zip(*pairs) + + out_stublist = _to_stublist(out_deg) + in_stublist = _to_stublist(in_deg) + + seed.shuffle(out_stublist) + seed.shuffle(in_stublist) + else: + stublist = _to_stublist(deg_sequence) + # Choose a random balanced bipartition of the stublist, which + # gives a random pairing of nodes. In this implementation, we + # shuffle the list and then split it in half. 
+ n = len(stublist) + half = n // 2 + seed.shuffle(stublist) + out_stublist, in_stublist = stublist[:half], stublist[half:] + G.add_edges_from(zip(out_stublist, in_stublist)) + return G + + +@py_random_state(2) +@nx._dispatch(graphs=None) +def configuration_model(deg_sequence, create_using=None, seed=None): + """Returns a random graph with the given degree sequence. + + The configuration model generates a random pseudograph (graph with + parallel edges and self loops) by randomly assigning edges to + match the given degree sequence. + + Parameters + ---------- + deg_sequence : list of nonnegative integers + Each list entry corresponds to the degree of a node. + create_using : NetworkX graph constructor, optional (default MultiGraph) + Graph type to create. If graph instance, then cleared before populated. + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + Returns + ------- + G : MultiGraph + A graph with the specified degree sequence. + Nodes are labeled starting at 0 with an index + corresponding to the position in deg_sequence. + + Raises + ------ + NetworkXError + If the degree sequence does not have an even sum. + + See Also + -------- + is_graphical + + Notes + ----- + As described by Newman [1]_. + + A non-graphical degree sequence (not realizable by some simple + graph) is allowed since this function returns graphs with self + loops and parallel edges. An exception is raised if the degree + sequence does not have an even sum. + + This configuration model construction process can lead to + duplicate edges and loops. You can remove the self-loops and + parallel edges (see below) which will likely result in a graph + that doesn't have the exact degree sequence specified. + + The density of self-loops and parallel edges tends to decrease as + the number of nodes increases. 
However, typically the number of + self-loops will approach a Poisson distribution with a nonzero mean, + and similarly for the number of parallel edges. Consider a node + with *k* stubs. The probability of being joined to another stub of + the same node is basically (*k* - *1*) / *N*, where *k* is the + degree and *N* is the number of nodes. So the probability of a + self-loop scales like *c* / *N* for some constant *c*. As *N* grows, + this means we expect *c* self-loops. Similarly for parallel edges. + + References + ---------- + .. [1] M.E.J. Newman, "The structure and function of complex networks", + SIAM REVIEW 45-2, pp 167-256, 2003. + + Examples + -------- + You can create a degree sequence following a particular distribution + by using the one of the distribution functions in + :mod:`~networkx.utils.random_sequence` (or one of your own). For + example, to create an undirected multigraph on one hundred nodes + with degree sequence chosen from the power law distribution: + + >>> sequence = nx.random_powerlaw_tree_sequence(100, tries=5000) + >>> G = nx.configuration_model(sequence) + >>> len(G) + 100 + >>> actual_degrees = [d for v, d in G.degree()] + >>> actual_degrees == sequence + True + + The returned graph is a multigraph, which may have parallel + edges. 
To remove any parallel edges from the returned graph: + + >>> G = nx.Graph(G) + + Similarly, to remove self-loops: + + >>> G.remove_edges_from(nx.selfloop_edges(G)) + + """ + if sum(deg_sequence) % 2 != 0: + msg = "Invalid degree sequence: sum of degrees must be even, not odd" + raise nx.NetworkXError(msg) + + G = nx.empty_graph(0, create_using, default=nx.MultiGraph) + if G.is_directed(): + raise nx.NetworkXNotImplemented("not implemented for directed graphs") + + G = _configuration_model(deg_sequence, G, seed=seed) + + return G + + +@py_random_state(3) +@nx._dispatch(graphs=None) +def directed_configuration_model( + in_degree_sequence, out_degree_sequence, create_using=None, seed=None +): + """Returns a directed_random graph with the given degree sequences. + + The configuration model generates a random directed pseudograph + (graph with parallel edges and self loops) by randomly assigning + edges to match the given degree sequences. + + Parameters + ---------- + in_degree_sequence : list of nonnegative integers + Each list entry corresponds to the in-degree of a node. + out_degree_sequence : list of nonnegative integers + Each list entry corresponds to the out-degree of a node. + create_using : NetworkX graph constructor, optional (default MultiDiGraph) + Graph type to create. If graph instance, then cleared before populated. + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + Returns + ------- + G : MultiDiGraph + A graph with the specified degree sequences. + Nodes are labeled starting at 0 with an index + corresponding to the position in deg_sequence. + + Raises + ------ + NetworkXError + If the degree sequences do not have the same sum. + + See Also + -------- + configuration_model + + Notes + ----- + Algorithm as described by Newman [1]_. 
+ + A non-graphical degree sequence (not realizable by some simple + graph) is allowed since this function returns graphs with self + loops and parallel edges. An exception is raised if the degree + sequences does not have the same sum. + + This configuration model construction process can lead to + duplicate edges and loops. You can remove the self-loops and + parallel edges (see below) which will likely result in a graph + that doesn't have the exact degree sequence specified. This + "finite-size effect" decreases as the size of the graph increases. + + References + ---------- + .. [1] Newman, M. E. J. and Strogatz, S. H. and Watts, D. J. + Random graphs with arbitrary degree distributions and their applications + Phys. Rev. E, 64, 026118 (2001) + + Examples + -------- + One can modify the in- and out-degree sequences from an existing + directed graph in order to create a new directed graph. For example, + here we modify the directed path graph: + + >>> D = nx.DiGraph([(0, 1), (1, 2), (2, 3)]) + >>> din = list(d for n, d in D.in_degree()) + >>> dout = list(d for n, d in D.out_degree()) + >>> din.append(1) + >>> dout[0] = 2 + >>> # We now expect an edge from node 0 to a new node, node 3. + ... D = nx.directed_configuration_model(din, dout) + + The returned graph is a directed multigraph, which may have parallel + edges. 
To remove any parallel edges from the returned graph: + + >>> D = nx.DiGraph(D) + + Similarly, to remove self-loops: + + >>> D.remove_edges_from(nx.selfloop_edges(D)) + + """ + if sum(in_degree_sequence) != sum(out_degree_sequence): + msg = "Invalid degree sequences: sequences must have equal sums" + raise nx.NetworkXError(msg) + + if create_using is None: + create_using = nx.MultiDiGraph + + G = _configuration_model( + out_degree_sequence, + create_using, + directed=True, + in_deg_sequence=in_degree_sequence, + seed=seed, + ) + + name = "directed configuration_model {} nodes {} edges" + return G + + +@py_random_state(1) +@nx._dispatch(graphs=None) +def expected_degree_graph(w, seed=None, selfloops=True): + r"""Returns a random graph with given expected degrees. + + Given a sequence of expected degrees $W=(w_0,w_1,\ldots,w_{n-1})$ + of length $n$ this algorithm assigns an edge between node $u$ and + node $v$ with probability + + .. math:: + + p_{uv} = \frac{w_u w_v}{\sum_k w_k} . + + Parameters + ---------- + w : list + The list of expected degrees. + selfloops: bool (default=True) + Set to False to remove the possibility of self-loop edges. + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + Returns + ------- + Graph + + Examples + -------- + >>> z = [10 for i in range(100)] + >>> G = nx.expected_degree_graph(z) + + Notes + ----- + The nodes have integer labels corresponding to index of expected degrees + input sequence. + + The complexity of this algorithm is $\mathcal{O}(n+m)$ where $n$ is the + number of nodes and $m$ is the expected number of edges. + + The model in [1]_ includes the possibility of self-loop edges. + Set selfloops=False to produce a graph without self loops. + + For finite graphs this model doesn't produce exactly the given + expected degree sequence. Instead the expected degrees are as + follows. + + For the case without self loops (selfloops=False), + + .. 
math:: + + E[deg(u)] = \sum_{v \ne u} p_{uv} + = w_u \left( 1 - \frac{w_u}{\sum_k w_k} \right) . + + + NetworkX uses the standard convention that a self-loop edge counts 2 + in the degree of a node, so with self loops (selfloops=True), + + .. math:: + + E[deg(u)] = \sum_{v \ne u} p_{uv} + 2 p_{uu} + = w_u \left( 1 + \frac{w_u}{\sum_k w_k} \right) . + + References + ---------- + .. [1] Fan Chung and L. Lu, Connected components in random graphs with + given expected degree sequences, Ann. Combinatorics, 6, + pp. 125-145, 2002. + .. [2] Joel Miller and Aric Hagberg, + Efficient generation of networks with given expected degrees, + in Algorithms and Models for the Web-Graph (WAW 2011), + Alan Frieze, Paul Horn, and Paweł Prałat (Eds), LNCS 6732, + pp. 115-126, 2011. + """ + n = len(w) + G = nx.empty_graph(n) + + # If there are no nodes are no edges in the graph, return the empty graph. + if n == 0 or max(w) == 0: + return G + + rho = 1 / sum(w) + # Sort the weights in decreasing order. The original order of the + # weights dictates the order of the (integer) node labels, so we + # need to remember the permutation applied in the sorting. + order = sorted(enumerate(w), key=itemgetter(1), reverse=True) + mapping = {c: u for c, (u, v) in enumerate(order)} + seq = [v for u, v in order] + last = n + if not selfloops: + last -= 1 + for u in range(last): + v = u + if not selfloops: + v += 1 + factor = seq[u] * rho + p = min(seq[v] * factor, 1) + while v < n and p > 0: + if p != 1: + r = seed.random() + v += math.floor(math.log(r, 1 - p)) + if v < n: + q = min(seq[v] * factor, 1) + if seed.random() < q / p: + G.add_edge(mapping[u], mapping[v]) + v += 1 + p = q + return G + + +@nx._dispatch(graphs=None) +def havel_hakimi_graph(deg_sequence, create_using=None): + """Returns a simple graph with given degree sequence constructed + using the Havel-Hakimi algorithm. 
+ + Parameters + ---------- + deg_sequence: list of integers + Each integer corresponds to the degree of a node (need not be sorted). + create_using : NetworkX graph constructor, optional (default=nx.Graph) + Graph type to create. If graph instance, then cleared before populated. + Directed graphs are not allowed. + + Raises + ------ + NetworkXException + For a non-graphical degree sequence (i.e. one + not realizable by some simple graph). + + Notes + ----- + The Havel-Hakimi algorithm constructs a simple graph by + successively connecting the node of highest degree to other nodes + of highest degree, resorting remaining nodes by degree, and + repeating the process. The resulting graph has a high + degree-associativity. Nodes are labeled 1,.., len(deg_sequence), + corresponding to their position in deg_sequence. + + The basic algorithm is from Hakimi [1]_ and was generalized by + Kleitman and Wang [2]_. + + References + ---------- + .. [1] Hakimi S., On Realizability of a Set of Integers as + Degrees of the Vertices of a Linear Graph. I, + Journal of SIAM, 10(3), pp. 496-506 (1962) + .. [2] Kleitman D.J. and Wang D.L. + Algorithms for Constructing Graphs and Digraphs with Given Valences + and Factors Discrete Mathematics, 6(1), pp. 
79-88 (1973) + """ + if not nx.is_graphical(deg_sequence): + raise nx.NetworkXError("Invalid degree sequence") + + p = len(deg_sequence) + G = nx.empty_graph(p, create_using) + if G.is_directed(): + raise nx.NetworkXError("Directed graphs are not supported") + num_degs = [[] for i in range(p)] + dmax, dsum, n = 0, 0, 0 + for d in deg_sequence: + # Process only the non-zero integers + if d > 0: + num_degs[d].append(n) + dmax, dsum, n = max(dmax, d), dsum + d, n + 1 + # Return graph if no edges + if n == 0: + return G + + modstubs = [(0, 0)] * (dmax + 1) + # Successively reduce degree sequence by removing the maximum degree + while n > 0: + # Retrieve the maximum degree in the sequence + while len(num_degs[dmax]) == 0: + dmax -= 1 + # If there are not enough stubs to connect to, then the sequence is + # not graphical + if dmax > n - 1: + raise nx.NetworkXError("Non-graphical integer sequence") + + # Remove largest stub in list + source = num_degs[dmax].pop() + n -= 1 + # Reduce the next dmax largest stubs + mslen = 0 + k = dmax + for i in range(dmax): + while len(num_degs[k]) == 0: + k -= 1 + target = num_degs[k].pop() + G.add_edge(source, target) + n -= 1 + if k > 1: + modstubs[mslen] = (k - 1, target) + mslen += 1 + # Add back to the list any nonzero stubs that were removed + for i in range(mslen): + (stubval, stubtarget) = modstubs[i] + num_degs[stubval].append(stubtarget) + n += 1 + + return G + + +@nx._dispatch(graphs=None) +def directed_havel_hakimi_graph(in_deg_sequence, out_deg_sequence, create_using=None): + """Returns a directed graph with the given degree sequences. + + Parameters + ---------- + in_deg_sequence : list of integers + Each list entry corresponds to the in-degree of a node. + out_deg_sequence : list of integers + Each list entry corresponds to the out-degree of a node. + create_using : NetworkX graph constructor, optional (default DiGraph) + Graph type to create. If graph instance, then cleared before populated. 
+ + Returns + ------- + G : DiGraph + A graph with the specified degree sequences. + Nodes are labeled starting at 0 with an index + corresponding to the position in deg_sequence + + Raises + ------ + NetworkXError + If the degree sequences are not digraphical. + + See Also + -------- + configuration_model + + Notes + ----- + Algorithm as described by Kleitman and Wang [1]_. + + References + ---------- + .. [1] D.J. Kleitman and D.L. Wang + Algorithms for Constructing Graphs and Digraphs with Given Valences + and Factors Discrete Mathematics, 6(1), pp. 79-88 (1973) + """ + in_deg_sequence = nx.utils.make_list_of_ints(in_deg_sequence) + out_deg_sequence = nx.utils.make_list_of_ints(out_deg_sequence) + + # Process the sequences and form two heaps to store degree pairs with + # either zero or nonzero out degrees + sumin, sumout = 0, 0 + nin, nout = len(in_deg_sequence), len(out_deg_sequence) + maxn = max(nin, nout) + G = nx.empty_graph(maxn, create_using, default=nx.DiGraph) + if maxn == 0: + return G + maxin = 0 + stubheap, zeroheap = [], [] + for n in range(maxn): + in_deg, out_deg = 0, 0 + if n < nout: + out_deg = out_deg_sequence[n] + if n < nin: + in_deg = in_deg_sequence[n] + if in_deg < 0 or out_deg < 0: + raise nx.NetworkXError( + "Invalid degree sequences. Sequence values must be positive." + ) + sumin, sumout, maxin = sumin + in_deg, sumout + out_deg, max(maxin, in_deg) + if in_deg > 0: + stubheap.append((-1 * out_deg, -1 * in_deg, n)) + elif out_deg > 0: + zeroheap.append((-1 * out_deg, n)) + if sumin != sumout: + raise nx.NetworkXError( + "Invalid degree sequences. Sequences must have equal sums." 
+ ) + heapq.heapify(stubheap) + heapq.heapify(zeroheap) + + modstubs = [(0, 0, 0)] * (maxin + 1) + # Successively reduce degree sequence by removing the maximum + while stubheap: + # Remove first value in the sequence with a non-zero in degree + (freeout, freein, target) = heapq.heappop(stubheap) + freein *= -1 + if freein > len(stubheap) + len(zeroheap): + raise nx.NetworkXError("Non-digraphical integer sequence") + + # Attach arcs from the nodes with the most stubs + mslen = 0 + for i in range(freein): + if zeroheap and (not stubheap or stubheap[0][0] > zeroheap[0][0]): + (stubout, stubsource) = heapq.heappop(zeroheap) + stubin = 0 + else: + (stubout, stubin, stubsource) = heapq.heappop(stubheap) + if stubout == 0: + raise nx.NetworkXError("Non-digraphical integer sequence") + G.add_edge(stubsource, target) + # Check if source is now totally connected + if stubout + 1 < 0 or stubin < 0: + modstubs[mslen] = (stubout + 1, stubin, stubsource) + mslen += 1 + + # Add the nodes back to the heaps that still have available stubs + for i in range(mslen): + stub = modstubs[i] + if stub[1] < 0: + heapq.heappush(stubheap, stub) + else: + heapq.heappush(zeroheap, (stub[0], stub[2])) + if freeout < 0: + heapq.heappush(zeroheap, (freeout, target)) + + return G + + +@nx._dispatch(graphs=None) +def degree_sequence_tree(deg_sequence, create_using=None): + """Make a tree for the given degree sequence. + + A tree has #nodes-#edges=1 so + the degree sequence must have + len(deg_sequence)-sum(deg_sequence)/2=1 + """ + # The sum of the degree sequence must be even (for any undirected graph). 
+ degree_sum = sum(deg_sequence) + if degree_sum % 2 != 0: + msg = "Invalid degree sequence: sum of degrees must be even, not odd" + raise nx.NetworkXError(msg) + if len(deg_sequence) - degree_sum // 2 != 1: + msg = ( + "Invalid degree sequence: tree must have number of nodes equal" + " to one less than the number of edges" + ) + raise nx.NetworkXError(msg) + G = nx.empty_graph(0, create_using) + if G.is_directed(): + raise nx.NetworkXError("Directed Graph not supported") + + # Sort all degrees greater than 1 in decreasing order. + # + # TODO Does this need to be sorted in reverse order? + deg = sorted((s for s in deg_sequence if s > 1), reverse=True) + + # make path graph as backbone + n = len(deg) + 2 + nx.add_path(G, range(n)) + last = n + + # add the leaves + for source in range(1, n - 1): + nedges = deg.pop() - 2 + for target in range(last, last + nedges): + G.add_edge(source, target) + last += nedges + + # in case we added one too many + if len(G) > len(deg_sequence): + G.remove_node(0) + return G + + +@py_random_state(1) +@nx._dispatch(graphs=None) +def random_degree_sequence_graph(sequence, seed=None, tries=10): + r"""Returns a simple random graph with the given degree sequence. + + If the maximum degree $d_m$ in the sequence is $O(m^{1/4})$ then the + algorithm produces almost uniform random graphs in $O(m d_m)$ time + where $m$ is the number of edges. + + Parameters + ---------- + sequence : list of integers + Sequence of degrees + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + tries : int, optional + Maximum number of tries to create a graph + + Returns + ------- + G : Graph + A graph with the specified degree sequence. + Nodes are labeled starting at 0 with an index + corresponding to the position in the sequence. + + Raises + ------ + NetworkXUnfeasible + If the degree sequence is not graphical. 
+ NetworkXError + If a graph is not produced in specified number of tries + + See Also + -------- + is_graphical, configuration_model + + Notes + ----- + The generator algorithm [1]_ is not guaranteed to produce a graph. + + References + ---------- + .. [1] Moshen Bayati, Jeong Han Kim, and Amin Saberi, + A sequential algorithm for generating random graphs. + Algorithmica, Volume 58, Number 4, 860-910, + DOI: 10.1007/s00453-009-9340-1 + + Examples + -------- + >>> sequence = [1, 2, 2, 3] + >>> G = nx.random_degree_sequence_graph(sequence, seed=42) + >>> sorted(d for n, d in G.degree()) + [1, 2, 2, 3] + """ + DSRG = DegreeSequenceRandomGraph(sequence, seed) + for try_n in range(tries): + try: + return DSRG.generate() + except nx.NetworkXUnfeasible: + pass + raise nx.NetworkXError(f"failed to generate graph in {tries} tries") + + +class DegreeSequenceRandomGraph: + # class to generate random graphs with a given degree sequence + # use random_degree_sequence_graph() + def __init__(self, degree, rng): + if not nx.is_graphical(degree): + raise nx.NetworkXUnfeasible("degree sequence is not graphical") + self.rng = rng + self.degree = list(degree) + # node labels are integers 0,...,n-1 + self.m = sum(self.degree) / 2.0 # number of edges + try: + self.dmax = max(self.degree) # maximum degree + except ValueError: + self.dmax = 0 + + def generate(self): + # remaining_degree is mapping from int->remaining degree + self.remaining_degree = dict(enumerate(self.degree)) + # add all nodes to make sure we get isolated nodes + self.graph = nx.Graph() + self.graph.add_nodes_from(self.remaining_degree) + # remove zero degree nodes + for n, d in list(self.remaining_degree.items()): + if d == 0: + del self.remaining_degree[n] + if len(self.remaining_degree) > 0: + # build graph in three phases according to how many unmatched edges + self.phase1() + self.phase2() + self.phase3() + return self.graph + + def update_remaining(self, u, v, aux_graph=None): + # decrement remaining nodes, 
modify auxiliary graph if in phase3 + if aux_graph is not None: + # remove edges from auxiliary graph + aux_graph.remove_edge(u, v) + if self.remaining_degree[u] == 1: + del self.remaining_degree[u] + if aux_graph is not None: + aux_graph.remove_node(u) + else: + self.remaining_degree[u] -= 1 + if self.remaining_degree[v] == 1: + del self.remaining_degree[v] + if aux_graph is not None: + aux_graph.remove_node(v) + else: + self.remaining_degree[v] -= 1 + + def p(self, u, v): + # degree probability + return 1 - self.degree[u] * self.degree[v] / (4.0 * self.m) + + def q(self, u, v): + # remaining degree probability + norm = max(self.remaining_degree.values()) ** 2 + return self.remaining_degree[u] * self.remaining_degree[v] / norm + + def suitable_edge(self): + """Returns True if and only if an arbitrary remaining node can + potentially be joined with some other remaining node. + + """ + nodes = iter(self.remaining_degree) + u = next(nodes) + return any(v not in self.graph[u] for v in nodes) + + def phase1(self): + # choose node pairs from (degree) weighted distribution + rem_deg = self.remaining_degree + while sum(rem_deg.values()) >= 2 * self.dmax**2: + u, v = sorted(random_weighted_sample(rem_deg, 2, self.rng)) + if self.graph.has_edge(u, v): + continue + if self.rng.random() < self.p(u, v): # accept edge + self.graph.add_edge(u, v) + self.update_remaining(u, v) + + def phase2(self): + # choose remaining nodes uniformly at random and use rejection sampling + remaining_deg = self.remaining_degree + rng = self.rng + while len(remaining_deg) >= 2 * self.dmax: + while True: + u, v = sorted(rng.sample(list(remaining_deg.keys()), 2)) + if self.graph.has_edge(u, v): + continue + if rng.random() < self.q(u, v): + break + if rng.random() < self.p(u, v): # accept edge + self.graph.add_edge(u, v) + self.update_remaining(u, v) + + def phase3(self): + # build potential remaining edges and choose with rejection sampling + potential_edges = combinations(self.remaining_degree, 2) 
+ # build auxiliary graph of potential edges not already in graph + H = nx.Graph( + [(u, v) for (u, v) in potential_edges if not self.graph.has_edge(u, v)] + ) + rng = self.rng + while self.remaining_degree: + if not self.suitable_edge(): + raise nx.NetworkXUnfeasible("no suitable edges left") + while True: + u, v = sorted(rng.choice(list(H.edges()))) + if rng.random() < self.q(u, v): + break + if rng.random() < self.p(u, v): # accept edge + self.graph.add_edge(u, v) + self.update_remaining(u, v, aux_graph=H) diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/generators/directed.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/generators/directed.py new file mode 100644 index 0000000000000000000000000000000000000000..e084ebe8c4f55e5349133c859ab083f85377c2cd --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/generators/directed.py @@ -0,0 +1,501 @@ +""" +Generators for some directed graphs, including growing network (GN) graphs and +scale-free graphs. + +""" + +import numbers +from collections import Counter + +import networkx as nx +from networkx.generators.classic import empty_graph +from networkx.utils import discrete_sequence, py_random_state, weighted_choice + +__all__ = [ + "gn_graph", + "gnc_graph", + "gnr_graph", + "random_k_out_graph", + "scale_free_graph", +] + + +@py_random_state(3) +@nx._dispatch(graphs=None) +def gn_graph(n, kernel=None, create_using=None, seed=None): + """Returns the growing network (GN) digraph with `n` nodes. + + The GN graph is built by adding nodes one at a time with a link to one + previously added node. The target node for the link is chosen with + probability based on degree. The default attachment kernel is a linear + function of the degree of a node. + + The graph is always a (directed) tree. + + Parameters + ---------- + n : int + The number of nodes for the generated graph. + kernel : function + The attachment kernel. 
+ create_using : NetworkX graph constructor, optional (default DiGraph) + Graph type to create. If graph instance, then cleared before populated. + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + Examples + -------- + To create the undirected GN graph, use the :meth:`~DiGraph.to_directed` + method:: + + >>> D = nx.gn_graph(10) # the GN graph + >>> G = D.to_undirected() # the undirected version + + To specify an attachment kernel, use the `kernel` keyword argument:: + + >>> D = nx.gn_graph(10, kernel=lambda x: x ** 1.5) # A_k = k^1.5 + + References + ---------- + .. [1] P. L. Krapivsky and S. Redner, + Organization of Growing Random Networks, + Phys. Rev. E, 63, 066123, 2001. + """ + G = empty_graph(1, create_using, default=nx.DiGraph) + if not G.is_directed(): + raise nx.NetworkXError("create_using must indicate a Directed Graph") + + if kernel is None: + + def kernel(x): + return x + + if n == 1: + return G + + G.add_edge(1, 0) # get started + ds = [1, 1] # degree sequence + + for source in range(2, n): + # compute distribution from kernel and degree + dist = [kernel(d) for d in ds] + # choose target from discrete distribution + target = discrete_sequence(1, distribution=dist, seed=seed)[0] + G.add_edge(source, target) + ds.append(1) # the source has only one link (degree one) + ds[target] += 1 # add one to the target link degree + return G + + +@py_random_state(3) +@nx._dispatch(graphs=None) +def gnr_graph(n, p, create_using=None, seed=None): + """Returns the growing network with redirection (GNR) digraph with `n` + nodes and redirection probability `p`. + + The GNR graph is built by adding nodes one at a time with a link to one + previously added node. The previous target node is chosen uniformly at + random. With probability `p` the link is instead "redirected" to the + successor node of the target. + + The graph is always a (directed) tree. 
+ + Parameters + ---------- + n : int + The number of nodes for the generated graph. + p : float + The redirection probability. + create_using : NetworkX graph constructor, optional (default DiGraph) + Graph type to create. If graph instance, then cleared before populated. + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + Examples + -------- + To create the undirected GNR graph, use the :meth:`~DiGraph.to_directed` + method:: + + >>> D = nx.gnr_graph(10, 0.5) # the GNR graph + >>> G = D.to_undirected() # the undirected version + + References + ---------- + .. [1] P. L. Krapivsky and S. Redner, + Organization of Growing Random Networks, + Phys. Rev. E, 63, 066123, 2001. + """ + G = empty_graph(1, create_using, default=nx.DiGraph) + if not G.is_directed(): + raise nx.NetworkXError("create_using must indicate a Directed Graph") + + if n == 1: + return G + + for source in range(1, n): + target = seed.randrange(0, source) + if seed.random() < p and target != 0: + target = next(G.successors(target)) + G.add_edge(source, target) + return G + + +@py_random_state(2) +@nx._dispatch(graphs=None) +def gnc_graph(n, create_using=None, seed=None): + """Returns the growing network with copying (GNC) digraph with `n` nodes. + + The GNC graph is built by adding nodes one at a time with a link to one + previously added node (chosen uniformly at random) and to all of that + node's successors. + + Parameters + ---------- + n : int + The number of nodes for the generated graph. + create_using : NetworkX graph constructor, optional (default DiGraph) + Graph type to create. If graph instance, then cleared before populated. + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + References + ---------- + .. [1] P. L. Krapivsky and S. Redner, + Network Growth by Copying, + Phys. Rev. 
E, 71, 036118, 2005k.}, + """ + G = empty_graph(1, create_using, default=nx.DiGraph) + if not G.is_directed(): + raise nx.NetworkXError("create_using must indicate a Directed Graph") + + if n == 1: + return G + + for source in range(1, n): + target = seed.randrange(0, source) + for succ in G.successors(target): + G.add_edge(source, succ) + G.add_edge(source, target) + return G + + +@py_random_state(6) +@nx._dispatch(graphs=None) +def scale_free_graph( + n, + alpha=0.41, + beta=0.54, + gamma=0.05, + delta_in=0.2, + delta_out=0, + seed=None, + initial_graph=None, +): + """Returns a scale-free directed graph. + + Parameters + ---------- + n : integer + Number of nodes in graph + alpha : float + Probability for adding a new node connected to an existing node + chosen randomly according to the in-degree distribution. + beta : float + Probability for adding an edge between two existing nodes. + One existing node is chosen randomly according the in-degree + distribution and the other chosen randomly according to the out-degree + distribution. + gamma : float + Probability for adding a new node connected to an existing node + chosen randomly according to the out-degree distribution. + delta_in : float + Bias for choosing nodes from in-degree distribution. + delta_out : float + Bias for choosing nodes from out-degree distribution. + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + initial_graph : MultiDiGraph instance, optional + Build the scale-free graph starting from this initial MultiDiGraph, + if provided. + + Returns + ------- + MultiDiGraph + + Examples + -------- + Create a scale-free graph on one hundred nodes:: + + >>> G = nx.scale_free_graph(100) + + Notes + ----- + The sum of `alpha`, `beta`, and `gamma` must be 1. + + References + ---------- + .. [1] B. Bollobás, C. Borgs, J. Chayes, and O. 
Riordan, + Directed scale-free graphs, + Proceedings of the fourteenth annual ACM-SIAM Symposium on + Discrete Algorithms, 132--139, 2003. + """ + + def _choose_node(candidates, node_list, delta): + if delta > 0: + bias_sum = len(node_list) * delta + p_delta = bias_sum / (bias_sum + len(candidates)) + if seed.random() < p_delta: + return seed.choice(node_list) + return seed.choice(candidates) + + if initial_graph is not None and hasattr(initial_graph, "_adj"): + if not isinstance(initial_graph, nx.MultiDiGraph): + raise nx.NetworkXError("initial_graph must be a MultiDiGraph.") + G = initial_graph + else: + # Start with 3-cycle + G = nx.MultiDiGraph([(0, 1), (1, 2), (2, 0)]) + + if alpha <= 0: + raise ValueError("alpha must be > 0.") + if beta <= 0: + raise ValueError("beta must be > 0.") + if gamma <= 0: + raise ValueError("gamma must be > 0.") + + if abs(alpha + beta + gamma - 1.0) >= 1e-9: + raise ValueError("alpha+beta+gamma must equal 1.") + + if delta_in < 0: + raise ValueError("delta_in must be >= 0.") + + if delta_out < 0: + raise ValueError("delta_out must be >= 0.") + + # pre-populate degree states + vs = sum((count * [idx] for idx, count in G.out_degree()), []) + ws = sum((count * [idx] for idx, count in G.in_degree()), []) + + # pre-populate node state + node_list = list(G.nodes()) + + # see if there already are number-based nodes + numeric_nodes = [n for n in node_list if isinstance(n, numbers.Number)] + if len(numeric_nodes) > 0: + # set cursor for new nodes appropriately + cursor = max(int(n.real) for n in numeric_nodes) + 1 + else: + # or start at zero + cursor = 0 + + while len(G) < n: + r = seed.random() + + # random choice in alpha,beta,gamma ranges + if r < alpha: + # alpha + # add new node v + v = cursor + cursor += 1 + # also add to node state + node_list.append(v) + # choose w according to in-degree and delta_in + w = _choose_node(ws, node_list, delta_in) + + elif r < alpha + beta: + # beta + # choose v according to out-degree and delta_out + 
v = _choose_node(vs, node_list, delta_out) + # choose w according to in-degree and delta_in + w = _choose_node(ws, node_list, delta_in) + + else: + # gamma + # choose v according to out-degree and delta_out + v = _choose_node(vs, node_list, delta_out) + # add new node w + w = cursor + cursor += 1 + # also add to node state + node_list.append(w) + + # add edge to graph + G.add_edge(v, w) + + # update degree states + vs.append(v) + ws.append(w) + + return G + + +@py_random_state(4) +@nx._dispatch(graphs=None) +def random_uniform_k_out_graph(n, k, self_loops=True, with_replacement=True, seed=None): + """Returns a random `k`-out graph with uniform attachment. + + A random `k`-out graph with uniform attachment is a multidigraph + generated by the following algorithm. For each node *u*, choose + `k` nodes *v* uniformly at random (with replacement). Add a + directed edge joining *u* to *v*. + + Parameters + ---------- + n : int + The number of nodes in the returned graph. + + k : int + The out-degree of each node in the returned graph. + + self_loops : bool + If True, self-loops are allowed when generating the graph. + + with_replacement : bool + If True, neighbors are chosen with replacement and the + returned graph will be a directed multigraph. Otherwise, + neighbors are chosen without replacement and the returned graph + will be a directed graph. + + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + Returns + ------- + NetworkX graph + A `k`-out-regular directed graph generated according to the + above algorithm. It will be a multigraph if and only if + `with_replacement` is True. + + Raises + ------ + ValueError + If `with_replacement` is False and `k` is greater than + `n`. + + See also + -------- + random_k_out_graph + + Notes + ----- + The return digraph or multidigraph may not be strongly connected, or + even weakly connected. 
+ + If `with_replacement` is True, this function is similar to + :func:`random_k_out_graph`, if that function had parameter `alpha` + set to positive infinity. + + """ + if with_replacement: + create_using = nx.MultiDiGraph() + + def sample(v, nodes): + if not self_loops: + nodes = nodes - {v} + return (seed.choice(list(nodes)) for i in range(k)) + + else: + create_using = nx.DiGraph() + + def sample(v, nodes): + if not self_loops: + nodes = nodes - {v} + return seed.sample(list(nodes), k) + + G = nx.empty_graph(n, create_using) + nodes = set(G) + for u in G: + G.add_edges_from((u, v) for v in sample(u, nodes)) + return G + + +@py_random_state(4) +@nx._dispatch(graphs=None) +def random_k_out_graph(n, k, alpha, self_loops=True, seed=None): + """Returns a random `k`-out graph with preferential attachment. + + A random `k`-out graph with preferential attachment is a + multidigraph generated by the following algorithm. + + 1. Begin with an empty digraph, and initially set each node to have + weight `alpha`. + 2. Choose a node `u` with out-degree less than `k` uniformly at + random. + 3. Choose a node `v` from with probability proportional to its + weight. + 4. Add a directed edge from `u` to `v`, and increase the weight + of `v` by one. + 5. If each node has out-degree `k`, halt, otherwise repeat from + step 2. + + For more information on this model of random graph, see [1]. + + Parameters + ---------- + n : int + The number of nodes in the returned graph. + + k : int + The out-degree of each node in the returned graph. + + alpha : float + A positive :class:`float` representing the initial weight of + each vertex. A higher number means that in step 3 above, nodes + will be chosen more like a true uniformly random sample, and a + lower number means that nodes are more likely to be chosen as + their in-degree increases. If this parameter is not positive, a + :exc:`ValueError` is raised. + + self_loops : bool + If True, self-loops are allowed when generating the graph. 
+ + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + Returns + ------- + :class:`~networkx.classes.MultiDiGraph` + A `k`-out-regular multidigraph generated according to the above + algorithm. + + Raises + ------ + ValueError + If `alpha` is not positive. + + Notes + ----- + The returned multidigraph may not be strongly connected, or even + weakly connected. + + References + ---------- + [1]: Peterson, Nicholas R., and Boris Pittel. + "Distance between two random `k`-out digraphs, with and without + preferential attachment." + arXiv preprint arXiv:1311.5961 (2013). + + + """ + if alpha < 0: + raise ValueError("alpha must be positive") + G = nx.empty_graph(n, create_using=nx.MultiDiGraph) + weights = Counter({v: alpha for v in G}) + for i in range(k * n): + u = seed.choice([v for v, d in G.out_degree() if d < k]) + # If self-loops are not allowed, make the source node `u` have + # weight zero. + if not self_loops: + adjustment = Counter({u: weights[u]}) + else: + adjustment = Counter() + v = weighted_choice(weights - adjustment, seed=seed) + G.add_edge(u, v) + weights[v] += 1 + return G diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/generators/duplication.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/generators/duplication.py new file mode 100644 index 0000000000000000000000000000000000000000..6daa5a9c738ceb61d9d4003a0df0e82f5282a158 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/generators/duplication.py @@ -0,0 +1,163 @@ +"""Functions for generating graphs based on the "duplication" method. + +These graph generators start with a small initial graph then duplicate +nodes and (partially) duplicate their edges. These functions are +generally inspired by biological networks. 
+ +""" +import networkx as nx +from networkx.exception import NetworkXError +from networkx.utils import py_random_state + +__all__ = ["partial_duplication_graph", "duplication_divergence_graph"] + + +@py_random_state(4) +@nx._dispatch(graphs=None) +def partial_duplication_graph(N, n, p, q, seed=None): + """Returns a random graph using the partial duplication model. + + Parameters + ---------- + N : int + The total number of nodes in the final graph. + + n : int + The number of nodes in the initial clique. + + p : float + The probability of joining each neighbor of a node to the + duplicate node. Must be a number in the between zero and one, + inclusive. + + q : float + The probability of joining the source node to the duplicate + node. Must be a number in the between zero and one, inclusive. + + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + Notes + ----- + A graph of nodes is grown by creating a fully connected graph + of size `n`. The following procedure is then repeated until + a total of `N` nodes have been reached. + + 1. A random node, *u*, is picked and a new node, *v*, is created. + 2. For each neighbor of *u* an edge from the neighbor to *v* is created + with probability `p`. + 3. An edge from *u* to *v* is created with probability `q`. + + This algorithm appears in [1]. + + This implementation allows the possibility of generating + disconnected graphs. + + References + ---------- + .. [1] Knudsen Michael, and Carsten Wiuf. "A Markov chain approach to + randomly grown graphs." Journal of Applied Mathematics 2008. + + + """ + if p < 0 or p > 1 or q < 0 or q > 1: + msg = "partial duplication graph must have 0 <= p, q <= 1." + raise NetworkXError(msg) + if n > N: + raise NetworkXError("partial duplication graph must have n <= N.") + + G = nx.complete_graph(n) + for new_node in range(n, N): + # Pick a random vertex, u, already in the graph. 
+ src_node = seed.randint(0, new_node - 1) + + # Add a new vertex, v, to the graph. + G.add_node(new_node) + + # For each neighbor of u... + for neighbor_node in list(nx.all_neighbors(G, src_node)): + # Add the neighbor to v with probability p. + if seed.random() < p: + G.add_edge(new_node, neighbor_node) + + # Join v and u with probability q. + if seed.random() < q: + G.add_edge(new_node, src_node) + return G + + +@py_random_state(2) +@nx._dispatch(graphs=None) +def duplication_divergence_graph(n, p, seed=None): + """Returns an undirected graph using the duplication-divergence model. + + A graph of `n` nodes is created by duplicating the initial nodes + and retaining edges incident to the original nodes with a retention + probability `p`. + + Parameters + ---------- + n : int + The desired number of nodes in the graph. + p : float + The probability for retaining the edge of the replicated node. + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + Returns + ------- + G : Graph + + Raises + ------ + NetworkXError + If `p` is not a valid probability. + If `n` is less than 2. + + Notes + ----- + This algorithm appears in [1]. + + This implementation disallows the possibility of generating + disconnected graphs. + + References + ---------- + .. [1] I. Ispolatov, P. L. Krapivsky, A. Yuryev, + "Duplication-divergence model of protein interaction network", + Phys. Rev. E, 71, 061911, 2005. + + """ + if p > 1 or p < 0: + msg = f"NetworkXError p={p} is not in [0,1]." + raise nx.NetworkXError(msg) + if n < 2: + msg = "n must be greater than or equal to 2" + raise nx.NetworkXError(msg) + + G = nx.Graph() + + # Initialize the graph with two connected nodes. + G.add_edge(0, 1) + i = 2 + while i < n: + # Choose a random node from current graph to duplicate. + random_node = seed.choice(list(G)) + # Make the replica. + G.add_node(i) + # flag indicates whether at least one edge is connected on the replica. 
+ flag = False + for nbr in G.neighbors(random_node): + if seed.random() < p: + # Link retention step. + G.add_edge(i, nbr) + flag = True + if not flag: + # Delete replica if no edges retained. + G.remove_node(i) + else: + # Successful duplication. + i += 1 + return G diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/generators/ego.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/generators/ego.py new file mode 100644 index 0000000000000000000000000000000000000000..d0513948a51ff8080c31d525f22a68ba4939fa00 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/generators/ego.py @@ -0,0 +1,65 @@ +""" +Ego graph. +""" +__all__ = ["ego_graph"] + +import networkx as nx + + +@nx._dispatch(edge_attrs="distance") +def ego_graph(G, n, radius=1, center=True, undirected=False, distance=None): + """Returns induced subgraph of neighbors centered at node n within + a given radius. + + Parameters + ---------- + G : graph + A NetworkX Graph or DiGraph + + n : node + A single node + + radius : number, optional + Include all neighbors of distance<=radius from n. + + center : bool, optional + If False, do not include center node in graph + + undirected : bool, optional + If True use both in- and out-neighbors of directed graphs. + + distance : key, optional + Use specified edge data key as distance. For example, setting + distance='weight' will use the edge weight to measure the + distance from the node n. + + Notes + ----- + For directed graphs D this produces the "out" neighborhood + or successors. If you want the neighborhood of predecessors + first reverse the graph with D.reverse(). If you want both + directions use the keyword argument undirected=True. + + Node, edge, and graph attributes are copied to the returned subgraph. 
"""
Ego graph.
"""
__all__ = ["ego_graph"]

import networkx as nx


@nx._dispatch(edge_attrs="distance")
def ego_graph(G, n, radius=1, center=True, undirected=False, distance=None):
    """Returns induced subgraph of neighbors centered at node n within
    a given radius.

    Parameters
    ----------
    G : graph
      A NetworkX Graph or DiGraph

    n : node
      A single node

    radius : number, optional
      Include all neighbors of distance<=radius from n.

    center : bool, optional
      If False, do not include center node in graph

    undirected : bool, optional
      If True use both in- and out-neighbors of directed graphs.

    distance : key, optional
      Use specified edge data key as distance.  For example, setting
      distance='weight' will use the edge weight to measure the
      distance from the node n.

    Notes
    -----
    For directed graphs D this produces the "out" neighborhood
    or successors.  If you want the neighborhood of predecessors
    first reverse the graph with D.reverse().  If you want both
    directions use the keyword argument undirected=True.

    Node, edge, and graph attributes are copied to the returned subgraph.
    """
    # Search on the undirected view when requested; otherwise on G itself.
    search_graph = G.to_undirected() if undirected else G

    if distance is not None:
        # Weighted radius: Dijkstra with the given edge-data key.
        reachable, _ = nx.single_source_dijkstra(
            search_graph, n, cutoff=radius, weight=distance
        )
    else:
        # Unweighted radius: plain BFS distances.
        reachable = dict(
            nx.single_source_shortest_path_length(search_graph, n, cutoff=radius)
        )

    # Induce the subgraph on the original G so attributes are preserved.
    H = G.subgraph(reachable).copy()
    if not center:
        H.remove_node(n)
    return H
#
# Also appearing in the paper of Gabber and Galil:
#
#     (x, y), (x, (x + y) % n), (x, (x + y + 1) % n),
#     ((x + y) % n, y), ((x + y + 1) % n, y)
#
# and:
#
#     (x, y), ((x + 2*y) % n, y), ((x + (2*y + 1)) % n, y),
#     ((x + (2*y + 2)) % n, y), (x, (y + 2*x) % n),
#     (x, (y + (2*x + 1)) % n), (x, (y + (2*x + 2)) % n),
#
@nx._dispatch(graphs=None)
def margulis_gabber_galil_graph(n, create_using=None):
    r"""Returns the Margulis-Gabber-Galil undirected MultiGraph on `n^2` nodes.

    The undirected MultiGraph is regular with degree `8`. Nodes are integer
    pairs. The second-largest eigenvalue of the adjacency matrix of the graph
    is at most `5 \sqrt{2}`, regardless of `n`.

    Parameters
    ----------
    n : int
        Determines the number of nodes in the graph: `n^2`.
    create_using : NetworkX graph constructor, optional (default MultiGraph)
        Graph type to create. If graph instance, then cleared before populated.

    Returns
    -------
    G : graph
        The constructed undirected multigraph.

    Raises
    ------
    NetworkXError
        If the graph is directed or not a multigraph.

    """
    G = nx.empty_graph(0, create_using, default=nx.MultiGraph)
    if G.is_directed() or not G.is_multigraph():
        raise nx.NetworkXError("`create_using` must be an undirected multigraph.")

    # Every node (x, y) of the n x n torus gets four (multi-)edges; the
    # reverse directions make the graph 8-regular.
    for x, y in itertools.product(range(n), repeat=2):
        neighbors = (
            ((x + 2 * y) % n, y),
            ((x + (2 * y + 1)) % n, y),
            (x, (y + 2 * x) % n),
            (x, (y + (2 * x + 1)) % n),
        )
        for nbr in neighbors:
            G.add_edge((x, y), nbr)
    G.graph["name"] = f"margulis_gabber_galil_graph({n})"
    return G
@nx._dispatch(graphs=None)
def chordal_cycle_graph(p, create_using=None):
    """Returns the chordal cycle graph on `p` nodes.

    The returned graph is a cycle graph on `p` nodes with chords joining each
    vertex `x` to its inverse modulo `p`. This graph is a (mildly explicit)
    3-regular expander [1]_.

    `p` *must* be a prime number.

    Parameters
    ----------
    p : a prime number
        The number of vertices in the graph. This also indicates where the
        chordal edges in the cycle will be created.

    create_using : NetworkX graph constructor, optional (default=nx.Graph)
        Graph type to create. If graph instance, then cleared before populated.

    Returns
    -------
    G : graph
        The constructed undirected multigraph.

    Raises
    ------
    NetworkXError
        If `create_using` indicates directed or not a multigraph.

    References
    ----------
    .. [1] Theorem 4.4.2 in A. Lubotzky. "Discrete groups, expanding graphs and
           invariant measures", volume 125 of Progress in Mathematics.
           Birkhäuser Verlag, Basel, 1994.

    """
    G = nx.empty_graph(0, create_using, default=nx.MultiGraph)
    if G.is_directed() or not G.is_multigraph():
        raise nx.NetworkXError("`create_using` must be an undirected multigraph.")

    for x in range(p):
        # The multiplicative inverse of x in Z/pZ via Fermat's Little
        # Theorem: x * x^(p - 2) = 1 (mod p) for prime p and x != 0.
        # The number 0 is a special case: it is its own inverse.
        inverse = pow(x, p - 2, p) if x > 0 else 0
        # Cycle edges to both ring neighbors, plus the chord.
        for y in ((x - 1) % p, (x + 1) % p, inverse):
            G.add_edge(x, y)
    G.graph["name"] = f"chordal_cycle_graph({p})"
    return G
@nx._dispatch(graphs=None)
def paley_graph(p, create_using=None):
    r"""Returns the Paley $\frac{(p-1)}{2}$ -regular graph on $p$ nodes.

    The returned graph is a graph on $\mathbb{Z}/p\mathbb{Z}$ with edges
    between $x$ and $y$ if and only if $x-y$ is a nonzero square in
    $\mathbb{Z}/p\mathbb{Z}$.

    If $p \equiv 1 \pmod 4$, $-1$ is a square in $\mathbb{Z}/p\mathbb{Z}$,
    so $x-y$ is a square if and only if $y-x$ is also a square, i.e. the
    edges in the Paley graph are symmetric.

    If $p \equiv 3 \pmod 4$, $-1$ is not a square, so exactly one of
    $x-y$ and $y-x$ is a square in $\mathbb{Z}/p\mathbb{Z}$.

    Note that a more general definition of Paley graphs extends this
    construction to graphs over $q=p^n$ vertices, by using the finite field
    $F_q$ instead of $\mathbb{Z}/p\mathbb{Z}$. That construction requires
    computing squares in general finite fields and is not what is
    implemented here (i.e. `paley_graph(25)` does not return the true
    Paley graph associated with $5^2$).

    Parameters
    ----------
    p : int, an odd prime number.

    create_using : NetworkX graph constructor, optional (default=nx.Graph)
        Graph type to create. If graph instance, then cleared before populated.

    Returns
    -------
    G : graph
        The constructed directed graph.

    Raises
    ------
    NetworkXError
        If the graph is a multigraph.

    References
    ----------
    Chapter 13 in B. Bollobas, Random Graphs. Second edition.
    Cambridge Studies in Advanced Mathematics, 73.
    Cambridge University Press, Cambridge (2001).
    """
    G = nx.empty_graph(0, create_using, default=nx.DiGraph)
    if G.is_multigraph():
        msg = "`create_using` cannot be a multigraph."
        raise nx.NetworkXError(msg)

    # Compute the set of nonzero squares in Z/pZ (a set to uniquify: there
    # are exactly (p-1)/2 of them when p is prime). The walrus binding
    # avoids computing x**2 % p twice per candidate, which the previous
    # comprehension did.
    square_set = {sq for x in range(1, p) if (sq := x * x % p) != 0}

    # Connect every x to x + s for each square s.
    for x in range(p):
        for x2 in square_set:
            G.add_edge(x, (x + x2) % p)
    G.graph["name"] = f"paley({p})"
    return G
@nx._dispatch(node_attrs="pos_name")
def geometric_edges(G, radius, p=2, *, pos_name="pos"):
    """Returns edge list of node pairs within `radius` of each other.

    Parameters
    ----------
    G : networkx graph
        The graph from which to generate the edge list. The nodes in `G` should
        have an attribute ``pos`` corresponding to the node position, which is
        used to compute the distance to other nodes.
    radius : scalar
        The distance threshold. Edges are included in the edge list if the
        distance between the two nodes is less than `radius`.
    pos_name : string, default="pos"
        The name of the node attribute which represents the position of each
        node in 2D coordinates. Every node in the Graph must have this attribute.
    p : scalar, default=2
        The Minkowski distance metric used to compute distances. The default
        value is 2, i.e. Euclidean distance.

    Returns
    -------
    edges : list
        List of edges whose distances are less than `radius`

    Notes
    -----
    Radius uses Minkowski distance metric `p`.
    If scipy is available, `scipy.spatial.cKDTree` is used to speed computation.

    Examples
    --------
    Create a graph with nodes that have a "pos" attribute representing 2D
    coordinates.

    >>> G = nx.Graph()
    >>> G.add_nodes_from([
    ...     (0, {"pos": (0, 0)}),
    ...     (1, {"pos": (3, 0)}),
    ...     (2, {"pos": (8, 0)}),
    ... ])
    >>> nx.geometric_edges(G, radius=1)
    []
    >>> nx.geometric_edges(G, radius=4)
    [(0, 1)]
    >>> nx.geometric_edges(G, radius=6)
    [(0, 1), (1, 2)]
    >>> nx.geometric_edges(G, radius=9)
    [(0, 1), (0, 2), (1, 2)]
    """
    # Input validation - every node must carry a position attribute.
    for node, coord in G.nodes(data=pos_name):
        if coord is None:
            raise nx.NetworkXError(
                f"Node {node} (and all nodes) must have a '{pos_name}' attribute."
            )

    # The actual work happens in _geometric_edges; splitting the two lets
    # the other geometric generators skip re-validating on every call.
    return _geometric_edges(G, radius, p, pos_name)


def _geometric_edges(G, radius, p, pos_name):
    """
    Implements `geometric_edges` without input validation. See `geometric_edges`
    for complete docstring.
    """
    nodes_pos = G.nodes(data=pos_name)
    try:
        import scipy as sp
    except ImportError:
        # No scipy KDTree available: brute-force pairwise comparison,
        # comparing p-th powers to avoid the root.
        threshold = radius**p
        return [
            (u, v)
            for (u, pu), (v, pv) in combinations(nodes_pos, 2)
            if sum(abs(a - b) ** p for a, b in zip(pu, pv)) <= threshold
        ]
    # scipy KDTree is available; it cannot consume a generator, hence list().
    nodes, coords = list(zip(*nodes_pos))
    pairs = sp.spatial.cKDTree(coords).query_pairs(radius, p)
    return [(nodes[u], nodes[v]) for u, v in sorted(pairs)]
@py_random_state(5)
@nx._dispatch(graphs=None)
def random_geometric_graph(
    n, radius, dim=2, pos=None, p=2, seed=None, *, pos_name="pos"
):
    """Returns a random geometric graph in the unit cube of dimensions `dim`.

    The random geometric graph model places `n` nodes uniformly at
    random in the unit cube. Two nodes are joined by an edge if the
    distance between the nodes is at most `radius`.

    Edges are determined using a KDTree when SciPy is available.
    This reduces the time complexity from $O(n^2)$ to $O(n)$.

    Parameters
    ----------
    n : int or iterable
        Number of nodes or iterable of nodes
    radius: float
        Distance threshold value
    dim : int, optional
        Dimension of graph
    pos : dict, optional
        A dictionary keyed by node with node positions as values.
    p : float, optional
        Which Minkowski distance metric to use. `p` has to meet the condition
        ``1 <= p <= infinity``.

        If this argument is not specified, the :math:`L^2` metric
        (the Euclidean distance metric), p = 2 is used.
        This should not be confused with the `p` of an Erdős-Rényi random
        graph, which represents probability.
    seed : integer, random_state, or None (default)
        Indicator of random number generation state.
        See :ref:`Randomness`.
    pos_name : string, default="pos"
        The name of the node attribute which represents the position
        in 2D coordinates of the node in the returned graph.

    Returns
    -------
    Graph
        A random geometric graph, undirected and without self-loops.
        Each node has a node attribute ``'pos'`` that stores the
        position of that node in Euclidean space as provided by the
        ``pos`` keyword argument or, if ``pos`` was not provided, as
        generated by this function.

    Examples
    --------
    Create a random geometric graph on twenty nodes where nodes are joined by
    an edge if their distance is at most 0.1::

        >>> G = nx.random_geometric_graph(20, 0.1)

    Notes
    -----
    This uses a *k*-d tree to build the graph.

    The `pos` keyword argument can be used to specify node positions so you
    can create an arbitrary distribution and domain for positions.

    For example, to use a 2D Gaussian distribution of node positions with mean
    (0, 0) and standard deviation 2::

        >>> import random
        >>> n = 20
        >>> pos = {i: (random.gauss(0, 2), random.gauss(0, 2)) for i in range(n)}
        >>> G = nx.random_geometric_graph(n, 0.2, pos=pos)

    References
    ----------
    .. [1] Penrose, Mathew, *Random Geometric Graphs*,
           Oxford Studies in Probability, 5, 2003.

    """
    G = nx.empty_graph(n)
    if pos is None:
        # No positions supplied: draw uniformly random vectors in the
        # unit cube of the requested dimension.
        pos = {node: [seed.random() for _ in range(dim)] for node in G}
    nx.set_node_attributes(G, pos, pos_name)

    # Join every pair of nodes within `radius` of each other.
    G.add_edges_from(_geometric_edges(G, radius, p, pos_name))
    return G
@py_random_state(6)
@nx._dispatch(graphs=None)
def soft_random_geometric_graph(
    n, radius, dim=2, pos=None, p=2, p_dist=None, seed=None, *, pos_name="pos"
):
    r"""Returns a soft random geometric graph in the unit cube.

    The soft random geometric graph [1] model places `n` nodes uniformly at
    random in the unit cube in dimension `dim`. Two nodes of distance, `dist`,
    computed by the `p`-Minkowski distance metric are joined by an edge with
    probability `p_dist` if the computed distance metric value of the nodes
    is at most `radius`, otherwise they are not joined.

    Edges within `radius` of each other are determined using a KDTree when
    SciPy is available. This reduces the time complexity from :math:`O(n^2)`
    to :math:`O(n)`.

    Parameters
    ----------
    n : int or iterable
        Number of nodes or iterable of nodes
    radius: float
        Distance threshold value
    dim : int, optional
        Dimension of graph
    pos : dict, optional
        A dictionary keyed by node with node positions as values.
    p : float, optional
        Which Minkowski distance metric to use.
        `p` has to meet the condition ``1 <= p <= infinity``.

        If this argument is not specified, the :math:`L^2` metric
        (the Euclidean distance metric), p = 2 is used.

        This should not be confused with the `p` of an Erdős-Rényi random
        graph, which represents probability.
    p_dist : function, optional
        A probability density function computing the probability of
        connecting two nodes that are of distance, dist, computed by the
        Minkowski distance metric. It must take the metric value as input
        and output a single probability value between 0-1. The scipy.stats
        package has many probability distribution functions implemented and
        tools for custom probability distribution definitions [2], and the
        .pdf method of scipy.stats distributions can be passed here. If
        `p_dist` is not supplied, the default is an exponential distribution
        with rate parameter :math:`\lambda=1`.
    seed : integer, random_state, or None (default)
        Indicator of random number generation state.
        See :ref:`Randomness`.
    pos_name : string, default="pos"
        The name of the node attribute which represents the position
        in 2D coordinates of the node in the returned graph.

    Returns
    -------
    Graph
        A soft random geometric graph, undirected and without self-loops.
        Each node has a node attribute ``'pos'`` that stores the
        position of that node in Euclidean space as provided by the
        ``pos`` keyword argument or, if ``pos`` was not provided, as
        generated by this function.

    Examples
    --------
    Default Graph:

    G = nx.soft_random_geometric_graph(50, 0.2)

    Custom Graph:

    Create a soft random geometric graph on 100 uniformly distributed nodes
    where nodes are joined by an edge with probability computed from an
    exponential distribution with rate parameter :math:`\lambda=1` if their
    Euclidean distance is at most 0.2.

    Notes
    -----
    This uses a *k*-d tree to build the graph.

    The `pos` keyword argument can be used to specify node positions so you
    can create an arbitrary distribution and domain for positions.

    The scipy.stats package can be used to define the probability distribution
    with the .pdf method used as `p_dist`.

    ::

        >>> import random
        >>> import math
        >>> n = 100
        >>> pos = {i: (random.gauss(0, 2), random.gauss(0, 2)) for i in range(n)}
        >>> p_dist = lambda dist: math.exp(-dist)
        >>> G = nx.soft_random_geometric_graph(n, 0.2, pos=pos, p_dist=p_dist)

    References
    ----------
    .. [1] Penrose, Mathew D. "Connectivity of soft random geometric graphs."
       The Annals of Applied Probability 26.2 (2016): 986-1028.
    .. [2] scipy.stats -
       https://docs.scipy.org/doc/scipy/reference/tutorial/stats.html

    """
    G = nx.empty_graph(n)
    G.name = f"soft_random_geometric_graph({n}, {radius}, {dim})"
    if pos is None:
        # Uniform random positions in the unit cube of dimension `dim`.
        pos = {node: [seed.random() for _ in range(dim)] for node in G}
    nx.set_node_attributes(G, pos, pos_name)

    # Default connection probability: exponential with rate 1.
    if p_dist is None:

        def p_dist(dist):
            return math.exp(-dist)

    def keep(edge):
        # Accept a candidate edge with probability p_dist(distance).
        u, v = edge
        d = sum(abs(a - b) ** p for a, b in zip(pos[u], pos[v])) ** (1 / p)
        return seed.random() < p_dist(d)

    # Candidate edges are those within `radius`; each is kept stochastically.
    G.add_edges_from(e for e in _geometric_edges(G, radius, p, pos_name) if keep(e))
    return G
@py_random_state(7)
@nx._dispatch(graphs=None)
def geographical_threshold_graph(
    n,
    theta,
    dim=2,
    pos=None,
    weight=None,
    metric=None,
    p_dist=None,
    seed=None,
    *,
    pos_name="pos",
    weight_name="weight",
):
    r"""Returns a geographical threshold graph.

    The geographical threshold graph model places $n$ nodes uniformly at
    random in a rectangular domain. Each node $u$ is assigned a weight
    $w_u$. Two nodes $u$ and $v$ are joined by an edge if

    .. math::

       (w_u + w_v)p_{dist}(r) \ge \theta

    where `r` is the distance between `u` and `v`, `p_dist` is any function of
    `r`, and :math:`\theta` as the threshold parameter. `p_dist` is used to
    give weight to the distance between nodes when deciding whether or not
    they should be connected. The larger `p_dist` is, the more prone nodes
    separated by `r` are to be connected, and vice versa.

    Parameters
    ----------
    n : int or iterable
        Number of nodes or iterable of nodes
    theta: float
        Threshold value
    dim : int, optional
        Dimension of graph
    pos : dict
        Node positions as a dictionary of tuples keyed by node.
    weight : dict
        Node weights as a dictionary of numbers keyed by node.
    metric : function
        A metric on vectors of numbers (represented as lists or
        tuples). This must be a function that accepts two lists (or
        tuples) as input and yields a number as output. The function
        must also satisfy the four requirements of a `metric`_.
        Specifically, if $d$ is the function and $x$, $y$,
        and $z$ are vectors in the graph, then $d$ must satisfy

        1. $d(x, y) \ge 0$,
        2. $d(x, y) = 0$ if and only if $x = y$,
        3. $d(x, y) = d(y, x)$,
        4. $d(x, z) \le d(x, y) + d(y, z)$.

        If this argument is not specified, the Euclidean distance metric is
        used.

        .. _metric: https://en.wikipedia.org/wiki/Metric_%28mathematics%29
    p_dist : function, optional
        Any function used to give weight to the distance between nodes when
        deciding whether or not they should be connected. `p_dist` was
        originally conceived as a probability density function giving the
        probability of connecting two nodes that are of metric distance `r`
        apart. The implementation here allows for more arbitrary definitions
        of `p_dist` that do not need to correspond to valid probability
        density functions. The :mod:`scipy.stats` package has many
        probability density functions implemented and tools for custom
        probability density definitions, and passing the ``.pdf`` method of
        scipy.stats distributions can be used here. If ``p_dist=None``
        (the default), the exponential function :math:`r^{-2}` is used.
    seed : integer, random_state, or None (default)
        Indicator of random number generation state.
        See :ref:`Randomness`.
    pos_name : string, default="pos"
        The name of the node attribute which represents the position
        in 2D coordinates of the node in the returned graph.
    weight_name : string, default="weight"
        The name of the node attribute which represents the weight
        of the node in the returned graph.

    Returns
    -------
    Graph
        A random geographic threshold graph, undirected and without
        self-loops.

        Each node has a node attribute ``pos`` that stores the
        position of that node in Euclidean space as provided by the
        ``pos`` keyword argument or, if ``pos`` was not provided, as
        generated by this function. Similarly, each node has a node
        attribute ``weight`` that stores the weight of that node as
        provided or as generated.

    Examples
    --------
    Specify an alternate distance metric using the ``metric`` keyword
    argument. For example, to use the `taxicab metric`_ instead of the
    default `Euclidean metric`_::

        >>> dist = lambda x, y: sum(abs(a - b) for a, b in zip(x, y))
        >>> G = nx.geographical_threshold_graph(10, 0.1, metric=dist)

    .. _taxicab metric: https://en.wikipedia.org/wiki/Taxicab_geometry
    .. _Euclidean metric: https://en.wikipedia.org/wiki/Euclidean_distance

    Notes
    -----
    If weights are not specified they are assigned to nodes by drawing randomly
    from the exponential distribution with rate parameter $\lambda=1$.
    To specify weights from a different distribution, use the `weight` keyword
    argument::

        >>> import random
        >>> n = 20
        >>> w = {i: random.expovariate(5.0) for i in range(n)}
        >>> G = nx.geographical_threshold_graph(20, 50, weight=w)

    If node positions are not specified they are randomly assigned from the
    uniform distribution.

    References
    ----------
    .. [1] Masuda, N., Miwa, H., Konno, N.:
       Geographical threshold graphs with small-world and scale-free
       properties.
       Physical Review E 71, 036108 (2005)
    .. [2] Milan Bradonjić, Aric Hagberg and Allon G. Percus,
       Giant component and connectivity in geographical threshold graphs,
       in Algorithms and Models for the Web-Graph (WAW 2007),
       Antony Bonato and Fan Chung (Eds), pp. 209--216, 2007
    """
    G = nx.empty_graph(n)
    # Missing weights come from an exponential distribution (rate 1);
    # weights are drawn BEFORE positions to keep the RNG stream stable.
    if weight is None:
        weight = {v: seed.expovariate(1) for v in G}
    # Missing positions are uniform in the unit cube of dimension `dim`.
    if pos is None:
        pos = {v: [seed.random() for _ in range(dim)] for v in G}
    # Default distance metric: Euclidean.
    if metric is None:
        metric = math.dist
    nx.set_node_attributes(G, weight, weight_name)
    nx.set_node_attributes(G, pos, pos_name)

    # Default distance-weighting function: r^-2.
    if p_dist is None:

        def p_dist(r):
            return r**-2

    def should_join(pair):
        # Threshold condition: (w_u + w_v) * p_dist(distance) >= theta.
        u, v = pair
        return (weight[u] + weight[v]) * p_dist(metric(pos[u], pos[v])) >= theta

    G.add_edges_from(pair for pair in combinations(G, 2) if should_join(pair))
    return G
@py_random_state(6)
@nx._dispatch(graphs=None)
def waxman_graph(
    n,
    beta=0.4,
    alpha=0.1,
    L=None,
    domain=(0, 0, 1, 1),
    metric=None,
    seed=None,
    *,
    pos_name="pos",
):
    r"""Returns a Waxman random graph.

    The Waxman random graph model places `n` nodes uniformly at random
    in a rectangular domain. Each pair of nodes at distance `d` is
    joined by an edge with probability

    .. math::
            p = \beta \exp(-d / \alpha L).

    This function implements both Waxman models, using the `L` keyword
    argument.

    * Waxman-1: if `L` is not specified, it is set to be the maximum distance
      between any pair of nodes.
    * Waxman-2: if `L` is specified, the distance between a pair of nodes is
      chosen uniformly at random from the interval `[0, L]`.

    Parameters
    ----------
    n : int or iterable
        Number of nodes or iterable of nodes
    beta: float
        Model parameter
    alpha: float
        Model parameter
    L : float, optional
        Maximum distance between nodes. If not specified, the actual distance
        is calculated.
    domain : four-tuple of numbers, optional
        Domain size, given as a tuple of the form `(x_min, y_min, x_max,
        y_max)`.
    metric : function
        A metric on vectors of numbers (represented as lists or
        tuples). This must be a function that accepts two lists (or
        tuples) as input and yields a number as output. The function
        must also satisfy the four requirements of a `metric`_.
        Specifically, if $d$ is the function and $x$, $y$,
        and $z$ are vectors in the graph, then $d$ must satisfy

        1. $d(x, y) \ge 0$,
        2. $d(x, y) = 0$ if and only if $x = y$,
        3. $d(x, y) = d(y, x)$,
        4. $d(x, z) \le d(x, y) + d(y, z)$.

        If this argument is not specified, the Euclidean distance metric is
        used.

        .. _metric: https://en.wikipedia.org/wiki/Metric_%28mathematics%29

    seed : integer, random_state, or None (default)
        Indicator of random number generation state.
        See :ref:`Randomness`.
    pos_name : string, default="pos"
        The name of the node attribute which represents the position
        in 2D coordinates of the node in the returned graph.

    Returns
    -------
    Graph
        A random Waxman graph, undirected and without self-loops. Each
        node has a node attribute ``'pos'`` that stores the position of
        that node in Euclidean space as generated by this function.

    Examples
    --------
    Specify an alternate distance metric using the ``metric`` keyword
    argument. For example, to use the "`taxicab metric`_" instead of the
    default `Euclidean metric`_::

        >>> dist = lambda x, y: sum(abs(a - b) for a, b in zip(x, y))
        >>> G = nx.waxman_graph(10, 0.5, 0.1, metric=dist)

    .. _taxicab metric: https://en.wikipedia.org/wiki/Taxicab_geometry
    .. _Euclidean metric: https://en.wikipedia.org/wiki/Euclidean_distance

    Notes
    -----
    Starting in NetworkX 2.0 the parameters alpha and beta align with their
    usual roles in the probability distribution. In earlier versions their
    positions in the expression were reversed. Their position in the calling
    sequence reversed as well to minimize backward incompatibility.

    References
    ----------
    .. [1] B. M. Waxman, *Routing of multipoint connections*.
       IEEE J. Select. Areas Commun. 6(9),(1988) 1617--1622.
    """
    G = nx.empty_graph(n)
    xmin, ymin, xmax, ymax = domain
    # Each node gets a uniformly random position in the given rectangle.
    pos = {v: (seed.uniform(xmin, xmax), seed.uniform(ymin, ymax)) for v in G}
    nx.set_node_attributes(G, pos, pos_name)
    # Default distance metric: Euclidean.
    if metric is None:
        metric = math.dist
    if L is None:
        # Waxman-1: L is the maximum distance between any pair of nodes,
        # and the true inter-node distance is used per pair.
        L = max(metric(a, b) for a, b in combinations(pos.values(), 2))

        def dist(u, v):
            return metric(pos[u], pos[v])

    else:
        # Waxman-2: the "distance" for each pair is drawn uniformly
        # at random from [0, L).
        def dist(u, v):
            return seed.random() * L

    # Per pair, the acceptance draw happens first, then the distance
    # (possibly random) is evaluated — preserving the RNG stream order.
    G.add_edges_from(
        (u, v)
        for u, v in combinations(G, 2)
        if seed.random() < beta * math.exp(-dist(u, v) / (alpha * L))
    )
    return G
@py_random_state(5)
@nx._dispatch(graphs=None)
def navigable_small_world_graph(n, p=1, q=1, r=2, dim=2, seed=None):
    r"""Returns a navigable small-world graph.

    A navigable small-world graph is a directed grid with additional
    long-range connections that are chosen randomly.

    [...] we begin with a set of nodes [...] that are identified with the set
    of lattice points in an $n \times n$ square,
    $\{(i, j): i \in \{1, 2, \ldots, n\}, j \in \{1, 2, \ldots, n\}\}$,
    and we define the *lattice distance* between two nodes $(i, j)$ and
    $(k, l)$ to be the number of "lattice steps" separating them:
    $d((i, j), (k, l)) = |k - i| + |l - j|$.

    For a universal constant $p >= 1$, the node $u$ has a directed edge to
    every other node within lattice distance $p$---these are its *local
    contacts*. For universal constants $q >= 0$ and $r >= 0$ we also
    construct directed edges from $u$ to $q$ other nodes (the *long-range
    contacts*) using independent random trials; the $i$th directed edge from
    $u$ has endpoint $v$ with probability proportional to $[d(u,v)]^{-r}$.

    -- [1]_

    Parameters
    ----------
    n : int
        The length of one side of the lattice; the number of nodes in
        the graph is therefore $n^2$.
    p : int
        The diameter of short range connections. Each node is joined with every
        other node within this lattice distance.
    q : int
        The number of long-range connections for each node.
    r : float
        Exponent for decaying probability of connections. The probability of
        connecting to a node at lattice distance $d$ is $1/d^r$.
    dim : int
        Dimension of grid
    seed : integer, random_state, or None (default)
        Indicator of random number generation state.
        See :ref:`Randomness`.

    Raises
    ------
    NetworkXException
        If ``p < 1``, ``q < 0`` or ``r < 0``.

    References
    ----------
    .. [1] J. Kleinberg. The small-world phenomenon: An algorithmic
       perspective. Proc. 32nd ACM Symposium on Theory of Computing, 2000.
    """
    if p < 1:
        raise nx.NetworkXException("p must be >= 1")
    if q < 0:
        raise nx.NetworkXException("q must be >= 0")
    if r < 0:
        # Bug fix: the guard tests ``r < 0`` (and the docstring requires
        # r >= 0), but the message previously claimed "r must be >= 1".
        raise nx.NetworkXException("r must be >= 0")

    G = nx.DiGraph()
    nodes = list(product(range(n), repeat=dim))
    for p1 in nodes:
        # probs[0] = 0 so bisect_left maps a uniform draw into the CDF.
        probs = [0]
        for p2 in nodes:
            if p1 == p2:
                continue
            # Lattice (Manhattan) distance between p1 and p2.
            d = sum(abs(b - a) for a, b in zip(p1, p2))
            if d <= p:
                # Local contact: deterministic edge within radius p.
                G.add_edge(p1, p2)
            # Every other node is a long-range candidate, weighted d^-r.
            probs.append(d**-r)
        # Draw q long-range contacts by inverse-CDF sampling.
        cdf = list(accumulate(probs))
        for _ in range(q):
            target = nodes[bisect_left(cdf, seed.uniform(0, cdf[-1]))]
            G.add_edge(p1, target)
    return G

    The thresholded random geometric graph [1] model places `n` nodes
    uniformly at random in the unit cube of dimensions `dim`. Each node
    `u` is assigned a weight :math:`w_u`. Two nodes `u` and `v` are
    joined by an edge if they are within the maximum connection distance,
    `radius` computed by the `p`-Minkowski distance and the summation of
    weights :math:`w_u` + :math:`w_v` is greater than or equal
    to the threshold parameter `theta`.

    Edges within `radius` of each other are determined using a KDTree when
    SciPy is available. This reduces the time complexity from :math:`O(n^2)`
    to :math:`O(n)`.

    Parameters
    ----------
    n : int or iterable
        Number of nodes or iterable of nodes
    radius: float
        Distance threshold value
    theta: float
        Threshold value
    dim : int, optional
        Dimension of graph
    pos : dict, optional
        A dictionary keyed by node with node positions as values.
    weight : dict, optional
        Node weights as a dictionary of numbers keyed by node.
    p : float, optional (default 2)
        Which Minkowski distance metric to use. `p` has to meet the condition
        ``1 <= p <= infinity``.

        If this argument is not specified, the :math:`L^2` metric
        (the Euclidean distance metric), p = 2 is used.

        This should not be confused with the `p` of an Erdős-Rényi random
        graph, which represents probability.
    seed : integer, random_state, or None (default)
        Indicator of random number generation state.
        See :ref:`Randomness`.
    pos_name : string, default="pos"
        The name of the node attribute which represents the position
        in 2D coordinates of the node in the returned graph.
    weight_name : string, default="weight"
        The name of the node attribute which represents the weight
        of the node in the returned graph.

    Returns
    -------
    Graph
        A thresholded random geographic graph, undirected and without
        self-loops.

        Each node has a node attribute ``'pos'`` that stores the
        position of that node in Euclidean space as provided by the
        ``pos`` keyword argument or, if ``pos`` was not provided, as
        generated by this function. Similarly, each node has a node
        attribute ``'weight'`` that stores the weight of that node as
        provided or as generated.

    Examples
    --------
    Default graph:

    >>> G = nx.thresholded_random_geometric_graph(50, 0.2, 0.1)

    Custom graph:

    Create a thresholded random geometric graph on 50 uniformly distributed
    nodes where nodes are joined by an edge if their sum weights drawn from
    an exponential distribution with rate = 5 are >= theta = 0.1 and their
    Euclidean distance is at most 0.2.

    Notes
    -----
    This uses a *k*-d tree to build the graph.

    The `pos` keyword argument can be used to specify node positions so you
    can create an arbitrary distribution and domain for positions. For
    example, a 2D Gaussian distribution of node positions with mean (0, 0)
    and standard deviation 2 is used below.

    If weights are not specified they are assigned to nodes by drawing randomly
    from the exponential distribution with rate parameter :math:`\lambda=1`.
    To specify weights from a different distribution, use the `weight` keyword
    argument::

        >>> import random
        >>> import math
        >>> n = 50
        >>> pos = {i: (random.gauss(0, 2), random.gauss(0, 2)) for i in range(n)}
        >>> w = {i: random.expovariate(5.0) for i in range(n)}
        >>> G = nx.thresholded_random_geometric_graph(n, 0.2, 0.1, 2, pos, w)

    References
    ----------
    .. [1] http://cole-maclean.github.io/blog/files/thesis.pdf

    """
    G = nx.empty_graph(n)
    G.name = f"thresholded_random_geometric_graph({n}, {radius}, {theta}, {dim})"
    # If no weights are provided, choose them from an exponential
    # distribution.
+ if weight is None: + weight = {v: seed.expovariate(1) for v in G} + # If no positions are provided, choose uniformly random vectors in + # Euclidean space of the specified dimension. + if pos is None: + pos = {v: [seed.random() for i in range(dim)] for v in G} + # If no distance metric is provided, use Euclidean distance. + nx.set_node_attributes(G, weight, weight_name) + nx.set_node_attributes(G, pos, pos_name) + + edges = ( + (u, v) + for u, v in _geometric_edges(G, radius, p, pos_name) + if weight[u] + weight[v] >= theta + ) + G.add_edges_from(edges) + return G diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/generators/internet_as_graphs.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/generators/internet_as_graphs.py new file mode 100644 index 0000000000000000000000000000000000000000..90e90203e0bbcb5d6a684fdcda7ff327838cb0f1 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/generators/internet_as_graphs.py @@ -0,0 +1,441 @@ +"""Generates graphs resembling the Internet Autonomous System network""" + +import networkx as nx +from networkx.utils import py_random_state + +__all__ = ["random_internet_as_graph"] + + +def uniform_int_from_avg(a, m, seed): + """Pick a random integer with uniform probability. + + Returns a random integer uniformly taken from a distribution with + minimum value 'a' and average value 'm', X~U(a,b), E[X]=m, X in N where + b = 2*m - a. + + Notes + ----- + p = (b-floor(b))/2 + X = X1 + X2; X1~U(a,floor(b)), X2~B(p) + E[X] = E[X1] + E[X2] = (floor(b)+a)/2 + (b-floor(b))/2 = (b+a)/2 = m + """ + + from math import floor + + assert m >= a + b = 2 * m - a + p = (b - floor(b)) / 2 + X1 = round(seed.random() * (floor(b) - a) + a) + if seed.random() < p: + X2 = 1 + else: + X2 = 0 + return X1 + X2 + + +def choose_pref_attach(degs, seed): + """Pick a random value, with a probability given by its weight. 
+ + Returns a random choice among degs keys, each of which has a + probability proportional to the corresponding dictionary value. + + Parameters + ---------- + degs: dictionary + It contains the possible values (keys) and the corresponding + probabilities (values) + seed: random state + + Returns + ------- + v: object + A key of degs or None if degs is empty + """ + + if len(degs) == 0: + return None + s = sum(degs.values()) + if s == 0: + return seed.choice(list(degs.keys())) + v = seed.random() * s + + nodes = list(degs.keys()) + i = 0 + acc = degs[nodes[i]] + while v > acc: + i += 1 + acc += degs[nodes[i]] + return nodes[i] + + +class AS_graph_generator: + """Generates random internet AS graphs.""" + + def __init__(self, n, seed): + """Initializes variables. Immediate numbers are taken from [1]. + + Parameters + ---------- + n: integer + Number of graph nodes + seed: random state + Indicator of random number generation state. + See :ref:`Randomness`. + + Returns + ------- + GG: AS_graph_generator object + + References + ---------- + [1] A. Elmokashfi, A. Kvalbein and C. Dovrolis, "On the Scalability of + BGP: The Role of Topology Growth," in IEEE Journal on Selected Areas + in Communications, vol. 28, no. 8, pp. 1250-1261, October 2010. 
+ """ + + self.seed = seed + self.n_t = min(n, round(self.seed.random() * 2 + 4)) # num of T nodes + self.n_m = round(0.15 * n) # number of M nodes + self.n_cp = round(0.05 * n) # number of CP nodes + self.n_c = max(0, n - self.n_t - self.n_m - self.n_cp) # number of C nodes + + self.d_m = 2 + (2.5 * n) / 10000 # average multihoming degree for M nodes + self.d_cp = 2 + (1.5 * n) / 10000 # avg multihoming degree for CP nodes + self.d_c = 1 + (5 * n) / 100000 # average multihoming degree for C nodes + + self.p_m_m = 1 + (2 * n) / 10000 # avg num of peer edges between M and M + self.p_cp_m = 0.2 + (2 * n) / 10000 # avg num of peer edges between CP, M + self.p_cp_cp = 0.05 + (2 * n) / 100000 # avg num of peer edges btwn CP, CP + + self.t_m = 0.375 # probability M's provider is T + self.t_cp = 0.375 # probability CP's provider is T + self.t_c = 0.125 # probability C's provider is T + + def t_graph(self): + """Generates the core mesh network of tier one nodes of a AS graph. + + Returns + ------- + G: Networkx Graph + Core network + """ + + self.G = nx.Graph() + for i in range(self.n_t): + self.G.add_node(i, type="T") + for r in self.regions: + self.regions[r].add(i) + for j in self.G.nodes(): + if i != j: + self.add_edge(i, j, "peer") + self.customers[i] = set() + self.providers[i] = set() + return self.G + + def add_edge(self, i, j, kind): + if kind == "transit": + customer = str(i) + else: + customer = "none" + self.G.add_edge(i, j, type=kind, customer=customer) + + def choose_peer_pref_attach(self, node_list): + """Pick a node with a probability weighted by its peer degree. + + Pick a node from node_list with preferential attachment + computed only on their peer degree + """ + + d = {} + for n in node_list: + d[n] = self.G.nodes[n]["peers"] + return choose_pref_attach(d, self.seed) + + def choose_node_pref_attach(self, node_list): + """Pick a node with a probability weighted by its degree. 
+ + Pick a node from node_list with preferential attachment + computed on their degree + """ + + degs = dict(self.G.degree(node_list)) + return choose_pref_attach(degs, self.seed) + + def add_customer(self, i, j): + """Keep the dictionaries 'customers' and 'providers' consistent.""" + + self.customers[j].add(i) + self.providers[i].add(j) + for z in self.providers[j]: + self.customers[z].add(i) + self.providers[i].add(z) + + def add_node(self, i, kind, reg2prob, avg_deg, t_edge_prob): + """Add a node and its customer transit edges to the graph. + + Parameters + ---------- + i: object + Identifier of the new node + kind: string + Type of the new node. Options are: 'M' for middle node, 'CP' for + content provider and 'C' for customer. + reg2prob: float + Probability the new node can be in two different regions. + avg_deg: float + Average number of transit nodes of which node i is customer. + t_edge_prob: float + Probability node i establish a customer transit edge with a tier + one (T) node + + Returns + ------- + i: object + Identifier of the new node + """ + + regs = 1 # regions in which node resides + if self.seed.random() < reg2prob: # node is in two regions + regs = 2 + node_options = set() + + self.G.add_node(i, type=kind, peers=0) + self.customers[i] = set() + self.providers[i] = set() + self.nodes[kind].add(i) + for r in self.seed.sample(list(self.regions), regs): + node_options = node_options.union(self.regions[r]) + self.regions[r].add(i) + + edge_num = uniform_int_from_avg(1, avg_deg, self.seed) + + t_options = node_options.intersection(self.nodes["T"]) + m_options = node_options.intersection(self.nodes["M"]) + if i in m_options: + m_options.remove(i) + d = 0 + while d < edge_num and (len(t_options) > 0 or len(m_options) > 0): + if len(m_options) == 0 or ( + len(t_options) > 0 and self.seed.random() < t_edge_prob + ): # add edge to a T node + j = self.choose_node_pref_attach(t_options) + t_options.remove(j) + else: + j = 
self.choose_node_pref_attach(m_options) + m_options.remove(j) + self.add_edge(i, j, "transit") + self.add_customer(i, j) + d += 1 + + return i + + def add_m_peering_link(self, m, to_kind): + """Add a peering link between two middle tier (M) nodes. + + Target node j is drawn considering a preferential attachment based on + other M node peering degree. + + Parameters + ---------- + m: object + Node identifier + to_kind: string + type for target node j (must be always M) + + Returns + ------- + success: boolean + """ + + # candidates are of type 'M' and are not customers of m + node_options = self.nodes["M"].difference(self.customers[m]) + # candidates are not providers of m + node_options = node_options.difference(self.providers[m]) + # remove self + if m in node_options: + node_options.remove(m) + + # remove candidates we are already connected to + for j in self.G.neighbors(m): + if j in node_options: + node_options.remove(j) + + if len(node_options) > 0: + j = self.choose_peer_pref_attach(node_options) + self.add_edge(m, j, "peer") + self.G.nodes[m]["peers"] += 1 + self.G.nodes[j]["peers"] += 1 + return True + else: + return False + + def add_cp_peering_link(self, cp, to_kind): + """Add a peering link to a content provider (CP) node. + + Target node j can be CP or M and it is drawn uniformly among the nodes + belonging to the same region as cp. 
+ + Parameters + ---------- + cp: object + Node identifier + to_kind: string + type for target node j (must be M or CP) + + Returns + ------- + success: boolean + """ + + node_options = set() + for r in self.regions: # options include nodes in the same region(s) + if cp in self.regions[r]: + node_options = node_options.union(self.regions[r]) + + # options are restricted to the indicated kind ('M' or 'CP') + node_options = self.nodes[to_kind].intersection(node_options) + + # remove self + if cp in node_options: + node_options.remove(cp) + + # remove nodes that are cp's providers + node_options = node_options.difference(self.providers[cp]) + + # remove nodes we are already connected to + for j in self.G.neighbors(cp): + if j in node_options: + node_options.remove(j) + + if len(node_options) > 0: + j = self.seed.sample(list(node_options), 1)[0] + self.add_edge(cp, j, "peer") + self.G.nodes[cp]["peers"] += 1 + self.G.nodes[j]["peers"] += 1 + return True + else: + return False + + def graph_regions(self, rn): + """Initializes AS network regions. + + Parameters + ---------- + rn: integer + Number of regions + """ + + self.regions = {} + for i in range(rn): + self.regions["REG" + str(i)] = set() + + def add_peering_links(self, from_kind, to_kind): + """Utility function to add peering links among node groups.""" + peer_link_method = None + if from_kind == "M": + peer_link_method = self.add_m_peering_link + m = self.p_m_m + if from_kind == "CP": + peer_link_method = self.add_cp_peering_link + if to_kind == "M": + m = self.p_cp_m + else: + m = self.p_cp_cp + + for i in self.nodes[from_kind]: + num = uniform_int_from_avg(0, m, self.seed) + for _ in range(num): + peer_link_method(i, to_kind) + + def generate(self): + """Generates a random AS network graph as described in [1]. 
+ + Returns + ------- + G: Graph object + + Notes + ----- + The process steps are the following: first we create the core network + of tier one nodes, then we add the middle tier (M), the content + provider (CP) and the customer (C) nodes along with their transit edges + (link i,j means i is customer of j). Finally we add peering links + between M nodes, between M and CP nodes and between CP node couples. + For a detailed description of the algorithm, please refer to [1]. + + References + ---------- + [1] A. Elmokashfi, A. Kvalbein and C. Dovrolis, "On the Scalability of + BGP: The Role of Topology Growth," in IEEE Journal on Selected Areas + in Communications, vol. 28, no. 8, pp. 1250-1261, October 2010. + """ + + self.graph_regions(5) + self.customers = {} + self.providers = {} + self.nodes = {"T": set(), "M": set(), "CP": set(), "C": set()} + + self.t_graph() + self.nodes["T"] = set(self.G.nodes()) + + i = len(self.nodes["T"]) + for _ in range(self.n_m): + self.nodes["M"].add(self.add_node(i, "M", 0.2, self.d_m, self.t_m)) + i += 1 + for _ in range(self.n_cp): + self.nodes["CP"].add(self.add_node(i, "CP", 0.05, self.d_cp, self.t_cp)) + i += 1 + for _ in range(self.n_c): + self.nodes["C"].add(self.add_node(i, "C", 0, self.d_c, self.t_c)) + i += 1 + + self.add_peering_links("M", "M") + self.add_peering_links("CP", "M") + self.add_peering_links("CP", "CP") + + return self.G + + +@py_random_state(1) +@nx._dispatch(graphs=None) +def random_internet_as_graph(n, seed=None): + """Generates a random undirected graph resembling the Internet AS network + + Parameters + ---------- + n: integer in [1000, 10000] + Number of graph nodes + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. 
+ + Returns + ------- + G: Networkx Graph object + A randomly generated undirected graph + + Notes + ----- + This algorithm returns an undirected graph resembling the Internet + Autonomous System (AS) network, it uses the approach by Elmokashfi et al. + [1]_ and it grants the properties described in the related paper [1]_. + + Each node models an autonomous system, with an attribute 'type' specifying + its kind; tier-1 (T), mid-level (M), customer (C) or content-provider (CP). + Each edge models an ADV communication link (hence, bidirectional) with + attributes: + + - type: transit|peer, the kind of commercial agreement between nodes; + - customer: , the identifier of the node acting as customer + ('none' if type is peer). + + References + ---------- + .. [1] A. Elmokashfi, A. Kvalbein and C. Dovrolis, "On the Scalability of + BGP: The Role of Topology Growth," in IEEE Journal on Selected Areas + in Communications, vol. 28, no. 8, pp. 1250-1261, October 2010. + """ + + GG = AS_graph_generator(n, seed) + G = GG.generate() + return G diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/generators/interval_graph.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/generators/interval_graph.py new file mode 100644 index 0000000000000000000000000000000000000000..80bfdf4f038d429d7141e520247c48b6850462e2 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/generators/interval_graph.py @@ -0,0 +1,71 @@ +""" +Generators for interval graph. +""" +from collections.abc import Sequence + +import networkx as nx + +__all__ = ["interval_graph"] + + +@nx._dispatch(graphs=None) +def interval_graph(intervals): + """Generates an interval graph for a list of intervals given. + + In graph theory, an interval graph is an undirected graph formed from a set + of closed intervals on the real line, with a vertex for each interval + and an edge between vertices whose intervals intersect. 
+ It is the intersection graph of the intervals. + + More information can be found at: + https://en.wikipedia.org/wiki/Interval_graph + + Parameters + ---------- + intervals : a sequence of intervals, say (l, r) where l is the left end, + and r is the right end of the closed interval. + + Returns + ------- + G : networkx graph + + Examples + -------- + >>> intervals = [(-2, 3), [1, 4], (2, 3), (4, 6)] + >>> G = nx.interval_graph(intervals) + >>> sorted(G.edges) + [((-2, 3), (1, 4)), ((-2, 3), (2, 3)), ((1, 4), (2, 3)), ((1, 4), (4, 6))] + + Raises + ------ + :exc:`TypeError` + if `intervals` contains None or an element which is not + collections.abc.Sequence or not a length of 2. + :exc:`ValueError` + if `intervals` contains an interval such that min1 > max1 + where min1,max1 = interval + """ + intervals = list(intervals) + for interval in intervals: + if not (isinstance(interval, Sequence) and len(interval) == 2): + raise TypeError( + "Each interval must have length 2, and be a " + "collections.abc.Sequence such as tuple or list." + ) + if interval[0] > interval[1]: + raise ValueError( + f"Interval must have lower value first. 
" f"Got {interval}" + ) + + graph = nx.Graph() + + tupled_intervals = [tuple(interval) for interval in intervals] + graph.add_nodes_from(tupled_intervals) + + while tupled_intervals: + min1, max1 = interval1 = tupled_intervals.pop() + for interval2 in tupled_intervals: + min2, max2 = interval2 + if max1 >= min2 and max2 >= min1: + graph.add_edge(interval1, interval2) + return graph diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/generators/joint_degree_seq.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/generators/joint_degree_seq.py new file mode 100644 index 0000000000000000000000000000000000000000..fd2d56eba5f4dc31c5c6795d5f3aae3ebe34dc4c --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/generators/joint_degree_seq.py @@ -0,0 +1,664 @@ +"""Generate graphs with a given joint degree and directed joint degree""" + +import networkx as nx +from networkx.utils import py_random_state + +__all__ = [ + "is_valid_joint_degree", + "is_valid_directed_joint_degree", + "joint_degree_graph", + "directed_joint_degree_graph", +] + + +@nx._dispatch(graphs=None) +def is_valid_joint_degree(joint_degrees): + """Checks whether the given joint degree dictionary is realizable. + + A *joint degree dictionary* is a dictionary of dictionaries, in + which entry ``joint_degrees[k][l]`` is an integer representing the + number of edges joining nodes of degree *k* with nodes of degree + *l*. Such a dictionary is realizable as a simple graph if and only + if the following conditions are satisfied. 
+ + - each entry must be an integer, + - the total number of nodes of degree *k*, computed by + ``sum(joint_degrees[k].values()) / k``, must be an integer, + - the total number of edges joining nodes of degree *k* with + nodes of degree *l* cannot exceed the total number of possible edges, + - each diagonal entry ``joint_degrees[k][k]`` must be even (this is + a convention assumed by the :func:`joint_degree_graph` function). + + + Parameters + ---------- + joint_degrees : dictionary of dictionary of integers + A joint degree dictionary in which entry ``joint_degrees[k][l]`` + is the number of edges joining nodes of degree *k* with nodes of + degree *l*. + + Returns + ------- + bool + Whether the given joint degree dictionary is realizable as a + simple graph. + + References + ---------- + .. [1] M. Gjoka, M. Kurant, A. Markopoulou, "2.5K Graphs: from Sampling + to Generation", IEEE Infocom, 2013. + .. [2] I. Stanton, A. Pinar, "Constructing and sampling graphs with a + prescribed joint degree distribution", Journal of Experimental + Algorithmics, 2012. + """ + + degree_count = {} + for k in joint_degrees: + if k > 0: + k_size = sum(joint_degrees[k].values()) / k + if not k_size.is_integer(): + return False + degree_count[k] = k_size + + for k in joint_degrees: + for l in joint_degrees[k]: + if not float(joint_degrees[k][l]).is_integer(): + return False + + if (k != l) and (joint_degrees[k][l] > degree_count[k] * degree_count[l]): + return False + elif k == l: + if joint_degrees[k][k] > degree_count[k] * (degree_count[k] - 1): + return False + if joint_degrees[k][k] % 2 != 0: + return False + + # if all above conditions have been satisfied then the input + # joint degree is realizable as a simple graph. + return True + + +def _neighbor_switch(G, w, unsat, h_node_residual, avoid_node_id=None): + """Releases one free stub for ``w``, while preserving joint degree in G. 
+ + Parameters + ---------- + G : NetworkX graph + Graph in which the neighbor switch will take place. + w : integer + Node id for which we will execute this neighbor switch. + unsat : set of integers + Set of unsaturated node ids that have the same degree as w. + h_node_residual: dictionary of integers + Keeps track of the remaining stubs for a given node. + avoid_node_id: integer + Node id to avoid when selecting w_prime. + + Notes + ----- + First, it selects *w_prime*, an unsaturated node that has the same degree + as ``w``. Second, it selects *switch_node*, a neighbor node of ``w`` that + is not connected to *w_prime*. Then it executes an edge swap i.e. removes + (``w``,*switch_node*) and adds (*w_prime*,*switch_node*). Gjoka et. al. [1] + prove that such an edge swap is always possible. + + References + ---------- + .. [1] M. Gjoka, B. Tillman, A. Markopoulou, "Construction of Simple + Graphs with a Target Joint Degree Matrix and Beyond", IEEE Infocom, '15 + """ + + if (avoid_node_id is None) or (h_node_residual[avoid_node_id] > 1): + # select unsaturated node w_prime that has the same degree as w + w_prime = next(iter(unsat)) + else: + # assume that the node pair (v,w) has been selected for connection. if + # - neighbor_switch is called for node w, + # - nodes v and w have the same degree, + # - node v=avoid_node_id has only one stub left, + # then prevent v=avoid_node_id from being selected as w_prime. 
+ + iter_var = iter(unsat) + while True: + w_prime = next(iter_var) + if w_prime != avoid_node_id: + break + + # select switch_node, a neighbor of w, that is not connected to w_prime + w_prime_neighbs = G[w_prime] # slightly faster declaring this variable + for v in G[w]: + if (v not in w_prime_neighbs) and (v != w_prime): + switch_node = v + break + + # remove edge (w,switch_node), add edge (w_prime,switch_node) and update + # data structures + G.remove_edge(w, switch_node) + G.add_edge(w_prime, switch_node) + h_node_residual[w] += 1 + h_node_residual[w_prime] -= 1 + if h_node_residual[w_prime] == 0: + unsat.remove(w_prime) + + +@py_random_state(1) +@nx._dispatch(graphs=None) +def joint_degree_graph(joint_degrees, seed=None): + """Generates a random simple graph with the given joint degree dictionary. + + Parameters + ---------- + joint_degrees : dictionary of dictionary of integers + A joint degree dictionary in which entry ``joint_degrees[k][l]`` is the + number of edges joining nodes of degree *k* with nodes of degree *l*. + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + Returns + ------- + G : Graph + A graph with the specified joint degree dictionary. + + Raises + ------ + NetworkXError + If *joint_degrees* dictionary is not realizable. + + Notes + ----- + In each iteration of the "while loop" the algorithm picks two disconnected + nodes *v* and *w*, of degree *k* and *l* correspondingly, for which + ``joint_degrees[k][l]`` has not reached its target yet. It then adds + edge (*v*, *w*) and increases the number of edges in graph G by one. + + The intelligence of the algorithm lies in the fact that it is always + possible to add an edge between such disconnected nodes *v* and *w*, + even if one or both nodes do not have free stubs. That is made possible by + executing a "neighbor switch", an edge rewiring move that releases + a free stub while keeping the joint degree of G the same. 
+ + The algorithm continues for E (number of edges) iterations of + the "while loop", at the which point all entries of the given + ``joint_degrees[k][l]`` have reached their target values and the + construction is complete. + + References + ---------- + .. [1] M. Gjoka, B. Tillman, A. Markopoulou, "Construction of Simple + Graphs with a Target Joint Degree Matrix and Beyond", IEEE Infocom, '15 + + Examples + -------- + >>> joint_degrees = { + ... 1: {4: 1}, + ... 2: {2: 2, 3: 2, 4: 2}, + ... 3: {2: 2, 4: 1}, + ... 4: {1: 1, 2: 2, 3: 1}, + ... } + >>> G = nx.joint_degree_graph(joint_degrees) + >>> + """ + + if not is_valid_joint_degree(joint_degrees): + msg = "Input joint degree dict not realizable as a simple graph" + raise nx.NetworkXError(msg) + + # compute degree count from joint_degrees + degree_count = {k: sum(l.values()) // k for k, l in joint_degrees.items() if k > 0} + + # start with empty N-node graph + N = sum(degree_count.values()) + G = nx.empty_graph(N) + + # for a given degree group, keep the list of all node ids + h_degree_nodelist = {} + + # for a given node, keep track of the remaining stubs + h_node_residual = {} + + # populate h_degree_nodelist and h_node_residual + nodeid = 0 + for degree, num_nodes in degree_count.items(): + h_degree_nodelist[degree] = range(nodeid, nodeid + num_nodes) + for v in h_degree_nodelist[degree]: + h_node_residual[v] = degree + nodeid += int(num_nodes) + + # iterate over every degree pair (k,l) and add the number of edges given + # for each pair + for k in joint_degrees: + for l in joint_degrees[k]: + # n_edges_add is the number of edges to add for the + # degree pair (k,l) + n_edges_add = joint_degrees[k][l] + + if (n_edges_add > 0) and (k >= l): + # number of nodes with degree k and l + k_size = degree_count[k] + l_size = degree_count[l] + + # k_nodes and l_nodes consist of all nodes of degree k and l + k_nodes = h_degree_nodelist[k] + l_nodes = h_degree_nodelist[l] + + # k_unsat and l_unsat consist of nodes of 
degree k and l that + # are unsaturated (nodes that have at least 1 available stub) + k_unsat = {v for v in k_nodes if h_node_residual[v] > 0} + + if k != l: + l_unsat = {w for w in l_nodes if h_node_residual[w] > 0} + else: + l_unsat = k_unsat + n_edges_add = joint_degrees[k][l] // 2 + + while n_edges_add > 0: + # randomly pick nodes v and w that have degrees k and l + v = k_nodes[seed.randrange(k_size)] + w = l_nodes[seed.randrange(l_size)] + + # if nodes v and w are disconnected then attempt to connect + if not G.has_edge(v, w) and (v != w): + # if node v has no free stubs then do neighbor switch + if h_node_residual[v] == 0: + _neighbor_switch(G, v, k_unsat, h_node_residual) + + # if node w has no free stubs then do neighbor switch + if h_node_residual[w] == 0: + if k != l: + _neighbor_switch(G, w, l_unsat, h_node_residual) + else: + _neighbor_switch( + G, w, l_unsat, h_node_residual, avoid_node_id=v + ) + + # add edge (v, w) and update data structures + G.add_edge(v, w) + h_node_residual[v] -= 1 + h_node_residual[w] -= 1 + n_edges_add -= 1 + + if h_node_residual[v] == 0: + k_unsat.discard(v) + if h_node_residual[w] == 0: + l_unsat.discard(w) + return G + + +@nx._dispatch(graphs=None) +def is_valid_directed_joint_degree(in_degrees, out_degrees, nkk): + """Checks whether the given directed joint degree input is realizable + + Parameters + ---------- + in_degrees : list of integers + in degree sequence contains the in degrees of nodes. + out_degrees : list of integers + out degree sequence contains the out degrees of nodes. + nkk : dictionary of dictionary of integers + directed joint degree dictionary. for nodes of out degree k (first + level of dict) and nodes of in degree l (second level of dict) + describes the number of edges. + + Returns + ------- + boolean + returns true if given input is realizable, else returns false. 
+ + Notes + ----- + Here is the list of conditions that the inputs (in/out degree sequences, + nkk) need to satisfy for simple directed graph realizability: + + - Condition 0: in_degrees and out_degrees have the same length + - Condition 1: nkk[k][l] is integer for all k,l + - Condition 2: sum(nkk[k])/k = number of nodes with partition id k, is an + integer and matching degree sequence + - Condition 3: number of edges and non-chords between k and l cannot exceed + maximum possible number of edges + + + References + ---------- + [1] B. Tillman, A. Markopoulou, C. T. Butts & M. Gjoka, + "Construction of Directed 2K Graphs". In Proc. of KDD 2017. + """ + V = {} # number of nodes with in/out degree. + forbidden = {} + if len(in_degrees) != len(out_degrees): + return False + + for idx in range(len(in_degrees)): + i = in_degrees[idx] + o = out_degrees[idx] + V[(i, 0)] = V.get((i, 0), 0) + 1 + V[(o, 1)] = V.get((o, 1), 0) + 1 + + forbidden[(o, i)] = forbidden.get((o, i), 0) + 1 + + S = {} # number of edges going from in/out degree nodes. + for k in nkk: + for l in nkk[k]: + val = nkk[k][l] + if not float(val).is_integer(): # condition 1 + return False + + if val > 0: + S[(k, 1)] = S.get((k, 1), 0) + val + S[(l, 0)] = S.get((l, 0), 0) + val + # condition 3 + if val + forbidden.get((k, l), 0) > V[(k, 1)] * V[(l, 0)]: + return False + + return all(S[s] / s[0] == V[s] for s in S) + + +def _directed_neighbor_switch( + G, w, unsat, h_node_residual_out, chords, h_partition_in, partition +): + """Releases one free stub for node w, while preserving joint degree in G. + + Parameters + ---------- + G : networkx directed graph + graph within which the edge swap will take place. + w : integer + node id for which we need to perform a neighbor switch. + unsat: set of integers + set of node ids that have the same degree as w and are unsaturated. + h_node_residual_out: dict of integers + for a given node, keeps track of the remaining stubs to be added. 
+ chords: set of tuples + keeps track of available positions to add edges. + h_partition_in: dict of integers + for a given node, keeps track of its partition id (in degree). + partition: integer + partition id to check if chords have to be updated. + + Notes + ----- + First, it selects node w_prime that (1) has the same degree as w and + (2) is unsaturated. Then, it selects node v, a neighbor of w, that is + not connected to w_prime and does an edge swap i.e. removes (w,v) and + adds (w_prime,v). If neighbor switch is not possible for w using + w_prime and v, then return w_prime; in [1] it's proven that + such unsaturated nodes can be used. + + References + ---------- + [1] B. Tillman, A. Markopoulou, C. T. Butts & M. Gjoka, + "Construction of Directed 2K Graphs". In Proc. of KDD 2017. + """ + w_prime = unsat.pop() + unsat.add(w_prime) + # select node t, a neighbor of w, that is not connected to w_prime + w_neighbs = list(G.successors(w)) + # slightly faster declaring this variable + w_prime_neighbs = list(G.successors(w_prime)) + + for v in w_neighbs: + if (v not in w_prime_neighbs) and w_prime != v: + # removes (w,v), add (w_prime,v) and update data structures + G.remove_edge(w, v) + G.add_edge(w_prime, v) + + if h_partition_in[v] == partition: + chords.add((w, v)) + chords.discard((w_prime, v)) + + h_node_residual_out[w] += 1 + h_node_residual_out[w_prime] -= 1 + if h_node_residual_out[w_prime] == 0: + unsat.remove(w_prime) + return None + + # If neighbor switch didn't work, use unsaturated node + return w_prime + + +def _directed_neighbor_switch_rev( + G, w, unsat, h_node_residual_in, chords, h_partition_out, partition +): + """The reverse of directed_neighbor_switch. + + Parameters + ---------- + G : networkx directed graph + graph within which the edge swap will take place. + w : integer + node id for which we need to perform a neighbor switch. + unsat: set of integers + set of node ids that have the same degree as w and are unsaturated. 
@py_random_state(3)
@nx._dispatch(graphs=None)
def directed_joint_degree_graph(in_degrees, out_degrees, nkk, seed=None):
    """Generates a random simple directed graph with the joint degree.

    Parameters
    ----------
    in_degrees : list of integers
        in degrees of nodes 0 .. len(in_degrees) - 1; entry idx pairs
        with out_degrees[idx].
    out_degrees : list of integers
        out degrees of nodes 0 .. len(out_degrees) - 1.
    nkk : dictionary of dictionary of integers
        directed joint degree dictionary, for nodes of out degree k (first
        level of dict) and nodes of in degree l (second level of dict)
        describes the number of edges.
    seed : hashable object, optional
        Seed for random number generator.

    Returns
    -------
    G : Graph
        A directed graph with the specified inputs.

    Raises
    ------
    NetworkXError
        If in_degrees, out_degrees and nkk are not realizable as a simple
        directed graph.

    Notes
    -----
    Similarly to the undirected version:
    In each iteration of the "while loop" the algorithm picks two disconnected
    nodes v and w, of degree k and l correspondingly, for which nkk[k][l] has
    not reached its target yet i.e. (for given k,l): n_edges_add < nkk[k][l].
    It then adds edge (v,w) and always increases the number of edges in graph
    G by one.

    The intelligence of the algorithm lies in the fact that it is always
    possible to add an edge between disconnected nodes v and w, for which
    nkk[degree(v)][degree(w)] has not reached its target, even if one or both
    nodes do not have free stubs. If either node v or w does not have a free
    stub, we perform a "neighbor switch", an edge rewiring move that releases
    a free stub while keeping nkk the same.

    The difference for the directed version lies in the fact that neighbor
    switches might not be able to rewire, but in these cases unsaturated
    nodes can be reassigned to use instead, see [1] for detailed description
    and proofs.

    The algorithm continues for E (number of edges in the graph) iterations
    of the "while loop", at which point all entries of the given nkk[k][l]
    have reached their target values and the construction is complete.

    References
    ----------
    [1] B. Tillman, A. Markopoulou, C. T. Butts & M. Gjoka,
        "Construction of Directed 2K Graphs". In Proc. of KDD 2017.

    Examples
    --------
    >>> in_degrees = [0, 1, 1, 2]
    >>> out_degrees = [1, 1, 1, 1]
    >>> nkk = {1: {1: 2, 2: 2}}
    >>> G = nx.directed_joint_degree_graph(in_degrees, out_degrees, nkk)
    >>>
    """
    if not is_valid_directed_joint_degree(in_degrees, out_degrees, nkk):
        msg = "Input is not realizable as a simple graph"
        raise nx.NetworkXError(msg)

    # start with an empty directed graph.
    G = nx.DiGraph()

    # for a given group, keep the list of all node ids.
    h_degree_nodelist_in = {}
    h_degree_nodelist_out = {}
    # for a given group, keep the list of all unsaturated node ids.
    h_degree_nodelist_in_unsat = {}
    h_degree_nodelist_out_unsat = {}
    # for a given node, keep track of the remaining stubs to be added.
    h_node_residual_out = {}
    h_node_residual_in = {}
    # for a given node, keep track of the partition id.
    h_partition_out = {}
    h_partition_in = {}
    # keep track of non-chords between pairs of partition ids.
    non_chords = {}

    # populate data structures
    # (enumerate already yields int indices, so no int(idx) casts are needed,
    # and the loop variables i/o already hold the degree values)
    for idx, i in enumerate(in_degrees):
        if i > 0:
            h_degree_nodelist_in.setdefault(i, [])
            h_degree_nodelist_in_unsat.setdefault(i, set())
            h_degree_nodelist_in[i].append(idx)
            h_degree_nodelist_in_unsat[i].add(idx)
            h_node_residual_in[idx] = i
            h_partition_in[idx] = i

    for idx, o in enumerate(out_degrees):
        # a self-loop position (idx, idx) is forbidden for this degree pair
        non_chords[(o, in_degrees[idx])] = non_chords.get((o, in_degrees[idx]), 0) + 1
        if o > 0:
            h_degree_nodelist_out.setdefault(o, [])
            h_degree_nodelist_out_unsat.setdefault(o, set())
            h_degree_nodelist_out[o].append(idx)
            h_degree_nodelist_out_unsat[o].add(idx)
            h_node_residual_out[idx] = o
            h_partition_out[idx] = o

        G.add_node(idx)

    # group sizes: number of nodes of each in/out degree.
    nk_in = {}
    nk_out = {}
    for p in h_degree_nodelist_in:
        nk_in[p] = len(h_degree_nodelist_in[p])
    for p in h_degree_nodelist_out:
        nk_out[p] = len(h_degree_nodelist_out[p])

    # iterate over every degree pair (k,l) and add the number of edges given
    # for each pair.
    for k in nkk:
        for l in nkk[k]:
            n_edges_add = nkk[k][l]

            if n_edges_add > 0:
                # chords contains a random set of potential edges; sample
                # extra positions to compensate for forbidden self-loops.
                chords = set()

                k_len = nk_out[k]
                l_len = nk_in[l]
                chords_sample = seed.sample(
                    range(k_len * l_len), n_edges_add + non_chords.get((k, l), 0)
                )

                num = 0
                while len(chords) < n_edges_add:
                    i = h_degree_nodelist_out[k][chords_sample[num] % k_len]
                    j = h_degree_nodelist_in[l][chords_sample[num] // k_len]
                    num += 1
                    if i != j:
                        chords.add((i, j))

                # k_unsat and l_unsat consist of nodes of out/in degree k and
                # l that are unsaturated i.e. those nodes that have at least
                # one available stub
                k_unsat = h_degree_nodelist_out_unsat[k]
                l_unsat = h_degree_nodelist_in_unsat[l]

                while n_edges_add > 0:
                    v, w = chords.pop()
                    chords.add((v, w))

                    # if node v has no free stubs then do neighbor switch.
                    if h_node_residual_out[v] == 0:
                        _v = _directed_neighbor_switch(
                            G,
                            v,
                            k_unsat,
                            h_node_residual_out,
                            chords,
                            h_partition_in,
                            l,
                        )
                        if _v is not None:
                            v = _v

                    # if node w has no free stubs then do neighbor switch.
                    if h_node_residual_in[w] == 0:
                        _w = _directed_neighbor_switch_rev(
                            G,
                            w,
                            l_unsat,
                            h_node_residual_in,
                            chords,
                            h_partition_out,
                            k,
                        )
                        if _w is not None:
                            w = _w

                    # add edge (v,w) and update data structures.
                    G.add_edge(v, w)
                    h_node_residual_out[v] -= 1
                    h_node_residual_in[w] -= 1
                    n_edges_add -= 1
                    chords.discard((v, w))

                    if h_node_residual_out[v] == 0:
                        k_unsat.discard(v)
                    if h_node_residual_in[w] == 0:
                        l_unsat.discard(w)
    return G
+ +""" + +__all__ = ["nonisomorphic_trees", "number_of_nonisomorphic_trees"] + +import networkx as nx + + +@nx._dispatch(graphs=None) +def nonisomorphic_trees(order, create="graph"): + """Returns a list of nonisomorphic trees + + Parameters + ---------- + order : int + order of the desired tree(s) + + create : graph or matrix (default="Graph) + If graph is selected a list of trees will be returned, + if matrix is selected a list of adjacency matrix will + be returned + + Returns + ------- + G : List of NetworkX Graphs + + M : List of Adjacency matrices + + References + ---------- + + """ + + if order < 2: + raise ValueError + # start at the path graph rooted at its center + layout = list(range(order // 2 + 1)) + list(range(1, (order + 1) // 2)) + + while layout is not None: + layout = _next_tree(layout) + if layout is not None: + if create == "graph": + yield _layout_to_graph(layout) + elif create == "matrix": + yield _layout_to_matrix(layout) + layout = _next_rooted_tree(layout) + + +@nx._dispatch(graphs=None) +def number_of_nonisomorphic_trees(order): + """Returns the number of nonisomorphic trees + + Parameters + ---------- + order : int + order of the desired tree(s) + + Returns + ------- + length : Number of nonisomorphic graphs for the given order + + References + ---------- + + """ + return sum(1 for _ in nonisomorphic_trees(order)) + + +def _next_rooted_tree(predecessor, p=None): + """One iteration of the Beyer-Hedetniemi algorithm.""" + + if p is None: + p = len(predecessor) - 1 + while predecessor[p] == 1: + p -= 1 + if p == 0: + return None + + q = p - 1 + while predecessor[q] != predecessor[p] - 1: + q -= 1 + result = list(predecessor) + for i in range(p, len(result)): + result[i] = result[i - p + q] + return result + + +def _next_tree(candidate): + """One iteration of the Wright, Richmond, Odlyzko and McKay + algorithm.""" + + # valid representation of a free tree if: + # there are at least two vertices at layer 1 + # (this is always the case because 
we start at the path graph) + left, rest = _split_tree(candidate) + + # and the left subtree of the root + # is less high than the tree with the left subtree removed + left_height = max(left) + rest_height = max(rest) + valid = rest_height >= left_height + + if valid and rest_height == left_height: + # and, if left and rest are of the same height, + # if left does not encompass more vertices + if len(left) > len(rest): + valid = False + # and, if they have the same number or vertices, + # if left does not come after rest lexicographically + elif len(left) == len(rest) and left > rest: + valid = False + + if valid: + return candidate + else: + # jump to the next valid free tree + p = len(left) + new_candidate = _next_rooted_tree(candidate, p) + if candidate[p] > 2: + new_left, new_rest = _split_tree(new_candidate) + new_left_height = max(new_left) + suffix = range(1, new_left_height + 2) + new_candidate[-len(suffix) :] = suffix + return new_candidate + + +def _split_tree(layout): + """Returns a tuple of two layouts, one containing the left + subtree of the root vertex, and one containing the original tree + with the left subtree removed.""" + + one_found = False + m = None + for i in range(len(layout)): + if layout[i] == 1: + if one_found: + m = i + break + else: + one_found = True + + if m is None: + m = len(layout) + + left = [layout[i] - 1 for i in range(1, m)] + rest = [0] + [layout[i] for i in range(m, len(layout))] + return (left, rest) + + +def _layout_to_matrix(layout): + """Create the adjacency matrix for the tree specified by the + given layout (level sequence).""" + + result = [[0] * len(layout) for i in range(len(layout))] + stack = [] + for i in range(len(layout)): + i_level = layout[i] + if stack: + j = stack[-1] + j_level = layout[j] + while j_level >= i_level: + stack.pop() + j = stack[-1] + j_level = layout[j] + result[i][j] = result[j][i] = 1 + stack.append(i) + return result + + +def _layout_to_graph(layout): + """Create a NetworkX Graph for the 
@py_random_state(2)
@nx._dispatch(graphs=None)
def fast_gnp_random_graph(n, p, seed=None, directed=False):
    """Returns a $G_{n,p}$ random graph, also known as an Erdős-Rényi graph or
    a binomial graph.

    Parameters
    ----------
    n : int
        The number of nodes.
    p : float
        Probability for edge creation.
    seed : integer, random_state, or None (default)
        Indicator of random number generation state.
        See :ref:`Randomness`.
    directed : bool, optional (default=False)
        If True, this function returns a directed graph.

    Notes
    -----
    The $G_{n,p}$ graph algorithm chooses each of the $[n (n - 1)] / 2$
    (undirected) or $n (n - 1)$ (directed) possible edges with probability $p$.

    This algorithm [1]_ runs in $O(n + m)$ time, where `m` is the expected number of
    edges, which equals $p n (n - 1) / 2$. This should be faster than
    :func:`gnp_random_graph` when $p$ is small and the expected number of edges
    is small (that is, the graph is sparse).

    See Also
    --------
    gnp_random_graph

    References
    ----------
    .. [1] Vladimir Batagelj and Ulrik Brandes,
       "Efficient generation of large random networks",
       Phys. Rev. E, 71, 036113, 2005.
    """
    G = empty_graph(n)

    # Degenerate probabilities: delegate to gnp_random_graph, which returns
    # the empty graph for p <= 0 and the complete graph for p >= 1.
    if p <= 0 or p >= 1:
        return nx.gnp_random_graph(n, p, seed=seed, directed=directed)

    # log(1 - p) drives the geometric "skip" trick below: the gap between
    # consecutive accepted edges is geometrically distributed, so whole runs
    # of rejected candidate pairs are skipped in O(1).
    lp = math.log(1.0 - p)

    if directed:
        # First pass samples the (w, v) orientation for pairs with w < v;
        # the pass below samples (v, w), so together the two passes cover
        # all n * (n - 1) ordered pairs exactly once.
        G = nx.DiGraph(G)
        v = 1
        w = -1
        while v < n:
            lr = math.log(1.0 - seed.random())
            # Jump ahead by a geometrically distributed number of pairs.
            w = w + 1 + int(lr / lp)
            # Row v of the strict lower triangle has v candidate pairs;
            # wrap w into the following row(s) when it overflows.
            while w >= v and v < n:
                w = w - v
                v = v + 1
            if v < n:
                G.add_edge(w, v)

    # Nodes in graph are from 0,n-1 (start with v as the second node index).
    v = 1
    w = -1
    while v < n:
        lr = math.log(1.0 - seed.random())
        # Same geometric skip as above, for the (v, w) orientation.
        w = w + 1 + int(lr / lp)
        while w >= v and v < n:
            w = w - v
            v = v + 1
        if v < n:
            G.add_edge(v, w)
    return G
@py_random_state(2)
@nx._dispatch(graphs=None)
def dense_gnm_random_graph(n, m, seed=None):
    """Returns a $G_{n,m}$ random graph.

    In the $G_{n,m}$ model, a graph is chosen uniformly at random from the set
    of all graphs with $n$ nodes and $m$ edges.

    This algorithm should be faster than :func:`gnm_random_graph` for dense
    graphs.

    Parameters
    ----------
    n : int
        The number of nodes.
    m : int
        The number of edges.
    seed : integer, random_state, or None (default)
        Indicator of random number generation state.
        See :ref:`Randomness`.

    See Also
    --------
    gnm_random_graph

    Notes
    -----
    Algorithm by Keith M. Briggs Mar 31, 2006.
    Inspired by Knuth's Algorithm S (Selection sampling technique),
    in section 3.4.2 of [1]_.

    References
    ----------
    .. [1] Donald E. Knuth, The Art of Computer Programming,
       Volume 2/Seminumerical algorithms, Third Edition, Addison-Wesley, 1997.
    """
    mmax = n * (n - 1) // 2
    G = complete_graph(n) if m >= mmax else empty_graph(n)

    if n == 1 or m >= mmax:
        return G

    # Walk the strict upper triangle of the adjacency matrix pair by pair:
    # t pairs inspected so far, k edges accepted.  Each pair (u, v) is
    # accepted with probability (m - k) / (mmax - t), which yields a
    # uniformly random m-subset (Knuth's selection sampling).
    u, v = 0, 1
    t = k = 0
    while True:
        if seed.randrange(mmax - t) < m - k:
            G.add_edge(u, v)
            k += 1
            if k == m:
                return G
        t += 1
        v += 1
        if v == n:  # move on to the next row of the adjacency matrix
            u += 1
            v = u + 1
+ k : int + Each node is joined with its `k` nearest neighbors in a ring + topology. + p : float + The probability of adding a new edge for each edge. + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + Notes + ----- + First create a ring over $n$ nodes [1]_. Then each node in the ring is + connected with its $k$ nearest neighbors (or $k - 1$ neighbors if $k$ + is odd). Then shortcuts are created by adding new edges as follows: for + each edge $(u, v)$ in the underlying "$n$-ring with $k$ nearest + neighbors" with probability $p$ add a new edge $(u, w)$ with + randomly-chosen existing node $w$. In contrast with + :func:`watts_strogatz_graph`, no edges are removed. + + See Also + -------- + watts_strogatz_graph + + References + ---------- + .. [1] M. E. J. Newman and D. J. Watts, + Renormalization group analysis of the small-world network model, + Physics Letters A, 263, 341, 1999. + https://doi.org/10.1016/S0375-9601(99)00757-4 + """ + if k > n: + raise nx.NetworkXError("k>=n, choose smaller k or larger n") + + # If k == n the graph return is a complete graph + if k == n: + return nx.complete_graph(n) + + G = empty_graph(n) + nlist = list(G.nodes()) + fromv = nlist + # connect the k/2 neighbors + for j in range(1, k // 2 + 1): + tov = fromv[j:] + fromv[0:j] # the first j are now last + for i in range(len(fromv)): + G.add_edge(fromv[i], tov[i]) + # for each edge u-v, with probability p, randomly select existing + # node w and add new edge u-w + e = list(G.edges()) + for u, v in e: + if seed.random() < p: + w = seed.choice(nlist) + # no self-loops and reject if edge u-w exists + # is that the correct NWS model? 
@py_random_state(3)
@nx._dispatch(graphs=None)
def watts_strogatz_graph(n, k, p, seed=None):
    """Returns a Watts–Strogatz small-world graph.

    Parameters
    ----------
    n : int
        The number of nodes
    k : int
        Each node is joined with its `k` nearest neighbors in a ring
        topology.
    p : float
        The probability of rewiring each edge
    seed : integer, random_state, or None (default)
        Indicator of random number generation state.
        See :ref:`Randomness`.

    See Also
    --------
    newman_watts_strogatz_graph
    connected_watts_strogatz_graph

    Notes
    -----
    First create a ring over $n$ nodes [1]_. Then each node in the ring is joined
    to its $k$ nearest neighbors (or $k - 1$ neighbors if $k$ is odd).
    Then shortcuts are created by replacing some edges as follows: for each
    edge $(u, v)$ in the underlying "$n$-ring with $k$ nearest neighbors"
    with probability $p$ replace it with a new edge $(u, w)$ with uniformly
    random choice of existing node $w$.

    In contrast with :func:`newman_watts_strogatz_graph`, the random rewiring
    does not increase the number of edges. The rewired graph is not guaranteed
    to be connected as in :func:`connected_watts_strogatz_graph`.

    References
    ----------
    .. [1] Duncan J. Watts and Steven H. Strogatz,
       Collective dynamics of small-world networks,
       Nature, 393, pp. 440--442, 1998.
    """
    if k > n:
        raise nx.NetworkXError("k>n, choose smaller k or larger n")

    # If k == n, the graph is complete not Watts-Strogatz
    if k == n:
        return nx.complete_graph(n)

    G = nx.Graph()
    nodes = list(range(n))  # nodes are labeled 0 to n-1
    # connect each node to k/2 neighbors
    for j in range(1, k // 2 + 1):
        targets = nodes[j:] + nodes[0:j]  # first j nodes are now last in list
        G.add_edges_from(zip(nodes, targets))
    # rewire edges from each node
    # loop over all nodes in order (label) and neighbors in order (distance)
    # no self loops or multiple edges allowed
    for j in range(1, k // 2 + 1):  # outer loop is neighbors
        targets = nodes[j:] + nodes[0:j]  # first j nodes are now last in list
        # inner loop in node order
        for u, v in zip(nodes, targets):
            if seed.random() < p:
                w = seed.choice(nodes)
                # Enforce no self-loops or multiple edges
                while w == u or G.has_edge(u, w):
                    w = seed.choice(nodes)
                    # A node adjacent to everything else cannot be rewired.
                    if G.degree(u) >= n - 1:
                        break  # skip this rewiring
                # NOTE: this `else` belongs to the `while` above — it runs
                # only when the loop exited normally, i.e. a valid
                # replacement endpoint w was found (no break).
                else:
                    G.remove_edge(u, v)
                    G.add_edge(u, w)
    return G
+ Then shortcuts are created by replacing some edges as follows: for each + edge $(u, v)$ in the underlying "$n$-ring with $k$ nearest neighbors" + with probability $p$ replace it with a new edge $(u, w)$ with uniformly + random choice of existing node $w$. + The entire process is repeated until a connected graph results. + + See Also + -------- + newman_watts_strogatz_graph + watts_strogatz_graph + + References + ---------- + .. [1] Duncan J. Watts and Steven H. Strogatz, + Collective dynamics of small-world networks, + Nature, 393, pp. 440--442, 1998. + """ + for i in range(tries): + # seed is an RNG so should change sequence each call + G = watts_strogatz_graph(n, k, p, seed) + if nx.is_connected(G): + return G + raise nx.NetworkXError("Maximum number of tries exceeded") + + +@py_random_state(2) +@nx._dispatch(graphs=None) +def random_regular_graph(d, n, seed=None): + r"""Returns a random $d$-regular graph on $n$ nodes. + + A regular graph is a graph where each node has the same number of neighbors. + + The resulting graph has no self-loops or parallel edges. + + Parameters + ---------- + d : int + The degree of each node. + n : integer + The number of nodes. The value of $n \times d$ must be even. + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + Notes + ----- + The nodes are numbered from $0$ to $n - 1$. + + Kim and Vu's paper [2]_ shows that this algorithm samples in an + asymptotically uniform way from the space of random graphs when + $d = O(n^{1 / 3 - \epsilon})$. + + Raises + ------ + + NetworkXError + If $n \times d$ is odd or $d$ is greater than or equal to $n$. + + References + ---------- + .. [1] A. Steger and N. Wormald, + Generating random regular graphs quickly, + Probability and Computing 8 (1999), 377-396, 1999. + https://doi.org/10.1017/S0963548399003867 + + .. [2] Jeong Han Kim and Van H. 
Vu, + Generating random regular graphs, + Proceedings of the thirty-fifth ACM symposium on Theory of computing, + San Diego, CA, USA, pp 213--222, 2003. + http://portal.acm.org/citation.cfm?id=780542.780576 + """ + if (n * d) % 2 != 0: + raise nx.NetworkXError("n * d must be even") + + if not 0 <= d < n: + raise nx.NetworkXError("the 0 <= d < n inequality must be satisfied") + + if d == 0: + return empty_graph(n) + + def _suitable(edges, potential_edges): + # Helper subroutine to check if there are suitable edges remaining + # If False, the generation of the graph has failed + if not potential_edges: + return True + for s1 in potential_edges: + for s2 in potential_edges: + # Two iterators on the same dictionary are guaranteed + # to visit it in the same order if there are no + # intervening modifications. + if s1 == s2: + # Only need to consider s1-s2 pair one time + break + if s1 > s2: + s1, s2 = s2, s1 + if (s1, s2) not in edges: + return True + return False + + def _try_creation(): + # Attempt to create an edge set + + edges = set() + stubs = list(range(n)) * d + + while stubs: + potential_edges = defaultdict(lambda: 0) + seed.shuffle(stubs) + stubiter = iter(stubs) + for s1, s2 in zip(stubiter, stubiter): + if s1 > s2: + s1, s2 = s2, s1 + if s1 != s2 and ((s1, s2) not in edges): + edges.add((s1, s2)) + else: + potential_edges[s1] += 1 + potential_edges[s2] += 1 + + if not _suitable(edges, potential_edges): + return None # failed to find suitable edge set + + stubs = [ + node + for node, potential in potential_edges.items() + for _ in range(potential) + ] + return edges + + # Even though a suitable edge set exists, + # the generation of such a set is not guaranteed. + # Try repeatedly to find one. + edges = _try_creation() + while edges is None: + edges = _try_creation() + + G = nx.Graph() + G.add_edges_from(edges) + + return G + + +def _random_subset(seq, m, rng): + """Return m unique elements from seq. 
+ + This differs from random.sample which can return repeated + elements if seq holds repeated elements. + + Note: rng is a random.Random or numpy.random.RandomState instance. + """ + targets = set() + while len(targets) < m: + x = rng.choice(seq) + targets.add(x) + return targets + + +@py_random_state(2) +@nx._dispatch(graphs=None) +def barabasi_albert_graph(n, m, seed=None, initial_graph=None): + """Returns a random graph using Barabási–Albert preferential attachment + + A graph of $n$ nodes is grown by attaching new nodes each with $m$ + edges that are preferentially attached to existing nodes with high degree. + + Parameters + ---------- + n : int + Number of nodes + m : int + Number of edges to attach from a new node to existing nodes + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + initial_graph : Graph or None (default) + Initial network for Barabási–Albert algorithm. + It should be a connected graph for most use cases. + A copy of `initial_graph` is used. + If None, starts from a star graph on (m+1) nodes. + + Returns + ------- + G : Graph + + Raises + ------ + NetworkXError + If `m` does not satisfy ``1 <= m < n``, or + the initial graph number of nodes m0 does not satisfy ``m <= m0 <= n``. + + References + ---------- + .. [1] A. L. Barabási and R. Albert "Emergence of scaling in + random networks", Science 286, pp 509-512, 1999. 
+ """ + + if m < 1 or m >= n: + raise nx.NetworkXError( + f"Barabási–Albert network must have m >= 1 and m < n, m = {m}, n = {n}" + ) + + if initial_graph is None: + # Default initial graph : star graph on (m + 1) nodes + G = star_graph(m) + else: + if len(initial_graph) < m or len(initial_graph) > n: + raise nx.NetworkXError( + f"Barabási–Albert initial graph needs between m={m} and n={n} nodes" + ) + G = initial_graph.copy() + + # List of existing nodes, with nodes repeated once for each adjacent edge + repeated_nodes = [n for n, d in G.degree() for _ in range(d)] + # Start adding the other n - m0 nodes. + source = len(G) + while source < n: + # Now choose m unique nodes from the existing nodes + # Pick uniformly from repeated_nodes (preferential attachment) + targets = _random_subset(repeated_nodes, m, seed) + # Add edges to m nodes from the source. + G.add_edges_from(zip([source] * m, targets)) + # Add one node to the list for each new edge just created. + repeated_nodes.extend(targets) + # And the new node "source" has m edges to add to the list. + repeated_nodes.extend([source] * m) + + source += 1 + return G + + +@py_random_state(4) +@nx._dispatch(graphs=None) +def dual_barabasi_albert_graph(n, m1, m2, p, seed=None, initial_graph=None): + """Returns a random graph using dual Barabási–Albert preferential attachment + + A graph of $n$ nodes is grown by attaching new nodes each with either $m_1$ + edges (with probability $p$) or $m_2$ edges (with probability $1-p$) that + are preferentially attached to existing nodes with high degree. + + Parameters + ---------- + n : int + Number of nodes + m1 : int + Number of edges to link each new node to existing nodes with probability $p$ + m2 : int + Number of edges to link each new node to existing nodes with probability $1-p$ + p : float + The probability of attaching $m_1$ edges (as opposed to $m_2$ edges) + seed : integer, random_state, or None (default) + Indicator of random number generation state. 
+ See :ref:`Randomness`. + initial_graph : Graph or None (default) + Initial network for Barabási–Albert algorithm. + A copy of `initial_graph` is used. + It should be connected for most use cases. + If None, starts from an star graph on max(m1, m2) + 1 nodes. + + Returns + ------- + G : Graph + + Raises + ------ + NetworkXError + If `m1` and `m2` do not satisfy ``1 <= m1,m2 < n``, or + `p` does not satisfy ``0 <= p <= 1``, or + the initial graph number of nodes m0 does not satisfy m1, m2 <= m0 <= n. + + References + ---------- + .. [1] N. Moshiri "The dual-Barabasi-Albert model", arXiv:1810.10538. + """ + + if m1 < 1 or m1 >= n: + raise nx.NetworkXError( + f"Dual Barabási–Albert must have m1 >= 1 and m1 < n, m1 = {m1}, n = {n}" + ) + if m2 < 1 or m2 >= n: + raise nx.NetworkXError( + f"Dual Barabási–Albert must have m2 >= 1 and m2 < n, m2 = {m2}, n = {n}" + ) + if p < 0 or p > 1: + raise nx.NetworkXError( + f"Dual Barabási–Albert network must have 0 <= p <= 1, p = {p}" + ) + + # For simplicity, if p == 0 or 1, just return BA + if p == 1: + return barabasi_albert_graph(n, m1, seed) + elif p == 0: + return barabasi_albert_graph(n, m2, seed) + + if initial_graph is None: + # Default initial graph : empty graph on max(m1, m2) nodes + G = star_graph(max(m1, m2)) + else: + if len(initial_graph) < max(m1, m2) or len(initial_graph) > n: + raise nx.NetworkXError( + f"Barabási–Albert initial graph must have between " + f"max(m1, m2) = {max(m1, m2)} and n = {n} nodes" + ) + G = initial_graph.copy() + + # Target nodes for new edges + targets = list(G) + # List of existing nodes, with nodes repeated once for each adjacent edge + repeated_nodes = [n for n, d in G.degree() for _ in range(d)] + # Start adding the remaining nodes. 
+ source = len(G) + while source < n: + # Pick which m to use (m1 or m2) + if seed.random() < p: + m = m1 + else: + m = m2 + # Now choose m unique nodes from the existing nodes + # Pick uniformly from repeated_nodes (preferential attachment) + targets = _random_subset(repeated_nodes, m, seed) + # Add edges to m nodes from the source. + G.add_edges_from(zip([source] * m, targets)) + # Add one node to the list for each new edge just created. + repeated_nodes.extend(targets) + # And the new node "source" has m edges to add to the list. + repeated_nodes.extend([source] * m) + + source += 1 + return G + + +@py_random_state(4) +@nx._dispatch(graphs=None) +def extended_barabasi_albert_graph(n, m, p, q, seed=None): + """Returns an extended Barabási–Albert model graph. + + An extended Barabási–Albert model graph is a random graph constructed + using preferential attachment. The extended model allows new edges, + rewired edges or new nodes. Based on the probabilities $p$ and $q$ + with $p + q < 1$, the growing behavior of the graph is determined as: + + 1) With $p$ probability, $m$ new edges are added to the graph, + starting from randomly chosen existing nodes and attached preferentially at the other end. + + 2) With $q$ probability, $m$ existing edges are rewired + by randomly choosing an edge and rewiring one end to a preferentially chosen node. + + 3) With $(1 - p - q)$ probability, $m$ new nodes are added to the graph + with edges attached preferentially. + + When $p = q = 0$, the model behaves just like the Barabási–Alber model. + + Parameters + ---------- + n : int + Number of nodes + m : int + Number of edges with which a new node attaches to existing nodes + p : float + Probability value for adding an edge between existing nodes. p + q < 1 + q : float + Probability value of rewiring of existing edges. p + q < 1 + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. 
+ + Returns + ------- + G : Graph + + Raises + ------ + NetworkXError + If `m` does not satisfy ``1 <= m < n`` or ``1 >= p + q`` + + References + ---------- + .. [1] Albert, R., & Barabási, A. L. (2000) + Topology of evolving networks: local events and universality + Physical review letters, 85(24), 5234. + """ + if m < 1 or m >= n: + msg = f"Extended Barabasi-Albert network needs m>=1 and m= 1: + msg = f"Extended Barabasi-Albert network needs p + q <= 1, p={p}, q={q}" + raise nx.NetworkXError(msg) + + # Add m initial nodes (m0 in barabasi-speak) + G = empty_graph(m) + + # List of nodes to represent the preferential attachment random selection. + # At the creation of the graph, all nodes are added to the list + # so that even nodes that are not connected have a chance to get selected, + # for rewiring and adding of edges. + # With each new edge, nodes at the ends of the edge are added to the list. + attachment_preference = [] + attachment_preference.extend(range(m)) + + # Start adding the other n-m nodes. The first node is m. 
+ new_node = m + while new_node < n: + a_probability = seed.random() + + # Total number of edges of a Clique of all the nodes + clique_degree = len(G) - 1 + clique_size = (len(G) * clique_degree) / 2 + + # Adding m new edges, if there is room to add them + if a_probability < p and G.size() <= clique_size - m: + # Select the nodes where an edge can be added + eligible_nodes = [nd for nd, deg in G.degree() if deg < clique_degree] + for i in range(m): + # Choosing a random source node from eligible_nodes + src_node = seed.choice(eligible_nodes) + + # Picking a possible node that is not 'src_node' or + # neighbor with 'src_node', with preferential attachment + prohibited_nodes = list(G[src_node]) + prohibited_nodes.append(src_node) + # This will raise an exception if the sequence is empty + dest_node = seed.choice( + [nd for nd in attachment_preference if nd not in prohibited_nodes] + ) + # Adding the new edge + G.add_edge(src_node, dest_node) + + # Appending both nodes to add to their preferential attachment + attachment_preference.append(src_node) + attachment_preference.append(dest_node) + + # Adjusting the eligible nodes. Degree may be saturated. + if G.degree(src_node) == clique_degree: + eligible_nodes.remove(src_node) + if G.degree(dest_node) == clique_degree and dest_node in eligible_nodes: + eligible_nodes.remove(dest_node) + + # Rewiring m edges, if there are enough edges + elif p <= a_probability < (p + q) and m <= G.size() < clique_size: + # Selecting nodes that have at least 1 edge but that are not + # fully connected to ALL other nodes (center of star). + # These nodes are the pivot nodes of the edges to rewire + eligible_nodes = [nd for nd, deg in G.degree() if 0 < deg < clique_degree] + for i in range(m): + # Choosing a random source node + node = seed.choice(eligible_nodes) + + # The available nodes do have a neighbor at least. 
+ neighbor_nodes = list(G[node]) + + # Choosing the other end that will get detached + src_node = seed.choice(neighbor_nodes) + + # Picking a target node that is not 'node' or + # neighbor with 'node', with preferential attachment + neighbor_nodes.append(node) + dest_node = seed.choice( + [nd for nd in attachment_preference if nd not in neighbor_nodes] + ) + # Rewire + G.remove_edge(node, src_node) + G.add_edge(node, dest_node) + + # Adjusting the preferential attachment list + attachment_preference.remove(src_node) + attachment_preference.append(dest_node) + + # Adjusting the eligible nodes. + # nodes may be saturated or isolated. + if G.degree(src_node) == 0 and src_node in eligible_nodes: + eligible_nodes.remove(src_node) + if dest_node in eligible_nodes: + if G.degree(dest_node) == clique_degree: + eligible_nodes.remove(dest_node) + else: + if G.degree(dest_node) == 1: + eligible_nodes.append(dest_node) + + # Adding new node with m edges + else: + # Select the edges' nodes by preferential attachment + targets = _random_subset(attachment_preference, m, seed) + G.add_edges_from(zip([new_node] * m, targets)) + + # Add one node to the list for each new edge just created. + attachment_preference.extend(targets) + # The new node has m edges to it, plus itself: m + 1 + attachment_preference.extend([new_node] * (m + 1)) + new_node += 1 + return G + + +@py_random_state(3) +@nx._dispatch(graphs=None) +def powerlaw_cluster_graph(n, m, p, seed=None): + """Holme and Kim algorithm for growing graphs with powerlaw + degree distribution and approximate average clustering. + + Parameters + ---------- + n : int + the number of nodes + m : int + the number of random edges to add for each new node + p : float, + Probability of adding a triangle after adding a random edge + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. 
+ + Notes + ----- + The average clustering has a hard time getting above a certain + cutoff that depends on `m`. This cutoff is often quite low. The + transitivity (fraction of triangles to possible triangles) seems to + decrease with network size. + + It is essentially the Barabási–Albert (BA) growth model with an + extra step that each random edge is followed by a chance of + making an edge to one of its neighbors too (and thus a triangle). + + This algorithm improves on BA in the sense that it enables a + higher average clustering to be attained if desired. + + It seems possible to have a disconnected graph with this algorithm + since the initial `m` nodes may not be all linked to a new node + on the first iteration like the BA model. + + Raises + ------ + NetworkXError + If `m` does not satisfy ``1 <= m <= n`` or `p` does not + satisfy ``0 <= p <= 1``. + + References + ---------- + .. [1] P. Holme and B. J. Kim, + "Growing scale-free networks with tunable clustering", + Phys. Rev. E, 65, 026107, 2002. 
+ """ + + if m < 1 or n < m: + raise nx.NetworkXError(f"NetworkXError must have m>1 and m 1 or p < 0: + raise nx.NetworkXError(f"NetworkXError p must be in [0,1], p={p}") + + G = empty_graph(m) # add m initial nodes (m0 in barabasi-speak) + repeated_nodes = list(G.nodes()) # list of existing nodes to sample from + # with nodes repeated once for each adjacent edge + source = m # next node is m + while source < n: # Now add the other n-1 nodes + possible_targets = _random_subset(repeated_nodes, m, seed) + # do one preferential attachment for new node + target = possible_targets.pop() + G.add_edge(source, target) + repeated_nodes.append(target) # add one node to list for each new link + count = 1 + while count < m: # add m-1 more new links + if seed.random() < p: # clustering step: add triangle + neighborhood = [ + nbr + for nbr in G.neighbors(target) + if not G.has_edge(source, nbr) and nbr != source + ] + if neighborhood: # if there is a neighbor without a link + nbr = seed.choice(neighborhood) + G.add_edge(source, nbr) # add triangle + repeated_nodes.append(nbr) + count = count + 1 + continue # go to top of while loop + # else do preferential attachment step if above fails + target = possible_targets.pop() + G.add_edge(source, target) + repeated_nodes.append(target) + count = count + 1 + + repeated_nodes.extend([source] * m) # add source node to list m times + source += 1 + return G + + +@py_random_state(3) +@nx._dispatch(graphs=None) +def random_lobster(n, p1, p2, seed=None): + """Returns a random lobster graph. + + A lobster is a tree that reduces to a caterpillar when pruning all + leaf nodes. A caterpillar is a tree that reduces to a path graph + when pruning all leaf nodes; setting `p2` to zero produces a caterpillar. + + This implementation iterates on the probabilities `p1` and `p2` to add + edges at levels 1 and 2, respectively. 
Graphs are therefore constructed + iteratively with uniform randomness at each level rather than being selected + uniformly at random from the set of all possible lobsters. + + Parameters + ---------- + n : int + The expected number of nodes in the backbone + p1 : float + Probability of adding an edge to the backbone + p2 : float + Probability of adding an edge one level beyond backbone + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + Raises + ------ + NetworkXError + If `p1` or `p2` parameters are >= 1 because the while loops would never finish. + """ + p1, p2 = abs(p1), abs(p2) + if any(p >= 1 for p in [p1, p2]): + raise nx.NetworkXError("Probability values for `p1` and `p2` must both be < 1.") + + # a necessary ingredient in any self-respecting graph library + llen = int(2 * seed.random() * n + 0.5) + L = path_graph(llen) + # build caterpillar: add edges to path graph with probability p1 + current_node = llen - 1 + for n in range(llen): + while seed.random() < p1: # add fuzzy caterpillar parts + current_node += 1 + L.add_edge(n, current_node) + cat_node = current_node + while seed.random() < p2: # add crunchy lobster bits + current_node += 1 + L.add_edge(cat_node, current_node) + return L # voila, un lobster! + + +@py_random_state(1) +@nx._dispatch(graphs=None) +def random_shell_graph(constructor, seed=None): + """Returns a random shell graph for the constructor given. + + Parameters + ---------- + constructor : list of three-tuples + Represents the parameters for a shell, starting at the center + shell. Each element of the list must be of the form `(n, m, + d)`, where `n` is the number of nodes in the shell, `m` is + the number of edges in the shell, and `d` is the ratio of + inter-shell (next) edges to intra-shell edges. If `d` is zero, + there will be no intra-shell edges, and if `d` is one there + will be all possible intra-shell edges. 
+ seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + Examples + -------- + >>> constructor = [(10, 20, 0.8), (20, 40, 0.8)] + >>> G = nx.random_shell_graph(constructor) + + """ + G = empty_graph(0) + + glist = [] + intra_edges = [] + nnodes = 0 + # create gnm graphs for each shell + for n, m, d in constructor: + inter_edges = int(m * d) + intra_edges.append(m - inter_edges) + g = nx.convert_node_labels_to_integers( + gnm_random_graph(n, inter_edges, seed=seed), first_label=nnodes + ) + glist.append(g) + nnodes += n + G = nx.operators.union(G, g) + + # connect the shells randomly + for gi in range(len(glist) - 1): + nlist1 = list(glist[gi]) + nlist2 = list(glist[gi + 1]) + total_edges = intra_edges[gi] + edge_count = 0 + while edge_count < total_edges: + u = seed.choice(nlist1) + v = seed.choice(nlist2) + if u == v or G.has_edge(u, v): + continue + else: + G.add_edge(u, v) + edge_count = edge_count + 1 + return G + + +@py_random_state(2) +@nx._dispatch(graphs=None) +def random_powerlaw_tree(n, gamma=3, seed=None, tries=100): + """Returns a tree with a power law degree distribution. + + Parameters + ---------- + n : int + The number of nodes. + gamma : float + Exponent of the power law. + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + tries : int + Number of attempts to adjust the sequence to make it a tree. + + Raises + ------ + NetworkXError + If no valid sequence is found within the maximum number of + attempts. + + Notes + ----- + A trial power law degree sequence is chosen and then elements are + swapped with new elements from a powerlaw distribution until the + sequence makes a tree (by checking, for example, that the number of + edges is one smaller than the number of nodes). + + """ + # This call may raise a NetworkXError if the number of tries is succeeded. 
+ seq = random_powerlaw_tree_sequence(n, gamma=gamma, seed=seed, tries=tries) + G = degree_sequence_tree(seq) + return G + + +@py_random_state(2) +@nx._dispatch(graphs=None) +def random_powerlaw_tree_sequence(n, gamma=3, seed=None, tries=100): + """Returns a degree sequence for a tree with a power law distribution. + + Parameters + ---------- + n : int, + The number of nodes. + gamma : float + Exponent of the power law. + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + tries : int + Number of attempts to adjust the sequence to make it a tree. + + Raises + ------ + NetworkXError + If no valid sequence is found within the maximum number of + attempts. + + Notes + ----- + A trial power law degree sequence is chosen and then elements are + swapped with new elements from a power law distribution until + the sequence makes a tree (by checking, for example, that the number of + edges is one smaller than the number of nodes). + + """ + # get trial sequence + z = nx.utils.powerlaw_sequence(n, exponent=gamma, seed=seed) + # round to integer values in the range [0,n] + zseq = [min(n, max(round(s), 0)) for s in z] + + # another sequence to swap values from + z = nx.utils.powerlaw_sequence(tries, exponent=gamma, seed=seed) + # round to integer values in the range [0,n] + swap = [min(n, max(round(s), 0)) for s in z] + + for deg in swap: + # If this degree sequence can be the degree sequence of a tree, return + # it. It can be a tree if the number of edges is one fewer than the + # number of nodes, or in other words, `n - sum(zseq) / 2 == 1`. We + # use an equivalent condition below that avoids floating point + # operations. + if 2 * n - sum(zseq) == 2: + return zseq + index = seed.randint(0, n - 1) + zseq[index] = swap.pop() + + raise nx.NetworkXError( + f"Exceeded max ({tries}) attempts for a valid tree sequence." 
+ ) + + +@py_random_state(3) +@nx._dispatch(graphs=None) +def random_kernel_graph(n, kernel_integral, kernel_root=None, seed=None): + r"""Returns an random graph based on the specified kernel. + + The algorithm chooses each of the $[n(n-1)]/2$ possible edges with + probability specified by a kernel $\kappa(x,y)$ [1]_. The kernel + $\kappa(x,y)$ must be a symmetric (in $x,y$), non-negative, + bounded function. + + Parameters + ---------- + n : int + The number of nodes + kernel_integral : function + Function that returns the definite integral of the kernel $\kappa(x,y)$, + $F(y,a,b) := \int_a^b \kappa(x,y)dx$ + kernel_root: function (optional) + Function that returns the root $b$ of the equation $F(y,a,b) = r$. + If None, the root is found using :func:`scipy.optimize.brentq` + (this requires SciPy). + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + Notes + ----- + The kernel is specified through its definite integral which must be + provided as one of the arguments. If the integral and root of the + kernel integral can be found in $O(1)$ time then this algorithm runs in + time $O(n+m)$ where m is the expected number of edges [2]_. + + The nodes are set to integers from $0$ to $n-1$. + + Examples + -------- + Generate an Erdős–Rényi random graph $G(n,c/n)$, with kernel + $\kappa(x,y)=c$ where $c$ is the mean expected degree. + + >>> def integral(u, w, z): + ... return c * (z - w) + >>> def root(u, w, r): + ... return r / c + w + >>> c = 1 + >>> graph = nx.random_kernel_graph(1000, integral, root) + + See Also + -------- + gnp_random_graph + expected_degree_graph + + References + ---------- + .. [1] Bollobás, Béla, Janson, S. and Riordan, O. + "The phase transition in inhomogeneous random graphs", + *Random Structures Algorithms*, 31, 3--122, 2007. + + .. [2] Hagberg A, Lemons N (2015), + "Fast Generation of Sparse Random Kernel Graphs". + PLoS ONE 10(9): e0135177, 2015. 
doi:10.1371/journal.pone.0135177 + """ + if kernel_root is None: + import scipy as sp + + def kernel_root(y, a, r): + def my_function(b): + return kernel_integral(y, a, b) - r + + return sp.optimize.brentq(my_function, a, 1) + + graph = nx.Graph() + graph.add_nodes_from(range(n)) + (i, j) = (1, 1) + while i < n: + r = -math.log(1 - seed.random()) # (1-seed.random()) in (0, 1] + if kernel_integral(i / n, j / n, 1) <= r: + i, j = i + 1, i + 1 + else: + j = math.ceil(n * kernel_root(i / n, j / n, r)) + graph.add_edge(i - 1, j - 1) + return graph diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/generators/small.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/generators/small.py new file mode 100644 index 0000000000000000000000000000000000000000..77109551c19b412926c6db4f8e19bc21ef2ce43d --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/generators/small.py @@ -0,0 +1,978 @@ +""" +Various small and named graphs, together with some compact generators. 
+ +""" + +__all__ = [ + "LCF_graph", + "bull_graph", + "chvatal_graph", + "cubical_graph", + "desargues_graph", + "diamond_graph", + "dodecahedral_graph", + "frucht_graph", + "heawood_graph", + "hoffman_singleton_graph", + "house_graph", + "house_x_graph", + "icosahedral_graph", + "krackhardt_kite_graph", + "moebius_kantor_graph", + "octahedral_graph", + "pappus_graph", + "petersen_graph", + "sedgewick_maze_graph", + "tetrahedral_graph", + "truncated_cube_graph", + "truncated_tetrahedron_graph", + "tutte_graph", +] + +from functools import wraps + +import networkx as nx +from networkx.exception import NetworkXError +from networkx.generators.classic import ( + complete_graph, + cycle_graph, + empty_graph, + path_graph, +) + + +def _raise_on_directed(func): + """ + A decorator which inspects the `create_using` argument and raises a + NetworkX exception when `create_using` is a DiGraph (class or instance) for + graph generators that do not support directed outputs. + """ + + @wraps(func) + def wrapper(*args, **kwargs): + if kwargs.get("create_using") is not None: + G = nx.empty_graph(create_using=kwargs["create_using"]) + if G.is_directed(): + raise NetworkXError("Directed Graph not supported") + return func(*args, **kwargs) + + return wrapper + + +@nx._dispatch(graphs=None) +def LCF_graph(n, shift_list, repeats, create_using=None): + """ + Return the cubic graph specified in LCF notation. + + LCF notation (LCF=Lederberg-Coxeter-Fruchte) is a compressed + notation used in the generation of various cubic Hamiltonian + graphs of high symmetry. See, for example, dodecahedral_graph, + desargues_graph, heawood_graph and pappus_graph below. + + n (number of nodes) + The starting graph is the n-cycle with nodes 0,...,n-1. + (The null graph is returned if n < 0.) 
+ + shift_list = [s1,s2,..,sk], a list of integer shifts mod n, + + repeats + integer specifying the number of times that shifts in shift_list + are successively applied to each v_current in the n-cycle + to generate an edge between v_current and v_current+shift mod n. + + For v1 cycling through the n-cycle a total of k*repeats + with shift cycling through shiftlist repeats times connect + v1 with v1+shift mod n + + The utility graph $K_{3,3}$ + + >>> G = nx.LCF_graph(6, [3, -3], 3) + + The Heawood graph + + >>> G = nx.LCF_graph(14, [5, -5], 7) + + See http://mathworld.wolfram.com/LCFNotation.html for a description + and references. + + """ + if n <= 0: + return empty_graph(0, create_using) + + # start with the n-cycle + G = cycle_graph(n, create_using) + if G.is_directed(): + raise NetworkXError("Directed Graph not supported") + G.name = "LCF_graph" + nodes = sorted(G) + + n_extra_edges = repeats * len(shift_list) + # edges are added n_extra_edges times + # (not all of these need be new) + if n_extra_edges < 1: + return G + + for i in range(n_extra_edges): + shift = shift_list[i % len(shift_list)] # cycle through shift_list + v1 = nodes[i % n] # cycle repeatedly through nodes + v2 = nodes[(i + shift) % n] + G.add_edge(v1, v2) + return G + + +# ------------------------------------------------------------------------------- +# Various small and named graphs +# ------------------------------------------------------------------------------- + + +@_raise_on_directed +@nx._dispatch(graphs=None) +def bull_graph(create_using=None): + """ + Returns the Bull Graph + + The Bull Graph has 5 nodes and 5 edges. It is a planar undirected + graph in the form of a triangle with two disjoint pendant edges [1]_ + The name comes from the triangle and pendant edges representing + respectively the body and legs of a bull. + + Parameters + ---------- + create_using : NetworkX graph constructor, optional (default=nx.Graph) + Graph type to create. 
If graph instance, then cleared before populated. + + Returns + ------- + G : networkx Graph + A bull graph with 5 nodes + + References + ---------- + .. [1] https://en.wikipedia.org/wiki/Bull_graph. + + """ + G = nx.from_dict_of_lists( + {0: [1, 2], 1: [0, 2, 3], 2: [0, 1, 4], 3: [1], 4: [2]}, + create_using=create_using, + ) + G.name = "Bull Graph" + return G + + +@_raise_on_directed +@nx._dispatch(graphs=None) +def chvatal_graph(create_using=None): + """ + Returns the Chvátal Graph + + The Chvátal Graph is an undirected graph with 12 nodes and 24 edges [1]_. + It has 370 distinct (directed) Hamiltonian cycles, giving a unique generalized + LCF notation of order 4, two of order 6 , and 43 of order 1 [2]_. + + Parameters + ---------- + create_using : NetworkX graph constructor, optional (default=nx.Graph) + Graph type to create. If graph instance, then cleared before populated. + + Returns + ------- + G : networkx Graph + The Chvátal graph with 12 nodes and 24 edges + + References + ---------- + .. [1] https://en.wikipedia.org/wiki/Chv%C3%A1tal_graph + .. [2] https://mathworld.wolfram.com/ChvatalGraph.html + + """ + G = nx.from_dict_of_lists( + { + 0: [1, 4, 6, 9], + 1: [2, 5, 7], + 2: [3, 6, 8], + 3: [4, 7, 9], + 4: [5, 8], + 5: [10, 11], + 6: [10, 11], + 7: [8, 11], + 8: [10], + 9: [10, 11], + }, + create_using=create_using, + ) + G.name = "Chvatal Graph" + return G + + +@_raise_on_directed +@nx._dispatch(graphs=None) +def cubical_graph(create_using=None): + """ + Returns the 3-regular Platonic Cubical Graph + + The skeleton of the cube (the nodes and edges) form a graph, with 8 + nodes, and 12 edges. It is a special case of the hypercube graph. + It is one of 5 Platonic graphs, each a skeleton of its + Platonic solid [1]_. + Such graphs arise in parallel processing in computers. + + Parameters + ---------- + create_using : NetworkX graph constructor, optional (default=nx.Graph) + Graph type to create. If graph instance, then cleared before populated. 
+ + Returns + ------- + G : networkx Graph + A cubical graph with 8 nodes and 12 edges + + References + ---------- + .. [1] https://en.wikipedia.org/wiki/Cube#Cubical_graph + + """ + G = nx.from_dict_of_lists( + { + 0: [1, 3, 4], + 1: [0, 2, 7], + 2: [1, 3, 6], + 3: [0, 2, 5], + 4: [0, 5, 7], + 5: [3, 4, 6], + 6: [2, 5, 7], + 7: [1, 4, 6], + }, + create_using=create_using, + ) + G.name = "Platonic Cubical Graph" + return G + + +@nx._dispatch(graphs=None) +def desargues_graph(create_using=None): + """ + Returns the Desargues Graph + + The Desargues Graph is a non-planar, distance-transitive cubic graph + with 20 nodes and 30 edges [1]_. + It is a symmetric graph. It can be represented in LCF notation + as [5,-5,9,-9]^5 [2]_. + + Parameters + ---------- + create_using : NetworkX graph constructor, optional (default=nx.Graph) + Graph type to create. If graph instance, then cleared before populated. + + Returns + ------- + G : networkx Graph + Desargues Graph with 20 nodes and 30 edges + + References + ---------- + .. [1] https://en.wikipedia.org/wiki/Desargues_graph + .. [2] https://mathworld.wolfram.com/DesarguesGraph.html + """ + G = LCF_graph(20, [5, -5, 9, -9], 5, create_using) + G.name = "Desargues Graph" + return G + + +@_raise_on_directed +@nx._dispatch(graphs=None) +def diamond_graph(create_using=None): + """ + Returns the Diamond graph + + The Diamond Graph is planar undirected graph with 4 nodes and 5 edges. + It is also sometimes known as the double triangle graph or kite graph [1]_. + + Parameters + ---------- + create_using : NetworkX graph constructor, optional (default=nx.Graph) + Graph type to create. If graph instance, then cleared before populated. + + Returns + ------- + G : networkx Graph + Diamond Graph with 4 nodes and 5 edges + + References + ---------- + .. 
[1] https://mathworld.wolfram.com/DiamondGraph.html + """ + G = nx.from_dict_of_lists( + {0: [1, 2], 1: [0, 2, 3], 2: [0, 1, 3], 3: [1, 2]}, create_using=create_using + ) + G.name = "Diamond Graph" + return G + + +@nx._dispatch(graphs=None) +def dodecahedral_graph(create_using=None): + """ + Returns the Platonic Dodecahedral graph. + + The dodecahedral graph has 20 nodes and 30 edges. The skeleton of the + dodecahedron forms a graph. It is one of 5 Platonic graphs [1]_. + It can be described in LCF notation as: + ``[10, 7, 4, -4, -7, 10, -4, 7, -7, 4]^2`` [2]_. + + Parameters + ---------- + create_using : NetworkX graph constructor, optional (default=nx.Graph) + Graph type to create. If graph instance, then cleared before populated. + + Returns + ------- + G : networkx Graph + Dodecahedral Graph with 20 nodes and 30 edges + + References + ---------- + .. [1] https://en.wikipedia.org/wiki/Regular_dodecahedron#Dodecahedral_graph + .. [2] https://mathworld.wolfram.com/DodecahedralGraph.html + + """ + G = LCF_graph(20, [10, 7, 4, -4, -7, 10, -4, 7, -7, 4], 2, create_using) + G.name = "Dodecahedral Graph" + return G + + +@nx._dispatch(graphs=None) +def frucht_graph(create_using=None): + """ + Returns the Frucht Graph. + + The Frucht Graph is the smallest cubical graph whose + automorphism group consists only of the identity element [1]_. + It has 12 nodes and 18 edges and no nontrivial symmetries. + It is planar and Hamiltonian [2]_. + + Parameters + ---------- + create_using : NetworkX graph constructor, optional (default=nx.Graph) + Graph type to create. If graph instance, then cleared before populated. + + Returns + ------- + G : networkx Graph + Frucht Graph with 12 nodes and 18 edges + + References + ---------- + .. [1] https://en.wikipedia.org/wiki/Frucht_graph + .. 
[2] https://mathworld.wolfram.com/FruchtGraph.html + + """ + G = cycle_graph(7, create_using) + G.add_edges_from( + [ + [0, 7], + [1, 7], + [2, 8], + [3, 9], + [4, 9], + [5, 10], + [6, 10], + [7, 11], + [8, 11], + [8, 9], + [10, 11], + ] + ) + + G.name = "Frucht Graph" + return G + + +@nx._dispatch(graphs=None) +def heawood_graph(create_using=None): + """ + Returns the Heawood Graph, a (3,6) cage. + + The Heawood Graph is an undirected graph with 14 nodes and 21 edges, + named after Percy John Heawood [1]_. + It is cubic symmetric, nonplanar, Hamiltonian, and can be represented + in LCF notation as ``[5,-5]^7`` [2]_. + It is the unique (3,6)-cage: the regular cubic graph of girth 6 with + minimal number of vertices [3]_. + + Parameters + ---------- + create_using : NetworkX graph constructor, optional (default=nx.Graph) + Graph type to create. If graph instance, then cleared before populated. + + Returns + ------- + G : networkx Graph + Heawood Graph with 14 nodes and 21 edges + + References + ---------- + .. [1] https://en.wikipedia.org/wiki/Heawood_graph + .. [2] https://mathworld.wolfram.com/HeawoodGraph.html + .. [3] https://www.win.tue.nl/~aeb/graphs/Heawood.html + + """ + G = LCF_graph(14, [5, -5], 7, create_using) + G.name = "Heawood Graph" + return G + + +@nx._dispatch(graphs=None) +def hoffman_singleton_graph(): + """ + Returns the Hoffman-Singleton Graph. + + The Hoffman–Singleton graph is a symmetrical undirected graph + with 50 nodes and 175 edges. + All indices lie in ``Z % 5``: that is, the integers mod 5 [1]_. + It is the only regular graph of vertex degree 7, diameter 2, and girth 5. + It is the unique (7,5)-cage graph and Moore graph, and contains many + copies of the Petersen graph [2]_. + + Returns + ------- + G : networkx Graph + Hoffman–Singleton Graph with 50 nodes and 175 edges + + Notes + ----- + Constructed from pentagon and pentagram as follows: Take five pentagons $P_h$ + and five pentagrams $Q_i$ . 
Join vertex $j$ of $P_h$ to vertex $h·i+j$ of $Q_i$ [3]_. + + References + ---------- + .. [1] https://blogs.ams.org/visualinsight/2016/02/01/hoffman-singleton-graph/ + .. [2] https://mathworld.wolfram.com/Hoffman-SingletonGraph.html + .. [3] https://en.wikipedia.org/wiki/Hoffman%E2%80%93Singleton_graph + + """ + G = nx.Graph() + for i in range(5): + for j in range(5): + G.add_edge(("pentagon", i, j), ("pentagon", i, (j - 1) % 5)) + G.add_edge(("pentagon", i, j), ("pentagon", i, (j + 1) % 5)) + G.add_edge(("pentagram", i, j), ("pentagram", i, (j - 2) % 5)) + G.add_edge(("pentagram", i, j), ("pentagram", i, (j + 2) % 5)) + for k in range(5): + G.add_edge(("pentagon", i, j), ("pentagram", k, (i * k + j) % 5)) + G = nx.convert_node_labels_to_integers(G) + G.name = "Hoffman-Singleton Graph" + return G + + +@_raise_on_directed +@nx._dispatch(graphs=None) +def house_graph(create_using=None): + """ + Returns the House graph (square with triangle on top) + + The house graph is a simple undirected graph with + 5 nodes and 6 edges [1]_. + + Parameters + ---------- + create_using : NetworkX graph constructor, optional (default=nx.Graph) + Graph type to create. If graph instance, then cleared before populated. + + Returns + ------- + G : networkx Graph + House graph in the form of a square with a triangle on top + + References + ---------- + .. [1] https://mathworld.wolfram.com/HouseGraph.html + """ + G = nx.from_dict_of_lists( + {0: [1, 2], 1: [0, 3], 2: [0, 3, 4], 3: [1, 2, 4], 4: [2, 3]}, + create_using=create_using, + ) + G.name = "House Graph" + return G + + +@_raise_on_directed +@nx._dispatch(graphs=None) +def house_x_graph(create_using=None): + """ + Returns the House graph with a cross inside the house square. + + The House X-graph is the House graph plus the two edges connecting diagonally + opposite vertices of the square base. It is also one of the two graphs + obtained by removing two edges from the pentatope graph [1]_. 
+ + Parameters + ---------- + create_using : NetworkX graph constructor, optional (default=nx.Graph) + Graph type to create. If graph instance, then cleared before populated. + + Returns + ------- + G : networkx Graph + House graph with diagonal vertices connected + + References + ---------- + .. [1] https://mathworld.wolfram.com/HouseGraph.html + """ + G = house_graph(create_using) + G.add_edges_from([(0, 3), (1, 2)]) + G.name = "House-with-X-inside Graph" + return G + + +@_raise_on_directed +@nx._dispatch(graphs=None) +def icosahedral_graph(create_using=None): + """ + Returns the Platonic Icosahedral graph. + + The icosahedral graph has 12 nodes and 30 edges. It is a Platonic graph + whose nodes have the connectivity of the icosahedron. It is undirected, + regular and Hamiltonian [1]_. + + Parameters + ---------- + create_using : NetworkX graph constructor, optional (default=nx.Graph) + Graph type to create. If graph instance, then cleared before populated. + + Returns + ------- + G : networkx Graph + Icosahedral graph with 12 nodes and 30 edges. + + References + ---------- + .. [1] https://mathworld.wolfram.com/IcosahedralGraph.html + """ + G = nx.from_dict_of_lists( + { + 0: [1, 5, 7, 8, 11], + 1: [2, 5, 6, 8], + 2: [3, 6, 8, 9], + 3: [4, 6, 9, 10], + 4: [5, 6, 10, 11], + 5: [6, 11], + 7: [8, 9, 10, 11], + 8: [9], + 9: [10], + 10: [11], + }, + create_using=create_using, + ) + G.name = "Platonic Icosahedral Graph" + return G + + +@_raise_on_directed +@nx._dispatch(graphs=None) +def krackhardt_kite_graph(create_using=None): + """ + Returns the Krackhardt Kite Social Network. + + A 10 actor social network introduced by David Krackhardt + to illustrate different centrality measures [1]_. + + Parameters + ---------- + create_using : NetworkX graph constructor, optional (default=nx.Graph) + Graph type to create. If graph instance, then cleared before populated. 
+ + Returns + ------- + G : networkx Graph + Krackhardt Kite graph with 10 nodes and 18 edges + + Notes + ----- + The traditional labeling is: + Andre=1, Beverley=2, Carol=3, Diane=4, + Ed=5, Fernando=6, Garth=7, Heather=8, Ike=9, Jane=10. + + References + ---------- + .. [1] Krackhardt, David. "Assessing the Political Landscape: Structure, + Cognition, and Power in Organizations". Administrative Science Quarterly. + 35 (2): 342–369. doi:10.2307/2393394. JSTOR 2393394. June 1990. + + """ + G = nx.from_dict_of_lists( + { + 0: [1, 2, 3, 5], + 1: [0, 3, 4, 6], + 2: [0, 3, 5], + 3: [0, 1, 2, 4, 5, 6], + 4: [1, 3, 6], + 5: [0, 2, 3, 6, 7], + 6: [1, 3, 4, 5, 7], + 7: [5, 6, 8], + 8: [7, 9], + 9: [8], + }, + create_using=create_using, + ) + G.name = "Krackhardt Kite Social Network" + return G + + +@nx._dispatch(graphs=None) +def moebius_kantor_graph(create_using=None): + """ + Returns the Moebius-Kantor graph. + + The Möbius-Kantor graph is the cubic symmetric graph on 16 nodes. + Its LCF notation is [5,-5]^8, and it is isomorphic to the generalized + Petersen graph [1]_. + + Parameters + ---------- + create_using : NetworkX graph constructor, optional (default=nx.Graph) + Graph type to create. If graph instance, then cleared before populated. + + Returns + ------- + G : networkx Graph + Moebius-Kantor graph + + References + ---------- + .. [1] https://en.wikipedia.org/wiki/M%C3%B6bius%E2%80%93Kantor_graph + + """ + G = LCF_graph(16, [5, -5], 8, create_using) + G.name = "Moebius-Kantor Graph" + return G + + +@_raise_on_directed +@nx._dispatch(graphs=None) +def octahedral_graph(create_using=None): + """ + Returns the Platonic Octahedral graph. + + The octahedral graph is the 6-node 12-edge Platonic graph having the + connectivity of the octahedron [1]_. 
If 6 couples go to a party, + and each person shakes hands with every person except his or her partner, + then this graph describes the set of handshakes that take place; + for this reason it is also called the cocktail party graph [2]_. + + Parameters + ---------- + create_using : NetworkX graph constructor, optional (default=nx.Graph) + Graph type to create. If graph instance, then cleared before populated. + + Returns + ------- + G : networkx Graph + Octahedral graph + + References + ---------- + .. [1] https://mathworld.wolfram.com/OctahedralGraph.html + .. [2] https://en.wikipedia.org/wiki/Tur%C3%A1n_graph#Special_cases + + """ + G = nx.from_dict_of_lists( + {0: [1, 2, 3, 4], 1: [2, 3, 5], 2: [4, 5], 3: [4, 5], 4: [5]}, + create_using=create_using, + ) + G.name = "Platonic Octahedral Graph" + return G + + +@nx._dispatch(graphs=None) +def pappus_graph(): + """ + Returns the Pappus graph. + + The Pappus graph is a cubic symmetric distance-regular graph with 18 nodes + and 27 edges. It is Hamiltonian and can be represented in LCF notation as + [5,7,-7,7,-7,-5]^3 [1]_. + + Returns + ------- + G : networkx Graph + Pappus graph + + References + ---------- + .. [1] https://en.wikipedia.org/wiki/Pappus_graph + """ + G = LCF_graph(18, [5, 7, -7, 7, -7, -5], 3) + G.name = "Pappus Graph" + return G + + +@_raise_on_directed +@nx._dispatch(graphs=None) +def petersen_graph(create_using=None): + """ + Returns the Petersen graph. + + The Peterson graph is a cubic, undirected graph with 10 nodes and 15 edges [1]_. + Julius Petersen constructed the graph as the smallest counterexample + against the claim that a connected bridgeless cubic graph + has an edge colouring with three colours [2]_. + + Parameters + ---------- + create_using : NetworkX graph constructor, optional (default=nx.Graph) + Graph type to create. If graph instance, then cleared before populated. + + Returns + ------- + G : networkx Graph + Petersen graph + + References + ---------- + .. 
[1] https://en.wikipedia.org/wiki/Petersen_graph + .. [2] https://www.win.tue.nl/~aeb/drg/graphs/Petersen.html + """ + G = nx.from_dict_of_lists( + { + 0: [1, 4, 5], + 1: [0, 2, 6], + 2: [1, 3, 7], + 3: [2, 4, 8], + 4: [3, 0, 9], + 5: [0, 7, 8], + 6: [1, 8, 9], + 7: [2, 5, 9], + 8: [3, 5, 6], + 9: [4, 6, 7], + }, + create_using=create_using, + ) + G.name = "Petersen Graph" + return G + + +@nx._dispatch(graphs=None) +def sedgewick_maze_graph(create_using=None): + """ + Return a small maze with a cycle. + + This is the maze used in Sedgewick, 3rd Edition, Part 5, Graph + Algorithms, Chapter 18, e.g. Figure 18.2 and following [1]_. + Nodes are numbered 0,..,7 + + Parameters + ---------- + create_using : NetworkX graph constructor, optional (default=nx.Graph) + Graph type to create. If graph instance, then cleared before populated. + + Returns + ------- + G : networkx Graph + Small maze with a cycle + + References + ---------- + .. [1] Figure 18.2, Chapter 18, Graph Algorithms (3rd Ed), Sedgewick + """ + G = empty_graph(0, create_using) + G.add_nodes_from(range(8)) + G.add_edges_from([[0, 2], [0, 7], [0, 5]]) + G.add_edges_from([[1, 7], [2, 6]]) + G.add_edges_from([[3, 4], [3, 5]]) + G.add_edges_from([[4, 5], [4, 7], [4, 6]]) + G.name = "Sedgewick Maze" + return G + + +@nx._dispatch(graphs=None) +def tetrahedral_graph(create_using=None): + """ + Returns the 3-regular Platonic Tetrahedral graph. + + Tetrahedral graph has 4 nodes and 6 edges. It is a + special case of the complete graph, K4, and wheel graph, W4. + It is one of the 5 platonic graphs [1]_. + + Parameters + ---------- + create_using : NetworkX graph constructor, optional (default=nx.Graph) + Graph type to create. If graph instance, then cleared before populated. + + Returns + ------- + G : networkx Graph + Tetrahedral Graph + + References + ---------- + .. 
[1] https://en.wikipedia.org/wiki/Tetrahedron#Tetrahedral_graph + + """ + G = complete_graph(4, create_using) + G.name = "Platonic Tetrahedral Graph" + return G + + +@_raise_on_directed +@nx._dispatch(graphs=None) +def truncated_cube_graph(create_using=None): + """ + Returns the skeleton of the truncated cube. + + The truncated cube is an Archimedean solid with 14 regular + faces (6 octagonal and 8 triangular), 36 edges and 24 nodes [1]_. + The truncated cube is created by truncating (cutting off) the tips + of the cube one third of the way into each edge [2]_. + + Parameters + ---------- + create_using : NetworkX graph constructor, optional (default=nx.Graph) + Graph type to create. If graph instance, then cleared before populated. + + Returns + ------- + G : networkx Graph + Skeleton of the truncated cube + + References + ---------- + .. [1] https://en.wikipedia.org/wiki/Truncated_cube + .. [2] https://www.coolmath.com/reference/polyhedra-truncated-cube + + """ + G = nx.from_dict_of_lists( + { + 0: [1, 2, 4], + 1: [11, 14], + 2: [3, 4], + 3: [6, 8], + 4: [5], + 5: [16, 18], + 6: [7, 8], + 7: [10, 12], + 8: [9], + 9: [17, 20], + 10: [11, 12], + 11: [14], + 12: [13], + 13: [21, 22], + 14: [15], + 15: [19, 23], + 16: [17, 18], + 17: [20], + 18: [19], + 19: [23], + 20: [21], + 21: [22], + 22: [23], + }, + create_using=create_using, + ) + G.name = "Truncated Cube Graph" + return G + + +@nx._dispatch(graphs=None) +def truncated_tetrahedron_graph(create_using=None): + """ + Returns the skeleton of the truncated Platonic tetrahedron. + + The truncated tetrahedron is an Archimedean solid with 4 regular hexagonal faces, + 4 equilateral triangle faces, 12 nodes and 18 edges. It can be constructed by truncating + all 4 vertices of a regular tetrahedron at one third of the original edge length [1]_. + + Parameters + ---------- + create_using : NetworkX graph constructor, optional (default=nx.Graph) + Graph type to create. If graph instance, then cleared before populated. 
+ + Returns + ------- + G : networkx Graph + Skeleton of the truncated tetrahedron + + References + ---------- + .. [1] https://en.wikipedia.org/wiki/Truncated_tetrahedron + + """ + G = path_graph(12, create_using) + G.add_edges_from([(0, 2), (0, 9), (1, 6), (3, 11), (4, 11), (5, 7), (8, 10)]) + G.name = "Truncated Tetrahedron Graph" + return G + + +@_raise_on_directed +@nx._dispatch(graphs=None) +def tutte_graph(create_using=None): + """ + Returns the Tutte graph. + + The Tutte graph is a cubic polyhedral, non-Hamiltonian graph. It has + 46 nodes and 69 edges. + It is a counterexample to Tait's conjecture that every 3-regular polyhedron + has a Hamiltonian cycle. + It can be realized geometrically from a tetrahedron by multiply truncating + three of its vertices [1]_. + + Parameters + ---------- + create_using : NetworkX graph constructor, optional (default=nx.Graph) + Graph type to create. If graph instance, then cleared before populated. + + Returns + ------- + G : networkx Graph + Tutte graph + + References + ---------- + .. 
[1] https://en.wikipedia.org/wiki/Tutte_graph + """ + G = nx.from_dict_of_lists( + { + 0: [1, 2, 3], + 1: [4, 26], + 2: [10, 11], + 3: [18, 19], + 4: [5, 33], + 5: [6, 29], + 6: [7, 27], + 7: [8, 14], + 8: [9, 38], + 9: [10, 37], + 10: [39], + 11: [12, 39], + 12: [13, 35], + 13: [14, 15], + 14: [34], + 15: [16, 22], + 16: [17, 44], + 17: [18, 43], + 18: [45], + 19: [20, 45], + 20: [21, 41], + 21: [22, 23], + 22: [40], + 23: [24, 27], + 24: [25, 32], + 25: [26, 31], + 26: [33], + 27: [28], + 28: [29, 32], + 29: [30], + 30: [31, 33], + 31: [32], + 34: [35, 38], + 35: [36], + 36: [37, 39], + 37: [38], + 40: [41, 44], + 41: [42], + 42: [43, 45], + 43: [44], + }, + create_using=create_using, + ) + G.name = "Tutte's Graph" + return G diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/generators/social.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/generators/social.py new file mode 100644 index 0000000000000000000000000000000000000000..5391c834acf9748fe1d07db3ea2410b5103b3645 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/generators/social.py @@ -0,0 +1,546 @@ +""" +Famous social networks. +""" +import networkx as nx + +__all__ = [ + "karate_club_graph", + "davis_southern_women_graph", + "florentine_families_graph", + "les_miserables_graph", +] + + +@nx._dispatch(graphs=None) +def karate_club_graph(): + """Returns Zachary's Karate Club graph. + + Each node in the returned graph has a node attribute 'club' that + indicates the name of the club to which the member represented by that node + belongs, either 'Mr. Hi' or 'Officer'. Each edge has a weight based on the + number of contexts in which that edge's incident node members interacted. + + Examples + -------- + To get the name of the club to which a node belongs:: + + >>> G = nx.karate_club_graph() + >>> G.nodes[5]["club"] + 'Mr. Hi' + >>> G.nodes[9]["club"] + 'Officer' + + References + ---------- + .. 
[1] Zachary, Wayne W. + "An Information Flow Model for Conflict and Fission in Small Groups." + *Journal of Anthropological Research*, 33, 452--473, (1977). + """ + # Create the set of all members, and the members of each club. + all_members = set(range(34)) + club1 = {0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 11, 12, 13, 16, 17, 19, 21} + # club2 = all_members - club1 + + G = nx.Graph() + G.add_nodes_from(all_members) + G.name = "Zachary's Karate Club" + + zacharydat = """\ +0 4 5 3 3 3 3 2 2 0 2 3 2 3 0 0 0 2 0 2 0 2 0 0 0 0 0 0 0 0 0 2 0 0 +4 0 6 3 0 0 0 4 0 0 0 0 0 5 0 0 0 1 0 2 0 2 0 0 0 0 0 0 0 0 2 0 0 0 +5 6 0 3 0 0 0 4 5 1 0 0 0 3 0 0 0 0 0 0 0 0 0 0 0 0 0 2 2 0 0 0 3 0 +3 3 3 0 0 0 0 3 0 0 0 0 3 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +3 0 0 0 0 0 2 0 0 0 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +3 0 0 0 0 0 5 0 0 0 3 0 0 0 0 0 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +3 0 0 0 2 5 0 0 0 0 0 0 0 0 0 0 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +2 4 4 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +2 0 5 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 3 0 4 3 +0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 2 +2 0 0 0 3 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +1 0 0 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +3 5 3 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 3 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 3 2 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 3 4 +0 0 0 0 0 3 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +2 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 2 +2 2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 3 1 +2 2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 2 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 5 0 4 0 2 0 0 5 4 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 2 0 3 0 0 0 2 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 5 2 0 0 0 0 0 0 7 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 4 0 0 0 2 +0 0 2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 4 3 0 0 0 0 0 0 0 0 4 +0 0 2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 2 0 2 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 3 0 0 4 0 0 0 0 0 3 2 +0 2 0 0 0 0 0 0 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 3 3 +2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 2 7 0 0 2 0 0 0 4 4 +0 0 2 0 0 0 0 0 3 0 0 0 0 0 3 3 0 0 1 0 3 0 2 5 0 0 0 0 0 4 3 4 0 5 +0 0 0 0 0 0 0 0 4 2 0 0 0 3 2 4 0 0 2 1 1 0 3 4 0 0 2 4 2 2 3 4 5 0""" + + for row, line in enumerate(zacharydat.split("\n")): + thisrow = [int(b) for b in line.split()] + for col, entry in enumerate(thisrow): + if entry >= 1: + G.add_edge(row, col, weight=entry) + + # Add the name of each member's club as a node attribute. + for v in G: + G.nodes[v]["club"] = "Mr. Hi" if v in club1 else "Officer" + return G + + +@nx._dispatch(graphs=None) +def davis_southern_women_graph(): + """Returns Davis Southern women social network. + + This is a bipartite graph. + + References + ---------- + .. [1] A. Davis, Gardner, B. B., Gardner, M. R., 1941. Deep South. + University of Chicago Press, Chicago, IL. 
+ """ + G = nx.Graph() + # Top nodes + women = [ + "Evelyn Jefferson", + "Laura Mandeville", + "Theresa Anderson", + "Brenda Rogers", + "Charlotte McDowd", + "Frances Anderson", + "Eleanor Nye", + "Pearl Oglethorpe", + "Ruth DeSand", + "Verne Sanderson", + "Myra Liddel", + "Katherina Rogers", + "Sylvia Avondale", + "Nora Fayette", + "Helen Lloyd", + "Dorothy Murchison", + "Olivia Carleton", + "Flora Price", + ] + G.add_nodes_from(women, bipartite=0) + # Bottom nodes + events = [ + "E1", + "E2", + "E3", + "E4", + "E5", + "E6", + "E7", + "E8", + "E9", + "E10", + "E11", + "E12", + "E13", + "E14", + ] + G.add_nodes_from(events, bipartite=1) + + G.add_edges_from( + [ + ("Evelyn Jefferson", "E1"), + ("Evelyn Jefferson", "E2"), + ("Evelyn Jefferson", "E3"), + ("Evelyn Jefferson", "E4"), + ("Evelyn Jefferson", "E5"), + ("Evelyn Jefferson", "E6"), + ("Evelyn Jefferson", "E8"), + ("Evelyn Jefferson", "E9"), + ("Laura Mandeville", "E1"), + ("Laura Mandeville", "E2"), + ("Laura Mandeville", "E3"), + ("Laura Mandeville", "E5"), + ("Laura Mandeville", "E6"), + ("Laura Mandeville", "E7"), + ("Laura Mandeville", "E8"), + ("Theresa Anderson", "E2"), + ("Theresa Anderson", "E3"), + ("Theresa Anderson", "E4"), + ("Theresa Anderson", "E5"), + ("Theresa Anderson", "E6"), + ("Theresa Anderson", "E7"), + ("Theresa Anderson", "E8"), + ("Theresa Anderson", "E9"), + ("Brenda Rogers", "E1"), + ("Brenda Rogers", "E3"), + ("Brenda Rogers", "E4"), + ("Brenda Rogers", "E5"), + ("Brenda Rogers", "E6"), + ("Brenda Rogers", "E7"), + ("Brenda Rogers", "E8"), + ("Charlotte McDowd", "E3"), + ("Charlotte McDowd", "E4"), + ("Charlotte McDowd", "E5"), + ("Charlotte McDowd", "E7"), + ("Frances Anderson", "E3"), + ("Frances Anderson", "E5"), + ("Frances Anderson", "E6"), + ("Frances Anderson", "E8"), + ("Eleanor Nye", "E5"), + ("Eleanor Nye", "E6"), + ("Eleanor Nye", "E7"), + ("Eleanor Nye", "E8"), + ("Pearl Oglethorpe", "E6"), + ("Pearl Oglethorpe", "E8"), + ("Pearl Oglethorpe", "E9"), + ("Ruth DeSand", 
"E5"), + ("Ruth DeSand", "E7"), + ("Ruth DeSand", "E8"), + ("Ruth DeSand", "E9"), + ("Verne Sanderson", "E7"), + ("Verne Sanderson", "E8"), + ("Verne Sanderson", "E9"), + ("Verne Sanderson", "E12"), + ("Myra Liddel", "E8"), + ("Myra Liddel", "E9"), + ("Myra Liddel", "E10"), + ("Myra Liddel", "E12"), + ("Katherina Rogers", "E8"), + ("Katherina Rogers", "E9"), + ("Katherina Rogers", "E10"), + ("Katherina Rogers", "E12"), + ("Katherina Rogers", "E13"), + ("Katherina Rogers", "E14"), + ("Sylvia Avondale", "E7"), + ("Sylvia Avondale", "E8"), + ("Sylvia Avondale", "E9"), + ("Sylvia Avondale", "E10"), + ("Sylvia Avondale", "E12"), + ("Sylvia Avondale", "E13"), + ("Sylvia Avondale", "E14"), + ("Nora Fayette", "E6"), + ("Nora Fayette", "E7"), + ("Nora Fayette", "E9"), + ("Nora Fayette", "E10"), + ("Nora Fayette", "E11"), + ("Nora Fayette", "E12"), + ("Nora Fayette", "E13"), + ("Nora Fayette", "E14"), + ("Helen Lloyd", "E7"), + ("Helen Lloyd", "E8"), + ("Helen Lloyd", "E10"), + ("Helen Lloyd", "E11"), + ("Helen Lloyd", "E12"), + ("Dorothy Murchison", "E8"), + ("Dorothy Murchison", "E9"), + ("Olivia Carleton", "E9"), + ("Olivia Carleton", "E11"), + ("Flora Price", "E9"), + ("Flora Price", "E11"), + ] + ) + G.graph["top"] = women + G.graph["bottom"] = events + return G + + +@nx._dispatch(graphs=None) +def florentine_families_graph(): + """Returns Florentine families graph. + + References + ---------- + .. [1] Ronald L. Breiger and Philippa E. 
Pattison + Cumulated social roles: The duality of persons and their algebras,1 + Social Networks, Volume 8, Issue 3, September 1986, Pages 215-256 + """ + G = nx.Graph() + G.add_edge("Acciaiuoli", "Medici") + G.add_edge("Castellani", "Peruzzi") + G.add_edge("Castellani", "Strozzi") + G.add_edge("Castellani", "Barbadori") + G.add_edge("Medici", "Barbadori") + G.add_edge("Medici", "Ridolfi") + G.add_edge("Medici", "Tornabuoni") + G.add_edge("Medici", "Albizzi") + G.add_edge("Medici", "Salviati") + G.add_edge("Salviati", "Pazzi") + G.add_edge("Peruzzi", "Strozzi") + G.add_edge("Peruzzi", "Bischeri") + G.add_edge("Strozzi", "Ridolfi") + G.add_edge("Strozzi", "Bischeri") + G.add_edge("Ridolfi", "Tornabuoni") + G.add_edge("Tornabuoni", "Guadagni") + G.add_edge("Albizzi", "Ginori") + G.add_edge("Albizzi", "Guadagni") + G.add_edge("Bischeri", "Guadagni") + G.add_edge("Guadagni", "Lamberteschi") + return G + + +@nx._dispatch(graphs=None) +def les_miserables_graph(): + """Returns coappearance network of characters in the novel Les Miserables. + + References + ---------- + .. [1] D. E. Knuth, 1993. + The Stanford GraphBase: a platform for combinatorial computing, + pp. 74-87. New York: AcM Press. 
+ """ + G = nx.Graph() + G.add_edge("Napoleon", "Myriel", weight=1) + G.add_edge("MlleBaptistine", "Myriel", weight=8) + G.add_edge("MmeMagloire", "Myriel", weight=10) + G.add_edge("MmeMagloire", "MlleBaptistine", weight=6) + G.add_edge("CountessDeLo", "Myriel", weight=1) + G.add_edge("Geborand", "Myriel", weight=1) + G.add_edge("Champtercier", "Myriel", weight=1) + G.add_edge("Cravatte", "Myriel", weight=1) + G.add_edge("Count", "Myriel", weight=2) + G.add_edge("OldMan", "Myriel", weight=1) + G.add_edge("Valjean", "Labarre", weight=1) + G.add_edge("Valjean", "MmeMagloire", weight=3) + G.add_edge("Valjean", "MlleBaptistine", weight=3) + G.add_edge("Valjean", "Myriel", weight=5) + G.add_edge("Marguerite", "Valjean", weight=1) + G.add_edge("MmeDeR", "Valjean", weight=1) + G.add_edge("Isabeau", "Valjean", weight=1) + G.add_edge("Gervais", "Valjean", weight=1) + G.add_edge("Listolier", "Tholomyes", weight=4) + G.add_edge("Fameuil", "Tholomyes", weight=4) + G.add_edge("Fameuil", "Listolier", weight=4) + G.add_edge("Blacheville", "Tholomyes", weight=4) + G.add_edge("Blacheville", "Listolier", weight=4) + G.add_edge("Blacheville", "Fameuil", weight=4) + G.add_edge("Favourite", "Tholomyes", weight=3) + G.add_edge("Favourite", "Listolier", weight=3) + G.add_edge("Favourite", "Fameuil", weight=3) + G.add_edge("Favourite", "Blacheville", weight=4) + G.add_edge("Dahlia", "Tholomyes", weight=3) + G.add_edge("Dahlia", "Listolier", weight=3) + G.add_edge("Dahlia", "Fameuil", weight=3) + G.add_edge("Dahlia", "Blacheville", weight=3) + G.add_edge("Dahlia", "Favourite", weight=5) + G.add_edge("Zephine", "Tholomyes", weight=3) + G.add_edge("Zephine", "Listolier", weight=3) + G.add_edge("Zephine", "Fameuil", weight=3) + G.add_edge("Zephine", "Blacheville", weight=3) + G.add_edge("Zephine", "Favourite", weight=4) + G.add_edge("Zephine", "Dahlia", weight=4) + G.add_edge("Fantine", "Tholomyes", weight=3) + G.add_edge("Fantine", "Listolier", weight=3) + G.add_edge("Fantine", "Fameuil", 
weight=3) + G.add_edge("Fantine", "Blacheville", weight=3) + G.add_edge("Fantine", "Favourite", weight=4) + G.add_edge("Fantine", "Dahlia", weight=4) + G.add_edge("Fantine", "Zephine", weight=4) + G.add_edge("Fantine", "Marguerite", weight=2) + G.add_edge("Fantine", "Valjean", weight=9) + G.add_edge("MmeThenardier", "Fantine", weight=2) + G.add_edge("MmeThenardier", "Valjean", weight=7) + G.add_edge("Thenardier", "MmeThenardier", weight=13) + G.add_edge("Thenardier", "Fantine", weight=1) + G.add_edge("Thenardier", "Valjean", weight=12) + G.add_edge("Cosette", "MmeThenardier", weight=4) + G.add_edge("Cosette", "Valjean", weight=31) + G.add_edge("Cosette", "Tholomyes", weight=1) + G.add_edge("Cosette", "Thenardier", weight=1) + G.add_edge("Javert", "Valjean", weight=17) + G.add_edge("Javert", "Fantine", weight=5) + G.add_edge("Javert", "Thenardier", weight=5) + G.add_edge("Javert", "MmeThenardier", weight=1) + G.add_edge("Javert", "Cosette", weight=1) + G.add_edge("Fauchelevent", "Valjean", weight=8) + G.add_edge("Fauchelevent", "Javert", weight=1) + G.add_edge("Bamatabois", "Fantine", weight=1) + G.add_edge("Bamatabois", "Javert", weight=1) + G.add_edge("Bamatabois", "Valjean", weight=2) + G.add_edge("Perpetue", "Fantine", weight=1) + G.add_edge("Simplice", "Perpetue", weight=2) + G.add_edge("Simplice", "Valjean", weight=3) + G.add_edge("Simplice", "Fantine", weight=2) + G.add_edge("Simplice", "Javert", weight=1) + G.add_edge("Scaufflaire", "Valjean", weight=1) + G.add_edge("Woman1", "Valjean", weight=2) + G.add_edge("Woman1", "Javert", weight=1) + G.add_edge("Judge", "Valjean", weight=3) + G.add_edge("Judge", "Bamatabois", weight=2) + G.add_edge("Champmathieu", "Valjean", weight=3) + G.add_edge("Champmathieu", "Judge", weight=3) + G.add_edge("Champmathieu", "Bamatabois", weight=2) + G.add_edge("Brevet", "Judge", weight=2) + G.add_edge("Brevet", "Champmathieu", weight=2) + G.add_edge("Brevet", "Valjean", weight=2) + G.add_edge("Brevet", "Bamatabois", weight=1) + 
G.add_edge("Chenildieu", "Judge", weight=2) + G.add_edge("Chenildieu", "Champmathieu", weight=2) + G.add_edge("Chenildieu", "Brevet", weight=2) + G.add_edge("Chenildieu", "Valjean", weight=2) + G.add_edge("Chenildieu", "Bamatabois", weight=1) + G.add_edge("Cochepaille", "Judge", weight=2) + G.add_edge("Cochepaille", "Champmathieu", weight=2) + G.add_edge("Cochepaille", "Brevet", weight=2) + G.add_edge("Cochepaille", "Chenildieu", weight=2) + G.add_edge("Cochepaille", "Valjean", weight=2) + G.add_edge("Cochepaille", "Bamatabois", weight=1) + G.add_edge("Pontmercy", "Thenardier", weight=1) + G.add_edge("Boulatruelle", "Thenardier", weight=1) + G.add_edge("Eponine", "MmeThenardier", weight=2) + G.add_edge("Eponine", "Thenardier", weight=3) + G.add_edge("Anzelma", "Eponine", weight=2) + G.add_edge("Anzelma", "Thenardier", weight=2) + G.add_edge("Anzelma", "MmeThenardier", weight=1) + G.add_edge("Woman2", "Valjean", weight=3) + G.add_edge("Woman2", "Cosette", weight=1) + G.add_edge("Woman2", "Javert", weight=1) + G.add_edge("MotherInnocent", "Fauchelevent", weight=3) + G.add_edge("MotherInnocent", "Valjean", weight=1) + G.add_edge("Gribier", "Fauchelevent", weight=2) + G.add_edge("MmeBurgon", "Jondrette", weight=1) + G.add_edge("Gavroche", "MmeBurgon", weight=2) + G.add_edge("Gavroche", "Thenardier", weight=1) + G.add_edge("Gavroche", "Javert", weight=1) + G.add_edge("Gavroche", "Valjean", weight=1) + G.add_edge("Gillenormand", "Cosette", weight=3) + G.add_edge("Gillenormand", "Valjean", weight=2) + G.add_edge("Magnon", "Gillenormand", weight=1) + G.add_edge("Magnon", "MmeThenardier", weight=1) + G.add_edge("MlleGillenormand", "Gillenormand", weight=9) + G.add_edge("MlleGillenormand", "Cosette", weight=2) + G.add_edge("MlleGillenormand", "Valjean", weight=2) + G.add_edge("MmePontmercy", "MlleGillenormand", weight=1) + G.add_edge("MmePontmercy", "Pontmercy", weight=1) + G.add_edge("MlleVaubois", "MlleGillenormand", weight=1) + G.add_edge("LtGillenormand", 
"MlleGillenormand", weight=2) + G.add_edge("LtGillenormand", "Gillenormand", weight=1) + G.add_edge("LtGillenormand", "Cosette", weight=1) + G.add_edge("Marius", "MlleGillenormand", weight=6) + G.add_edge("Marius", "Gillenormand", weight=12) + G.add_edge("Marius", "Pontmercy", weight=1) + G.add_edge("Marius", "LtGillenormand", weight=1) + G.add_edge("Marius", "Cosette", weight=21) + G.add_edge("Marius", "Valjean", weight=19) + G.add_edge("Marius", "Tholomyes", weight=1) + G.add_edge("Marius", "Thenardier", weight=2) + G.add_edge("Marius", "Eponine", weight=5) + G.add_edge("Marius", "Gavroche", weight=4) + G.add_edge("BaronessT", "Gillenormand", weight=1) + G.add_edge("BaronessT", "Marius", weight=1) + G.add_edge("Mabeuf", "Marius", weight=1) + G.add_edge("Mabeuf", "Eponine", weight=1) + G.add_edge("Mabeuf", "Gavroche", weight=1) + G.add_edge("Enjolras", "Marius", weight=7) + G.add_edge("Enjolras", "Gavroche", weight=7) + G.add_edge("Enjolras", "Javert", weight=6) + G.add_edge("Enjolras", "Mabeuf", weight=1) + G.add_edge("Enjolras", "Valjean", weight=4) + G.add_edge("Combeferre", "Enjolras", weight=15) + G.add_edge("Combeferre", "Marius", weight=5) + G.add_edge("Combeferre", "Gavroche", weight=6) + G.add_edge("Combeferre", "Mabeuf", weight=2) + G.add_edge("Prouvaire", "Gavroche", weight=1) + G.add_edge("Prouvaire", "Enjolras", weight=4) + G.add_edge("Prouvaire", "Combeferre", weight=2) + G.add_edge("Feuilly", "Gavroche", weight=2) + G.add_edge("Feuilly", "Enjolras", weight=6) + G.add_edge("Feuilly", "Prouvaire", weight=2) + G.add_edge("Feuilly", "Combeferre", weight=5) + G.add_edge("Feuilly", "Mabeuf", weight=1) + G.add_edge("Feuilly", "Marius", weight=1) + G.add_edge("Courfeyrac", "Marius", weight=9) + G.add_edge("Courfeyrac", "Enjolras", weight=17) + G.add_edge("Courfeyrac", "Combeferre", weight=13) + G.add_edge("Courfeyrac", "Gavroche", weight=7) + G.add_edge("Courfeyrac", "Mabeuf", weight=2) + G.add_edge("Courfeyrac", "Eponine", weight=1) + 
G.add_edge("Courfeyrac", "Feuilly", weight=6) + G.add_edge("Courfeyrac", "Prouvaire", weight=3) + G.add_edge("Bahorel", "Combeferre", weight=5) + G.add_edge("Bahorel", "Gavroche", weight=5) + G.add_edge("Bahorel", "Courfeyrac", weight=6) + G.add_edge("Bahorel", "Mabeuf", weight=2) + G.add_edge("Bahorel", "Enjolras", weight=4) + G.add_edge("Bahorel", "Feuilly", weight=3) + G.add_edge("Bahorel", "Prouvaire", weight=2) + G.add_edge("Bahorel", "Marius", weight=1) + G.add_edge("Bossuet", "Marius", weight=5) + G.add_edge("Bossuet", "Courfeyrac", weight=12) + G.add_edge("Bossuet", "Gavroche", weight=5) + G.add_edge("Bossuet", "Bahorel", weight=4) + G.add_edge("Bossuet", "Enjolras", weight=10) + G.add_edge("Bossuet", "Feuilly", weight=6) + G.add_edge("Bossuet", "Prouvaire", weight=2) + G.add_edge("Bossuet", "Combeferre", weight=9) + G.add_edge("Bossuet", "Mabeuf", weight=1) + G.add_edge("Bossuet", "Valjean", weight=1) + G.add_edge("Joly", "Bahorel", weight=5) + G.add_edge("Joly", "Bossuet", weight=7) + G.add_edge("Joly", "Gavroche", weight=3) + G.add_edge("Joly", "Courfeyrac", weight=5) + G.add_edge("Joly", "Enjolras", weight=5) + G.add_edge("Joly", "Feuilly", weight=5) + G.add_edge("Joly", "Prouvaire", weight=2) + G.add_edge("Joly", "Combeferre", weight=5) + G.add_edge("Joly", "Mabeuf", weight=1) + G.add_edge("Joly", "Marius", weight=2) + G.add_edge("Grantaire", "Bossuet", weight=3) + G.add_edge("Grantaire", "Enjolras", weight=3) + G.add_edge("Grantaire", "Combeferre", weight=1) + G.add_edge("Grantaire", "Courfeyrac", weight=2) + G.add_edge("Grantaire", "Joly", weight=2) + G.add_edge("Grantaire", "Gavroche", weight=1) + G.add_edge("Grantaire", "Bahorel", weight=1) + G.add_edge("Grantaire", "Feuilly", weight=1) + G.add_edge("Grantaire", "Prouvaire", weight=1) + G.add_edge("MotherPlutarch", "Mabeuf", weight=3) + G.add_edge("Gueulemer", "Thenardier", weight=5) + G.add_edge("Gueulemer", "Valjean", weight=1) + G.add_edge("Gueulemer", "MmeThenardier", weight=1) + 
G.add_edge("Gueulemer", "Javert", weight=1) + G.add_edge("Gueulemer", "Gavroche", weight=1) + G.add_edge("Gueulemer", "Eponine", weight=1) + G.add_edge("Babet", "Thenardier", weight=6) + G.add_edge("Babet", "Gueulemer", weight=6) + G.add_edge("Babet", "Valjean", weight=1) + G.add_edge("Babet", "MmeThenardier", weight=1) + G.add_edge("Babet", "Javert", weight=2) + G.add_edge("Babet", "Gavroche", weight=1) + G.add_edge("Babet", "Eponine", weight=1) + G.add_edge("Claquesous", "Thenardier", weight=4) + G.add_edge("Claquesous", "Babet", weight=4) + G.add_edge("Claquesous", "Gueulemer", weight=4) + G.add_edge("Claquesous", "Valjean", weight=1) + G.add_edge("Claquesous", "MmeThenardier", weight=1) + G.add_edge("Claquesous", "Javert", weight=1) + G.add_edge("Claquesous", "Eponine", weight=1) + G.add_edge("Claquesous", "Enjolras", weight=1) + G.add_edge("Montparnasse", "Javert", weight=1) + G.add_edge("Montparnasse", "Babet", weight=2) + G.add_edge("Montparnasse", "Gueulemer", weight=2) + G.add_edge("Montparnasse", "Claquesous", weight=2) + G.add_edge("Montparnasse", "Valjean", weight=1) + G.add_edge("Montparnasse", "Gavroche", weight=1) + G.add_edge("Montparnasse", "Eponine", weight=1) + G.add_edge("Montparnasse", "Thenardier", weight=1) + G.add_edge("Toussaint", "Cosette", weight=2) + G.add_edge("Toussaint", "Javert", weight=1) + G.add_edge("Toussaint", "Valjean", weight=1) + G.add_edge("Child1", "Gavroche", weight=2) + G.add_edge("Child2", "Gavroche", weight=2) + G.add_edge("Child2", "Child1", weight=3) + G.add_edge("Brujon", "Babet", weight=3) + G.add_edge("Brujon", "Gueulemer", weight=3) + G.add_edge("Brujon", "Thenardier", weight=3) + G.add_edge("Brujon", "Gavroche", weight=1) + G.add_edge("Brujon", "Eponine", weight=1) + G.add_edge("Brujon", "Claquesous", weight=1) + G.add_edge("Brujon", "Montparnasse", weight=1) + G.add_edge("MmeHucheloup", "Bossuet", weight=1) + G.add_edge("MmeHucheloup", "Joly", weight=1) + G.add_edge("MmeHucheloup", "Grantaire", weight=1) + 
@np_random_state(3)
@nx._dispatch
def spectral_graph_forge(G, alpha, transformation="identity", seed=None):
    """Return a random simple graph whose spectrum resembles that of `G`.

    Spectral Graph Forge (SGF) keeps the fraction `alpha` of dominant
    eigenvectors of the (optionally transformed) adjacency matrix of `G`,
    rebuilds a low-rank symmetric matrix from them, and interprets its
    entries as Bernoulli probabilities for sampling a new adjacency matrix.
    SGF preserves the number of nodes of the input graph and their ordering,
    so node attributes can be mapped directly onto the output.

    Parameters
    ----------
    G : Graph
    alpha : float
        Fraction of eigenvectors of `G` to keep; clipped into [0, 1].
    transformation : string, optional
        Matrix transformation to apply before filtering: 'identity' (raw
        adjacency matrix) or 'modularity' (Newman's modularity matrix,
        which focuses on community-structure-related properties).
    seed : integer, random_state, or None (default)
        Indicator of numpy random number generation state.
        See :ref:`Randomness`.

    Returns
    -------
    H : Graph
        A graph with an eigenvector structure similar to the input one.

    Raises
    ------
    NetworkXError
        If `transformation` is neither 'identity' nor 'modularity'.

    References
    ----------
    .. [1] L. Baldesi, C. T. Butts, A. Markopoulou, "Spectral Graph Forge:
       Graph Generation Targeting Modularity", IEEE Infocom, '18.
       https://arxiv.org/abs/1801.01715
    .. [2] M. Newman, "Networks: an introduction", Oxford university press,
       2010

    Examples
    --------
    >>> G = nx.karate_club_graph()
    >>> H = nx.spectral_graph_forge(G, 0.3)
    >>>
    """
    import numpy as np
    import scipy as sp

    available_transformations = ["identity", "modularity"]
    alpha = np.clip(alpha, 0, 1)
    adj = nx.to_numpy_array(G)
    dim = adj.shape[1]
    rank = round(dim * alpha)

    if transformation not in available_transformations:
        msg = f"{transformation!r} is not a valid transformation. "
        msg += f"Transformations: {available_transformations}"
        raise nx.NetworkXError(msg)

    # Row vector of node degrees (column sums of the adjacency matrix).
    degrees = np.ones((1, dim)) @ adj

    mat = adj
    if transformation == "modularity":
        # Newman's modularity matrix: subtract the expected-degree product.
        mat -= degrees.T @ degrees / degrees.sum()

    # Low-rank approximation: zero all but the `rank` eigenvectors whose
    # eigenvalues are largest in absolute value (a low-pass-style filter).
    eigvals, eigvecs = np.linalg.eigh(mat)
    order = np.argsort(np.abs(eigvals))[::-1]
    eigvecs[:, order[np.arange(rank, dim)]] = 0
    mat = eigvecs @ np.diag(eigvals) @ eigvecs.T

    if transformation == "modularity":
        mat += degrees.T @ degrees / degrees.sum()

    # Clamp entries into [0, 1] and sample a symmetric 0/1 matrix with no
    # self-loops, treating each upper-triangle entry as an edge probability.
    mat = np.clip(mat, 0, 1)
    np.fill_diagonal(mat, 0)
    for i in range(dim - 1):
        mat[i, i + 1 :] = sp.stats.bernoulli.rvs(mat[i, i + 1 :], random_state=seed)
        mat[i + 1 :, i] = np.transpose(mat[i, i + 1 :])

    return nx.from_numpy_array(mat)
@not_implemented_for("undirected")
@nx._dispatch(edge_attrs="weight")
def stochastic_graph(G, copy=True, weight="weight"):
    """Return a right-stochastic representation of the directed graph `G`.

    In a right-stochastic digraph the weights of the out-edges of every
    node sum to 1. Existing weights are rescaled; edges without a weight
    attribute are treated as weight 1.

    Parameters
    ----------
    G : directed graph
        A :class:`~networkx.DiGraph` or :class:`~networkx.MultiDiGraph`.

    copy : boolean, optional
        If True (default), a new reweighted graph is returned. Otherwise
        `G` itself is modified in place (and also returned, for
        convenience).

    weight : edge attribute key (optional, default='weight')
        Attribute key used both to read the existing weight and to store
        the new one. If an edge has a weight, it must be a positive number.
    """
    if copy:
        G = MultiDiGraph(G) if G.is_multigraph() else DiGraph(G)
    # Total outgoing weight per node, computed once up front. This trades
    # the memory of a node->total dict for avoiding a `G.out_degree` call
    # on every edge of the loop below.
    out_total = dict(G.out_degree(weight=weight))
    for node, _, attrs in G.edges(data=True):
        total = out_total[node]
        # A node with zero total outgoing weight cannot be normalized;
        # its edge weights are set to 0.
        attrs[weight] = 0 if total == 0 else attrs.get(weight, 1) / total
    return G
@nx._dispatch(graphs=None)
def prefix_tree(paths):
    """Create a directed prefix tree (trie) from an iterable of paths.

    Each node of the result represents a common prefix of one or more of
    the given paths. Node `0` is a synthetic root (``source=None``) and
    node `-1` is a synthetic "nil" leaf (``source="NIL"``) that every
    complete path terminates at; all other nodes are numbered
    consecutively in creation order and carry the original path element
    in their "source" attribute. The prefix associated with a node can be
    recovered by walking up to the root and accumulating "source" values.

    Parameters
    ----------
    paths: iterable of paths
        An iterable of sequences. Shared prefixes among the sequences are
        merged into shared nodes; identical paths share a leaf.

    Returns
    -------
    tree: DiGraph
        A directed graph whose edges point from parent prefix to child
        prefix. Removing the nil node `-1` leaves an arborescence.

    Notes
    -----
    The prefix tree is also known as a *trie*.

    Examples
    --------
    Create a prefix tree from a list of strings with common prefixes::

        >>> paths = ["ab", "abs", "ad"]
        >>> T = nx.prefix_tree(paths)
        >>> list(T.edges)
        [(0, 1), (1, 2), (1, 4), (2, -1), (2, 3), (3, -1), (4, -1)]

    The leaf nodes can be obtained as predecessors of the nil node::

        >>> root, NIL = 0, -1
        >>> list(T.predecessors(NIL))
        [2, 3, 4]
    """

    def _group_by_head(parent, path_group):
        # Bucket each path under its first element; an exhausted path
        # terminates here, so connect `parent` straight to the NIL leaf.
        buckets = defaultdict(list)
        for path in path_group:
            if not path:
                tree.add_edge(parent, NIL)
                continue
            head, *tail = path
            # `head` may begin more than one path; all tails share a bucket.
            buckets[head].append(tail)
        return buckets

    tree = nx.DiGraph()
    root = 0
    tree.add_node(root, source=None)
    NIL = -1
    tree.add_node(NIL, source="NIL")
    # Explicit DFS stack of (parent, iterator over its pending children).
    stack = [(root, iter(_group_by_head(root, paths).items()))]
    while stack:
        parent, pending = stack[-1]
        try:
            head, tails = next(pending)
        except StopIteration:
            # No children left under this parent; unwind one level.
            stack.pop()
            continue
        # Relabel the child with the next unused integer (NIL occupies a slot).
        node = len(tree) - 1
        tree.add_node(node, source=head)
        tree.add_edge(parent, node)
        stack.append((node, iter(_group_by_head(node, tails).items())))

    return tree
@nx._dispatch(graphs=None)
def prefix_tree_recursive(paths):
    """Recursively create a directed prefix tree (trie) from paths.

    This is the original recursive variant of :func:`prefix_tree`, kept
    for comparison; it produces exactly the same graph.

    Each node represents a common prefix of the given paths. Node `0` is
    a synthetic root (``source=None``) and node `-1` is a synthetic "nil"
    leaf (``source="NIL"``) that every complete path terminates at; all
    other nodes are numbered consecutively in creation order and carry
    the original path element in their "source" attribute. The prefix for
    a node can be reconstructed by walking up to the root and
    accumulating the "source" values along the way.

    Parameters
    ----------
    paths: iterable of paths
        An iterable of sequences. Shared prefixes among the sequences are
        merged into shared nodes; identical paths share a leaf.

    Returns
    -------
    tree: DiGraph
        A directed graph whose edges point from parent prefix to child
        prefix. Removing the nil node `-1` leaves an arborescence.

    Notes
    -----
    The prefix tree is also known as a *trie*.

    Examples
    --------
    >>> paths = ["ab", "abs", "ad"]
    >>> T = nx.prefix_tree(paths)
    >>> list(T.edges)
    [(0, 1), (1, 2), (1, 4), (2, -1), (2, 3), (3, -1), (4, -1)]
    """

    def _attach(path_group, parent, tree):
        """Attach one trie level for `path_group` under `parent`, then recurse.

        `path_group` holds the path remainders relative to `parent` (not
        including it); `tree` is the accumulator graph being built.
        """
        buckets = defaultdict(list)
        for path in path_group:
            # An exhausted path terminates here: edge straight to NIL.
            if not path:
                tree.add_edge(parent, NIL)
                continue
            head, *tail = path
            # `head` may begin more than one path; all tails share a bucket.
            buckets[head].append(tail)
        for head, tails in buckets.items():
            # Relabel the child with the next unused integer label.
            node = len(tree) - 1
            tree.add_node(node, source=head)
            tree.add_edge(parent, node)
            _attach(tails, node, tree)

    tree = nx.DiGraph()
    root = 0
    tree.add_node(root, source=None)
    NIL = -1
    tree.add_node(NIL, source="NIL")
    _attach(paths, root, tree)
    return tree
Since there is a + bijection between Prüfer sequences of length *n* - 2 and trees on + *n* nodes, the tree is chosen uniformly at random from the set of + all trees on *n* nodes. + + Examples + -------- + >>> tree = nx.random_tree(n=10, seed=0) + >>> nx.write_network_text(tree, sources=[0]) + ╙── 0 + ├── 3 + └── 4 + ├── 6 + │ ├── 1 + │ ├── 2 + │ └── 7 + │ └── 8 + │ └── 5 + └── 9 + + >>> tree = nx.random_tree(n=10, seed=0, create_using=nx.DiGraph) + >>> nx.write_network_text(tree) + ╙── 0 + ├─╼ 3 + └─╼ 4 + ├─╼ 6 + │ ├─╼ 1 + │ ├─╼ 2 + │ └─╼ 7 + │ └─╼ 8 + │ └─╼ 5 + └─╼ 9 + """ + warnings.warn( + ( + "\n\nrandom_tree is deprecated and will be removed in NX v3.4\n" + "Use random_labeled_tree instead." + ), + DeprecationWarning, + stacklevel=2, + ) + if n == 0: + raise nx.NetworkXPointlessConcept("the null graph is not a tree") + # Cannot create a Prüfer sequence unless `n` is at least two. + if n == 1: + utree = nx.empty_graph(1, create_using) + else: + sequence = [seed.choice(range(n)) for i in range(n - 2)] + utree = nx.from_prufer_sequence(sequence) + + if create_using is None: + tree = utree + else: + tree = nx.empty_graph(0, create_using) + if tree.is_directed(): + # Use a arbitrary root node and dfs to define edge directions + edges = nx.dfs_edges(utree, source=0) + else: + edges = utree.edges + + # Populate the specified graph type + tree.add_nodes_from(utree.nodes) + tree.add_edges_from(edges) + + return tree + + +@py_random_state("seed") +@nx._dispatch(graphs=None) +def random_labeled_tree(n, *, seed=None): + """Returns a labeled tree on `n` nodes chosen uniformly at random. + + Generating uniformly distributed random Prüfer sequences and + converting them into the corresponding trees is a straightforward + method of generating uniformly distributed random labeled trees. + This function implements this method. + + Parameters + ---------- + n : int + The number of nodes, greater than zero. + seed : random_state + Indicator of random number generation state. 
@py_random_state("seed")
@nx._dispatch(graphs=None)
def random_labeled_tree(n, *, seed=None):
    """Returns a labeled tree on `n` nodes chosen uniformly at random.

    Generating uniformly distributed random Prüfer sequences and
    converting them into the corresponding trees is a straightforward
    method of generating uniformly distributed random labeled trees.
    This function implements this method.

    Parameters
    ----------
    n : int
        The number of nodes, greater than zero.
    seed : random_state
        Indicator of random number generation state.
        See :ref:`Randomness`

    Returns
    -------
    :class:`networkx.Graph`
        A `networkx.Graph` with nodes in the set {0, …, *n* - 1}.

    Raises
    ------
    NetworkXPointlessConcept
        If `n` is zero (because the null graph is not a tree).
    """
    # Cannot create a Prüfer sequence unless `n` is at least two.
    if n == 0:
        raise nx.NetworkXPointlessConcept("the null graph is not a tree")
    if n == 1:
        return nx.empty_graph(1)
    return nx.from_prufer_sequence([seed.choice(range(n)) for i in range(n - 2)])


@py_random_state("seed")
@nx._dispatch(graphs=None)
def random_labeled_rooted_tree(n, *, seed=None):
    """Returns a labeled rooted tree with `n` nodes.

    The returned tree is chosen uniformly at random from all labeled
    rooted trees: it is the result of :func:`random_labeled_tree` with a
    root selected uniformly from the nodes.

    Parameters
    ----------
    n : int
        The number of nodes
    seed : integer, random_state, or None (default)
        Indicator of random number generation state.
        See :ref:`Randomness`.

    Returns
    -------
    :class:`networkx.Graph`
        A `networkx.Graph` with integer nodes 0 <= node <= `n` - 1.
        The "root" graph attribute identifies the root of the tree.

    Raises
    ------
    NetworkXPointlessConcept
        If `n` is zero (because the null graph is not a tree).
    """
    t = random_labeled_tree(n, seed=seed)
    t.graph["root"] = seed.randint(0, n - 1)
    return t


@py_random_state("seed")
@nx._dispatch(graphs=None)
def random_labeled_rooted_forest(n, *, seed=None):
    """Returns a labeled rooted forest with `n` nodes.

    The returned forest is chosen uniformly at random using a
    generalization of Prüfer sequences [1]_ in the form described in [2]_.

    Parameters
    ----------
    n : int
        The number of nodes.
    seed : random_state
        See :ref:`Randomness`.

    Returns
    -------
    :class:`networkx.Graph`
        A `networkx.Graph` with integer nodes 0 <= node <= `n` - 1.
        The "roots" graph attribute is a set of integers containing the roots.

    References
    ----------
    .. [1] Knuth, Donald E. "Another Enumeration of Trees."
       Canadian Journal of Mathematics, 20 (1968): 1077-1086.
       https://doi.org/10.4153/CJM-1968-104-8
    .. [2] Rubey, Martin. "Counting Spanning Trees". Diplomarbeit
       zur Erlangung des akademischen Grades Magister der
       Naturwissenschaften an der Formal- und Naturwissenschaftlichen
       Fakultät der Universität Wien. Wien, May 2000.
    """

    # Select the number of roots by iterating over the cumulative count of
    # trees with at most k roots.
    def _select_k(n, seed):
        r = seed.randint(0, (n + 1) ** (n - 1) - 1)
        cum_sum = 0
        for k in range(1, n):
            cum_sum += (factorial(n - 1) * n ** (n - k)) // (
                factorial(k - 1) * factorial(n - k)
            )
            if r < cum_sum:
                return k

        return n

    F = nx.empty_graph(n)
    if n == 0:
        # Use an empty *set* (not an empty dict) so the "roots" attribute
        # has a consistent type for every value of n.
        F.graph["roots"] = set()
        return F
    # Select the number of roots k
    k = _select_k(n, seed)
    if k == n:
        F.graph["roots"] = set(range(n))
        return F  # Nothing to do
    # Select the roots
    roots = seed.sample(range(n), k)
    # Nonroots
    p = set(range(n)).difference(roots)
    # Coding sequence
    N = [seed.randint(0, n - 1) for i in range(n - k - 1)]
    # Multiset of elements in N also in p
    degree = Counter([x for x in N if x in p])
    # Iterator over the elements of p with degree zero
    iterator = iter(x for x in p if degree[x] == 0)
    u = last = next(iterator)
    # This loop is identical to that for Prüfer sequences,
    # except that we can draw nodes only from p
    for v in N:
        F.add_edge(u, v)
        degree[v] -= 1
        if v < last and degree[v] == 0:
            u = v
        else:
            last = u = next(iterator)

    F.add_edge(u, roots[0])
    F.graph["roots"] = set(roots)
    return F


# The following functions support generation of unlabeled trees and forests.
+ + +def _to_nx(edges, n_nodes, root=None, roots=None): + """ + Converts the (edges, n_nodes) input to a :class:`networkx.Graph`. + The (edges, n_nodes) input is a list of even length, where each pair + of consecutive integers represents an edge, and an integer `n_nodes`. + Integers in the list are elements of `range(n_nodes)`. + + Parameters + ---------- + edges : list of ints + The flattened list of edges of the graph. + n_nodes : int + The number of nodes of the graph. + root: int (default=None) + If not None, the "root" attribute of the graph will be set to this value. + roots: collection of ints (default=None) + If not None, he "roots" attribute of the graph will be set to this value. + + Returns + ------- + :class:`networkx.Graph` + The graph with `n_nodes` nodes and edges given by `edges`. + """ + G = nx.empty_graph(n_nodes) + G.add_edges_from(edges) + if root is not None: + G.graph["root"] = root + if roots is not None: + G.graph["roots"] = roots + return G + + +def _num_rooted_trees(n, cache_trees): + """Returns the number of unlabeled rooted trees with `n` nodes. + + See also https://oeis.org/A000081. + + Parameters + ---------- + n : int + The number of nodes + cache_trees : list of ints + The $i$-th element is the number of unlabeled rooted trees with $i$ nodes, + which is used as a cache (and is extended to length $n+1$ if needed) + + Returns + ------- + int + The number of unlabeled rooted trees with `n` nodes. + """ + for n_i in range(len(cache_trees), n + 1): + cache_trees.append( + sum( + [ + d * cache_trees[n_i - j * d] * cache_trees[d] + for d in range(1, n_i) + for j in range(1, (n_i - 1) // d + 1) + ] + ) + // (n_i - 1) + ) + return cache_trees[n] + + +def _select_jd_trees(n, cache_trees, seed): + """Returns a pair $(j,d)$ with a specific probability + + Given $n$, returns a pair of positive integers $(j,d)$ with the probability + specified in formula (5) of Chapter 29 of [1]_. 
def _select_jd_trees(n, cache_trees, seed):
    """Draw a pair of positive integers $(j, d)$ at random.

    The pair is drawn with the probability specified in formula (5) of
    Chapter 29 of Nijenhuis & Wilf [1]_; it drives the recursive
    decomposition used by the "RANRUT" algorithm.

    Parameters
    ----------
    n : int
        The number of nodes
    cache_trees : list of ints
        Cache for :func:`_num_rooted_trees`.
    seed : random_state
        See :ref:`Randomness`.

    Returns
    -------
    (int, int)
        A pair of positive integers $(j, d)$.

    References
    ----------
    .. [1] Nijenhuis, Albert, and Wilf, Herbert S.
       "Combinatorial algorithms: for computers and calculators."
       Academic Press, 1978.
       https://doi.org/10.1016/C2013-0-11243-3
    """
    threshold = seed.randint(0, _num_rooted_trees(n, cache_trees) * (n - 1) - 1)
    acc = 0
    for d in range(n - 1, 0, -1):
        for j in range(1, (n - 1) // d + 1):
            acc += (
                d
                * _num_rooted_trees(n - j * d, cache_trees)
                * _num_rooted_trees(d, cache_trees)
            )
            if threshold < acc:
                return (j, d)


def _random_unlabeled_rooted_tree(n, cache_trees, seed):
    """Return a uniform random unlabeled rooted tree with `n` nodes.

    Implements the "RANRUT" algorithm from [1]_. The tree is returned as
    a 2-tuple ``(list_of_edges, number_of_nodes)``; the root is node 0.

    Parameters
    ----------
    n : int
        The number of nodes, greater than zero.
    cache_trees : list ints
        Cache for :func:`_num_rooted_trees`.
    seed : random_state
        See :ref:`Randomness`.

    Returns
    -------
    (list_of_edges, number_of_nodes) : list, int
        A random unlabeled rooted tree with `n` nodes.

    References
    ----------
    .. [1] Nijenhuis, Albert, and Wilf, Herbert S.
       "Combinatorial algorithms: for computers and calculators."
       Academic Press, 1978.
       https://doi.org/10.1016/C2013-0-11243-3
    """
    # Base cases: the 1- and 2-node rooted trees are unique.
    if n == 1:
        return [], 1
    if n == 2:
        return [(0, 1)], 2

    j, d = _select_jd_trees(n, cache_trees, seed)
    big, big_n = _random_unlabeled_rooted_tree(n - j * d, cache_trees, seed)
    small, small_n = _random_unlabeled_rooted_tree(d, cache_trees, seed)
    # Hang j shifted copies of the d-node subtree off the big tree's root.
    big.extend((0, small_n * i + big_n) for i in range(j))
    for _ in range(j):
        big.extend((a + big_n, b + big_n) for a, b in small)
        big_n += small_n

    return big, big_n


@py_random_state("seed")
@nx._dispatch(graphs=None)
def random_unlabeled_rooted_tree(n, *, number_of_trees=None, seed=None):
    """Returns one or more unlabeled rooted trees drawn uniformly at random.

    Parameters
    ----------
    n : int
        The number of nodes
    number_of_trees : int or None (default)
        If not None, this number of trees is generated and returned.
    seed : integer, random_state, or None (default)
        Indicator of random number generation state.
        See :ref:`Randomness`.

    Returns
    -------
    :class:`networkx.Graph` or list of :class:`networkx.Graph`
        A single `networkx.Graph` (or a list thereof, if `number_of_trees`
        is specified) with nodes in the set {0, …, *n* - 1}.
        The "root" graph attribute identifies the root of the tree.

    Notes
    -----
    The trees are generated using the "RANRUT" algorithm from [1]_.
    The counting functions involved are relatively expensive, so when
    several trees are needed, pass `number_of_trees` so their cache is
    shared across the draws.

    Raises
    ------
    NetworkXPointlessConcept
        If `n` is zero (because the null graph is not a tree).

    References
    ----------
    .. [1] Nijenhuis, Albert, and Wilf, Herbert S.
       "Combinatorial algorithms: for computers and calculators."
       Academic Press, 1978.
       https://doi.org/10.1016/C2013-0-11243-3
    """
    if n == 0:
        raise nx.NetworkXPointlessConcept("the null graph is not a tree")
    cache_trees = [0, 1]  # initial cache of number of rooted trees
    if number_of_trees is None:
        return _to_nx(*_random_unlabeled_rooted_tree(n, cache_trees, seed), root=0)
    return [
        _to_nx(*_random_unlabeled_rooted_tree(n, cache_trees, seed), root=0)
        for _ in range(number_of_trees)
    ]
+ "Combinatorial algorithms: for computers and calculators." + Academic Press, 1978. + https://doi.org/10.1016/C2013-0-11243-3 + """ + if n == 0: + raise nx.NetworkXPointlessConcept("the null graph is not a tree") + cache_trees = [0, 1] # initial cache of number of rooted trees + if number_of_trees is None: + return _to_nx(*_random_unlabeled_rooted_tree(n, cache_trees, seed), root=0) + return [ + _to_nx(*_random_unlabeled_rooted_tree(n, cache_trees, seed), root=0) + for i in range(number_of_trees) + ] + + +def _num_rooted_forests(n, q, cache_forests): + """Returns the number of unlabeled rooted forests with `n` nodes, and with + no more than `q` nodes per tree. A recursive formula for this is (2) in + [1]_. This function is implemented using dynamic programming instead of + recursion. + + Parameters + ---------- + n : int + The number of nodes. + q : int + The maximum number of nodes for each tree of the forest. + cache_forests : list of ints + The $i$-th element is the number of unlabeled rooted forests with + $i$ nodes, and with no more than `q` nodes per tree; this is used + as a cache (and is extended to length `n` + 1 if needed). + + Returns + ------- + int + The number of unlabeled rooted forests with `n` nodes with no more than + `q` nodes per tree. + + References + ---------- + .. [1] Wilf, Herbert S. "The uniform selection of free trees." + Journal of Algorithms 2.2 (1981): 204-207. + https://doi.org/10.1016/0196-6774(81)90021-3 + """ + for n_i in range(len(cache_forests), n + 1): + q_i = min(n_i, q) + cache_forests.append( + sum( + [ + d * cache_forests[n_i - j * d] * cache_forests[d - 1] + for d in range(1, q_i + 1) + for j in range(1, n_i // d + 1) + ] + ) + // n_i + ) + + return cache_forests[n] + + +def _select_jd_forests(n, q, cache_forests, seed): + """Given `n` and `q`, returns a pair of positive integers $(j,d)$ + such that $j\\leq d$, with probability satisfying (F1) of [1]_. + + Parameters + ---------- + n : int + The number of nodes. 
+ q : int + The maximum number of nodes for each tree of the forest. + cache_forests : list of ints + Cache for :func:`_num_rooted_forests`. + seed : random_state + See :ref:`Randomness`. + + Returns + ------- + (int, int) + A pair of positive integers $(j,d)$ + + References + ---------- + .. [1] Wilf, Herbert S. "The uniform selection of free trees." + Journal of Algorithms 2.2 (1981): 204-207. + https://doi.org/10.1016/0196-6774(81)90021-3 + """ + p = seed.randint(0, _num_rooted_forests(n, q, cache_forests) * n - 1) + cumsum = 0 + for d in range(q, 0, -1): + for j in range(1, n // d + 1): + cumsum += ( + d + * _num_rooted_forests(n - j * d, q, cache_forests) + * _num_rooted_forests(d - 1, q, cache_forests) + ) + if p < cumsum: + return (j, d) + + +def _random_unlabeled_rooted_forest(n, q, cache_trees, cache_forests, seed): + """Returns an unlabeled rooted forest with `n` nodes, and with no more + than `q` nodes per tree, drawn uniformly at random. It is an implementation + of the algorithm "Forest" of [1]_. + + Parameters + ---------- + n : int + The number of nodes. + q : int + The maximum number of nodes per tree. + cache_trees : + Cache for :func:`_num_rooted_trees`. + cache_forests : + Cache for :func:`_num_rooted_forests`. + seed : random_state + See :ref:`Randomness`. + + Returns + ------- + (edges, n, r) : (list, int, list) + The forest (edges, n) and a list r of root nodes. + + References + ---------- + .. [1] Wilf, Herbert S. "The uniform selection of free trees." + Journal of Algorithms 2.2 (1981): 204-207. 
+ https://doi.org/10.1016/0196-6774(81)90021-3 + """ + if n == 0: + return ([], 0, []) + + j, d = _select_jd_forests(n, q, cache_forests, seed) + t1, t1_nodes, r1 = _random_unlabeled_rooted_forest( + n - j * d, q, cache_trees, cache_forests, seed + ) + t2, t2_nodes = _random_unlabeled_rooted_tree(d, cache_trees, seed) + for _ in range(j): + r1.append(t1_nodes) + t1.extend((n1 + t1_nodes, n2 + t1_nodes) for n1, n2 in t2) + t1_nodes += t2_nodes + return t1, t1_nodes, r1 + + +@py_random_state("seed") +@nx._dispatch(graphs=None) +def random_unlabeled_rooted_forest(n, *, q=None, number_of_forests=None, seed=None): + """Returns a forest or list of forests selected at random. + + Returns one or more (depending on `number_of_forests`) + unlabeled rooted forests with `n` nodes, and with no more than + `q` nodes per tree, drawn uniformly at random. + The "roots" graph attribute identifies the roots of the forest. + + Parameters + ---------- + n : int + The number of nodes + q : int or None (default) + The maximum number of nodes per tree. + number_of_forests : int or None (default) + If not None, this number of forests is generated and returned. + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + Returns + ------- + :class:`networkx.Graph` or list of :class:`networkx.Graph` + A single `networkx.Graph` (or a list thereof, if `number_of_forests` + is specified) with nodes in the set {0, …, *n* - 1}. + The "roots" graph attribute is a set containing the roots + of the trees in the forest. + + Notes + ----- + This function implements the algorithm "Forest" of [1]_. + The algorithm needs to compute some counting functions + that are relatively expensive: in case several trees are needed, + it is advisable to use the `number_of_forests` optional argument + to reuse the counting functions. + + Raises + ------ + ValueError + If `n` is non-zero but `q` is zero. + + References + ---------- + .. 
[1] Wilf, Herbert S. "The uniform selection of free trees." + Journal of Algorithms 2.2 (1981): 204-207. + https://doi.org/10.1016/0196-6774(81)90021-3 + """ + if q is None: + q = n + if q == 0 and n != 0: + raise ValueError("q must be a positive integer if n is positive.") + + cache_trees = [0, 1] # initial cache of number of rooted trees + cache_forests = [1] # initial cache of number of rooted forests + + if number_of_forests is None: + g, nodes, rs = _random_unlabeled_rooted_forest( + n, q, cache_trees, cache_forests, seed + ) + return _to_nx(g, nodes, roots=set(rs)) + + res = [] + for i in range(number_of_forests): + g, nodes, rs = _random_unlabeled_rooted_forest( + n, q, cache_trees, cache_forests, seed + ) + res.append(_to_nx(g, nodes, roots=set(rs))) + return res + + +def _num_trees(n, cache_trees): + """Returns the number of unlabeled trees with `n` nodes. + + See also https://oeis.org/A000055. + + Parameters + ---------- + n : int + The number of nodes. + cache_trees : list of ints + Cache for :func:`_num_rooted_trees`. + + Returns + ------- + int + The number of unlabeled trees with `n` nodes. + """ + r = _num_rooted_trees(n, cache_trees) - sum( + [ + _num_rooted_trees(j, cache_trees) * _num_rooted_trees(n - j, cache_trees) + for j in range(1, n // 2 + 1) + ] + ) + if n % 2 == 0: + r += comb(_num_rooted_trees(n // 2, cache_trees) + 1, 2) + return r + + +def _bicenter(n, cache, seed): + """Returns a bi-centroidal tree on `n` nodes drawn uniformly at random. + + This function implements the algorithm Bicenter of [1]_. + + Parameters + ---------- + n : int + The number of nodes (must be even). + cache : list of ints. + Cache for :func:`_num_rooted_trees`. + seed : random_state + See :ref:`Randomness` + + Returns + ------- + (edges, n) + The tree as a list of edges and number of nodes. + + References + ---------- + .. [1] Wilf, Herbert S. "The uniform selection of free trees." + Journal of Algorithms 2.2 (1981): 204-207. 
+ https://doi.org/10.1016/0196-6774(81)90021-3 + """ + t, t_nodes = _random_unlabeled_rooted_tree(n // 2, cache, seed) + if seed.randint(0, _num_rooted_trees(n // 2, cache)) == 0: + t2, t2_nodes = t, t_nodes + else: + t2, t2_nodes = _random_unlabeled_rooted_tree(n // 2, cache, seed) + t.extend([(n1 + (n // 2), n2 + (n // 2)) for n1, n2 in t2]) + t.append((0, n // 2)) + return t, t_nodes + t2_nodes + + +def _random_unlabeled_tree(n, cache_trees, cache_forests, seed): + """Returns a tree on `n` nodes drawn uniformly at random. + It implements the Wilf's algorithm "Free" of [1]_. + + Parameters + ---------- + n : int + The number of nodes, greater than zero. + cache_trees : list of ints + Cache for :func:`_num_rooted_trees`. + cache_forests : list of ints + Cache for :func:`_num_rooted_forests`. + seed : random_state + Indicator of random number generation state. + See :ref:`Randomness` + + Returns + ------- + (edges, n) + The tree as a list of edges and number of nodes. + + References + ---------- + .. [1] Wilf, Herbert S. "The uniform selection of free trees." + Journal of Algorithms 2.2 (1981): 204-207. + https://doi.org/10.1016/0196-6774(81)90021-3 + """ + if n % 2 == 1: + p = 0 + else: + p = comb(_num_rooted_trees(n // 2, cache_trees) + 1, 2) + if seed.randint(0, _num_trees(n, cache_trees) - 1) < p: + return _bicenter(n, cache_trees, seed) + else: + f, n_f, r = _random_unlabeled_rooted_forest( + n - 1, (n - 1) // 2, cache_trees, cache_forests, seed + ) + for i in r: + f.append((i, n_f)) + return f, n_f + 1 + + +@py_random_state("seed") +@nx._dispatch(graphs=None) +def random_unlabeled_tree(n, *, number_of_trees=None, seed=None): + """Returns a tree or list of trees chosen randomly. + + Returns one or more (depending on `number_of_trees`) + unlabeled trees with `n` nodes drawn uniformly at random. + + Parameters + ---------- + n : int + The number of nodes + number_of_trees : int or None (default) + If not None, this number of trees is generated and returned. 
+ seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + Returns + ------- + :class:`networkx.Graph` or list of :class:`networkx.Graph` + A single `networkx.Graph` (or a list thereof, if + `number_of_trees` is specified) with nodes in the set {0, …, *n* - 1}. + + Raises + ------ + NetworkXPointlessConcept + If `n` is zero (because the null graph is not a tree). + + Notes + ----- + This function generates an unlabeled tree uniformly at random using + Wilf's algorithm "Free" of [1]_. The algorithm needs to + compute some counting functions that are relatively expensive: + in case several trees are needed, it is advisable to use the + `number_of_trees` optional argument to reuse the counting + functions. + + References + ---------- + .. [1] Wilf, Herbert S. "The uniform selection of free trees." + Journal of Algorithms 2.2 (1981): 204-207. + https://doi.org/10.1016/0196-6774(81)90021-3 + """ + if n == 0: + raise nx.NetworkXPointlessConcept("the null graph is not a tree") + + cache_trees = [0, 1] # initial cache of number of rooted trees + cache_forests = [1] # initial cache of number of rooted forests + if number_of_trees is None: + return _to_nx(*_random_unlabeled_tree(n, cache_trees, cache_forests, seed)) + else: + return [ + _to_nx(*_random_unlabeled_tree(n, cache_trees, cache_forests, seed)) + for i in range(number_of_trees) + ] diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/generators/triads.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/generators/triads.py new file mode 100644 index 0000000000000000000000000000000000000000..86d522bba4af81b8a5bd550a0039acb0f4259a58 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/generators/triads.py @@ -0,0 +1,77 @@ +# See https://github.com/networkx/networkx/pull/1474 +# Copyright 2011 Reya Group +# Copyright 2011 Alex Levenson +# Copyright 2011 
Diederik van Liere +"""Functions that generate the triad graphs, that is, the possible +digraphs on three nodes. + +""" +import networkx as nx +from networkx.classes import DiGraph + +__all__ = ["triad_graph"] + +#: Dictionary mapping triad name to list of directed edges in the +#: digraph representation of that triad (with nodes 'a', 'b', and 'c'). +TRIAD_EDGES = { + "003": [], + "012": ["ab"], + "102": ["ab", "ba"], + "021D": ["ba", "bc"], + "021U": ["ab", "cb"], + "021C": ["ab", "bc"], + "111D": ["ac", "ca", "bc"], + "111U": ["ac", "ca", "cb"], + "030T": ["ab", "cb", "ac"], + "030C": ["ba", "cb", "ac"], + "201": ["ab", "ba", "ac", "ca"], + "120D": ["bc", "ba", "ac", "ca"], + "120U": ["ab", "cb", "ac", "ca"], + "120C": ["ab", "bc", "ac", "ca"], + "210": ["ab", "bc", "cb", "ac", "ca"], + "300": ["ab", "ba", "bc", "cb", "ac", "ca"], +} + + +@nx._dispatch(graphs=None) +def triad_graph(triad_name): + """Returns the triad graph with the given name. + + Each string in the following tuple is a valid triad name:: + + ('003', '012', '102', '021D', '021U', '021C', '111D', '111U', + '030T', '030C', '201', '120D', '120U', '120C', '210', '300') + + Each triad name corresponds to one of the possible valid digraph on + three nodes. + + Parameters + ---------- + triad_name : string + The name of a triad, as described above. + + Returns + ------- + :class:`~networkx.DiGraph` + The digraph on three nodes with the given name. The nodes of the + graph are the single-character strings 'a', 'b', and 'c'. + + Raises + ------ + ValueError + If `triad_name` is not the name of a triad. + + See also + -------- + triadic_census + + """ + if triad_name not in TRIAD_EDGES: + raise ValueError( + f'unknown triad name "{triad_name}"; use one of the triad names' + " in the TRIAD_NAMES constant" + ) + G = DiGraph() + G.add_nodes_from("abc") + G.add_edges_from(TRIAD_EDGES[triad_name]) + return G